MPI on B4F cluster
A minimal MPI test program, hello_mpi.c, prints each process's rank, the total number of processes, and the node it runs on:

<source lang='c'>
#include <stdio.h>
#include <mpi.h>

int main(int argc, char ** argv) {
    int size, rank, namelen;
    char processor_name[MPI_MAX_PROCESSOR_NAME];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Get_processor_name(processor_name, &namelen);
    printf("Hello MPI! Process %d of %d on %s\n", rank, size, processor_name);
    MPI_Finalize();
    return 0;
}
</source>
List the modules that are currently loaded:
<source lang='bash'>
module list
</source>
Clear the environment so the build starts from a clean set of modules:
<source lang='bash'>
module purge
</source>
Load the compiler, OpenMPI and SLURM modules:
<source lang='bash'>
module load gcc/4.8.1 openmpi/gcc/64/1.6.5 slurm/2.5.7
</source>
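Optionally, check that the mpicc wrapper now comes from the freshly loaded OpenMPI module; this is only a sanity check, not a required step (--showme is OpenMPI's wrapper option for printing the underlying gcc command line):
<source lang='bash'>
# Should point into the openmpi/gcc/64/1.6.5 installation
which mpicc
# Show the gcc command line the wrapper would execute
mpicc --showme
</source>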
Compile the test program with the MPI compiler wrapper:
<source lang='bash'>
mpicc hello_mpi.c -o test_hello_world
</source>
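mpicc is a thin wrapper around gcc that adds the MPI include and link flags, so ordinary compiler options pass straight through to gcc; for example (the extra flags shown here are illustrative):
<source lang='bash'>
# Same build with warnings and optimisation enabled; mpicc forwards both flags to gcc
mpicc -Wall -O2 hello_mpi.c -o test_hello_world
</source>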
Verify that the binary picks up libmpi.so.1 from the OpenMPI 1.6.5 module rather than a system-wide MPI:
<source lang='bash'>
ldd test_hello_world
</source>
 linux-vdso.so.1 => (0x00002aaaaaacb000)
 libmpi.so.1 => /cm/shared/apps/openmpi/gcc/64/1.6.5/lib64/libmpi.so.1 (0x00002aaaaaccd000)
 libdl.so.2 => /lib64/libdl.so.2 (0x00002aaaab080000)
 libm.so.6 => /lib64/libm.so.6 (0x00002aaaab284000)
 libnuma.so.1 => /usr/lib64/libnuma.so.1 (0x0000003e29400000)
 librt.so.1 => /lib64/librt.so.1 (0x00002aaaab509000)
 libnsl.so.1 => /lib64/libnsl.so.1 (0x00002aaaab711000)
 libutil.so.1 => /lib64/libutil.so.1 (0x00002aaaab92a000)
 libpthread.so.0 => /lib64/libpthread.so.0 (0x00002aaaabb2e000)
 libc.so.6 => /lib64/libc.so.6 (0x00002aaaabd4b000)
 /lib64/ld-linux-x86-64.so.2 (0x00002aaaaaaab000)
Start the job with srun, requesting two nodes with four MPI tasks each on the ABGC partition:
<source lang='bash'>
srun --nodes=2 --ntasks-per-node=4 --partition=ABGC --mpi=openmpi ./test_hello_world
</source>
Each of the eight processes reports its rank and the node it runs on (the order of the lines varies from run to run):
 Hello MPI! Process 4 of 8 on node011
 Hello MPI! Process 1 of 8 on node010
 Hello MPI! Process 7 of 8 on node011
 Hello MPI! Process 6 of 8 on node011
 Hello MPI! Process 5 of 8 on node011
 Hello MPI! Process 2 of 8 on node010
 Hello MPI! Process 0 of 8 on node010
 Hello MPI! Process 3 of 8 on node010
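The same job can also be submitted non-interactively with sbatch. The script below is a minimal sketch that assumes the same modules, partition and binary as above; the job name and output file name are illustrative:
<source lang='bash'>
#!/bin/bash
#SBATCH --job-name=hello_mpi          # illustrative job name
#SBATCH --output=hello_mpi_%j.out     # illustrative output file; %j expands to the job ID
#SBATCH --nodes=2
#SBATCH --ntasks-per-node=4
#SBATCH --partition=ABGC

# Recreate the build environment inside the batch job
module purge
module load gcc/4.8.1 openmpi/gcc/64/1.6.5 slurm/2.5.7

# Launch one MPI rank per allocated task; the allocation is inherited from sbatch
srun --mpi=openmpi ./test_hello_world
</source>
Submit the script with sbatch (the script file name is arbitrary); the program output then goes to the file given by --output instead of the terminal.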