Commit f1f4b2de authored Jun 29, 2018 by Jussi Enkovaara

Collective exercises

parent 1816a6f6
Changes: 21 files
collectives/README.md  0 → 100644
## Collective operations
In this exercise we test different routines for collective
communication. Write a program for four MPI processes, such that each
process has a data vector with the following data:

In addition, each task has a receive buffer of eight elements, with all
values initialized to -1.
Implement communication that sends and receives values from these data
vectors to the receive buffers using a single collective routine in
each case, so that the receive buffers will have the following values:
a)

b)

c)

d)

You can start from scratch or use the skeleton code found in
[c/collective.c](c/collective.c),
[fortran/collective.F90](fortran/collective.F90) or
[python/collective.py](python/collective.py).
collectives/c/collective.c  0 → 100644
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NTASKS 4

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);

int main(int argc, char *argv[])
{
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* TODO: use a single collective communication call (and maybe prepare
     * some parameters for the call) */

    /* Print data that was received */
    /* TODO: add correct buffer */
    print_buffers(printbuf, ..., 2 * NTASKS);

    MPI_Finalize();
    return 0;
}

void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT, printbuffer, buffersize,
               MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
collectives/c/solution/alltoall.c  0 → 100644
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NTASKS 4

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);

int main(int argc, char *argv[])
{
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Perform the all-to-all communication pattern */
    MPI_Alltoall(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}

void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT, printbuffer, buffersize,
               MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
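A note on the solution above (our explanation, derived from the code): with a
block size of 2, task j sends elements 2i and 2i+1 of its vector to task i,
so after the call task i holds 2i, 2i+1, 8+2i, 9+2i, 16+2i, 17+2i, 24+2i,
25+2i; that is, each task collects the i-th two-element block from every
task's vector.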
collectives/c/solution/broadcast.c  0 → 100644
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NTASKS 4

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);

int main(int argc, char *argv[])
{
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Send (0,1,2,...,7) everywhere */
    MPI_Bcast(sendbuf, 2 * NTASKS, MPI_INT, 0, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}

void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT, printbuffer, buffersize,
               MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
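A note on the solution above (our explanation, derived from the code):
MPI_Bcast overwrites sendbuf in place on every non-root task, which is why
the final print_buffers call is passed sendbuf rather than recvbuf. After the
call, all four tasks hold task 0's vector 0, 1, ..., 7, and the receive
buffers keep their initial -1 values.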
collectives/c/solution/gatherv.c  0 → 100644
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NTASKS 4

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);

int main(int argc, char *argv[])
{
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Gather varying size data to task 1 */
    int offsets[NTASKS] = {0, 1, 2, 4};
    int counts[NTASKS] = {1, 1, 2, 4};

    MPI_Gatherv(sendbuf, counts[rank], MPI_INT, recvbuf, counts, offsets,
                MPI_INT, 1, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}

void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT, printbuffer, buffersize,
               MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
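A note on the solution above (our explanation, derived from the code): each
task sends counts[rank] elements from the start of its vector, and the root
(task 1) places task j's contribution at position offsets[j] of recvbuf. With
counts {1,1,2,4} and offsets {0,1,2,4}, the root's receive buffer therefore
becomes 0, 8, 16, 17, 24, 25, 26, 27, while the other tasks' buffers keep
their -1 values.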
collectives/c/solution/scatter.c  0 → 100644
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define NTASKS 4

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);

int main(int argc, char *argv[])
{
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Scatter the elements from task 0 */
    MPI_Scatter(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}

void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}

void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT, printbuffer, buffersize,
               MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
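A note on the solution above (our explanation, derived from the code): unlike
the broadcast, MPI_Scatter with a count of 2 hands each task a distinct slice
of the root's vector. Task r receives elements 2r and 2r+1 into the front of
recvbuf, and the remaining six elements stay at -1.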
collectives/fortran/collective.F90  0 → 100644
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr, i, color, sub_comm
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! TODO: use a single collective communication call (and maybe prepare
  ! some parameters for the call)

  ! Print data that was received
  ! TODO: add correct buffer
  call print_buffers(...)

  call mpi_finalize(ierr)

contains

  subroutine init_buffers
    implicit none
    integer :: i

    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks*rank - 1
    end do
  end subroutine init_buffers

  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    write(pformat, '(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         &          printbuf, bufsize, MPI_INTEGER, &
         &          0, MPI_COMM_WORLD, ierr)

    if (rank == 0) then
       do i = 1, ntasks
          write(*, pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
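A note on the Fortran skeleton (our explanation, derived from the code): the
`- 1` in init_buffers compensates for the 1-based loop index, so task r's
send vector holds the same values 8r, 8r+1, ..., 8r+7 as in the C version.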
collectives/fortran/solution/alltoall.F90  0 → 100644
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr, i, color, sub_comm
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! Carry out the all-to-all pattern
  call mpi_alltoall(sendbuf, 2, MPI_INTEGER, recvbuf, 2, MPI_INTEGER, &
       &            MPI_COMM_WORLD, ierr)

  ! Print data that was received
  call print_buffers(recvbuf)

  call mpi_finalize(ierr)

contains

  subroutine init_buffers
    implicit none
    integer :: i

    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks*rank - 1
    end do
  end subroutine init_buffers

  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    write(pformat, '(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         &          printbuf, bufsize, MPI_INTEGER, &
         &          0, MPI_COMM_WORLD, ierr)

    if (rank == 0) then
       do i = 1, ntasks
          write(*, pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
collectives/fortran/solution/broadcast.F90  0 → 100644
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr, i, color, sub_comm
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)