Skip to content
Commits on Source (24)
......@@ -2,7 +2,39 @@
This repository contains various exercises and examples on parallel programming with message passing interface (MPI).
A working MPI installation is needed, please see individual exercise/example for detailed build instructions.
A working MPI installation is needed for building the code. Simple cases can
be built and run as:
- mpicc -o exe exercise.c ; mpirun -np xxx ./exe (C)
- mpif90 -o exe exercise.f90 ; mpirun -np xxx ./exe (Fortran)
- mpirun -np xxx python program.py (Python)
where mpicc/mpif90/mpirun should be replaced by the correct commands for
the particular computer platform. For more complex cases a Makefile is
provided.
## Exercises
- [Hello world](hello-world) Simplest possible MPI program (C, Fortran and
Python versions). Level: **basic**
- [Message exchange](message-exchange) Simple point-to-point communication
(C, Fortran and Python versions). Level: **basic**
- [Message chain](message-chain) Point-to-point communication in one
dimensional aperiodic chain. (C, Fortran and Python versions).
Level: **intermediate**
- [Collective communication](collectives) Basic collective communication
patterns (C, Fortran and Python versions). Level: **basic/intermediate**
- [Parallel I/O](parallel-io) Simple parallel I/O using Posix calls and
MPI I/O (C and Fortran versions). Level: **basic/intermediate**
- [User defined datatypes](datatypes) Communication of non-uniform data using
user defined datatypes (C, Fortran and Python versions).
Level: **intermediate/advanced**
## Examples
- [Heat equation](heat-equation) A two dimensional heat equation solver which
is parallelized with MPI. The code features non-blocking point-to-point
communication, user defined datatypes, and parallel I/O with MPI I/O
(C, Fortran and Python versions). Level: **advanced**
## How to contribute
......
Copyright (C) 2018 CSC - IT Center for Science Ltd.
Licensed under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Code is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Copy of the GNU General Public License can be obtained from
<http://www.gnu.org/licenses/>.
## Collective operations
In this exercise we test different routines for collective
communication. Write a program for four MPI processes, such that each
process has a data vector with the following data:
![](img/sendbuffer.png)
In addition, each task has a receive buffer for eight elements and the
values in the buffer are initialized to -1.
Implement communication that sends and receives values from these data
vectors to the receive buffers using a single collective routine in
each case, so that the receive buffers will have the following values:
a)
![](img/bcast.png)
b)
![](img/scatter.png)
c)
![](img/gatherv.png)
d)
![](img/alltoall.png)
You can start from scratch or use the skeleton code found in
[c/collective.c](c/collective.c),
[fortran/collective.F90](fortran/collective.F90) or
[python/collective.py](python/collective.py)
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
/* Driver skeleton for the collective communication exercise.
 *
 * Each of the NTASKS tasks owns 2*NTASKS ints in sendbuf (globally
 * unique values, see init_buffers) and a recvbuf preset to -1.  The
 * TODO below is to be replaced with a single collective call.
 * NOTE: this skeleton does not compile as-is — the second
 * print_buffers call still carries the `...` placeholder argument.
 */
int main(int argc, char *argv[])
{
    /* color and sub_comm are provided for a sub-communicator variant */
    int ntasks, rank, color;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    /* gather target used by print_buffers: room for every task's row */
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Comm sub_comm;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The fixed data layout assumes exactly NTASKS processes */
    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* TODO: use a single collective communication call (and maybe prepare
     * some parameters for the call) */

    /* Print data that was received */
    /* TODO: add correct buffer */
    print_buffers(printbuf, ..., 2 * NTASKS);

    MPI_Finalize();
    return 0;
}
/* Reset recvbuffer to the sentinel -1 and give sendbuffer globally
 * unique values rank*buffersize .. rank*buffersize + buffersize - 1,
 * so the tasks together hold consecutive, non-overlapping data. */
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int base = buffersize * rank;
    for (int idx = 0; idx < buffersize; idx++) {
        recvbuffer[idx] = -1;
        sendbuffer[idx] = base + idx;
    }
}
/* Gather every task's buffer to rank 0 and print them, one row per task.
 *
 * printbuffer  scratch space of at least ntasks*buffersize ints;
 *              contents are significant on rank 0 only
 * sendbuffer   this task's data to display
 * buffersize   number of ints per task
 *
 * Collective: every task must call this with the same buffersize.
 */
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    /* Collect all tasks' buffers into printbuffer on rank 0 */
    MPI_Gather(sendbuffer, buffersize, MPI_INT,
               printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
/* Solution d) of the collective communication exercise: all-to-all.
 *
 * Every task sends its i:th two-element segment of sendbuf to task i
 * and receives one segment from every task, i.e. the data is
 * transposed across the tasks.
 *
 * Fix vs. original: the unused locals `color` and `sub_comm` (leftovers
 * from a sub-communicator variant) are removed.
 */
int main(int argc, char *argv[])
{
    int ntasks, rank;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    /* gather target used by print_buffers: room for every task's row */
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The fixed data layout assumes exactly NTASKS processes */
    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Perform the all-to-all communication pattern: two ints go to and
     * come from every task */
    MPI_Alltoall(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}
/* Fill the message buffers for this task.
 *
 * sendbuffer[i] = rank*buffersize + i, so the tasks together hold the
 * consecutive values 0 .. ntasks*buffersize-1 and every element is
 * globally unique.  recvbuffer is set to the sentinel -1 everywhere so
 * that received elements are easy to tell apart from untouched ones.
 */
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}
/* Collect every task's buffer on rank 0 and print one line per task.
 * printbuffer is scratch space (>= ntasks*buffersize ints), used on
 * rank 0 only.  Collective: all tasks must call with the same size. */
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT,
               printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank != 0) {
        return;
    }

    for (int task = 0; task < ntasks; task++) {
        const int *row = printbuffer + task * buffersize;
        printf("Task %i:", task);
        for (int k = 0; k < buffersize; k++) {
            printf(" %2i", row[k]);
        }
        printf("\n");
    }
    printf("\n");
}
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
/* Solution a) of the collective communication exercise: broadcast.
 *
 * Rank 0's sendbuf (values 0..7) is broadcast to every task; the
 * broadcast overwrites sendbuf in place on the non-root tasks.
 *
 * Fix vs. original: the unused locals `color` and `sub_comm` (leftovers
 * from a sub-communicator variant) are removed.
 */
int main(int argc, char *argv[])
{
    int ntasks, rank;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    /* gather target used by print_buffers: room for every task's row */
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The fixed data layout assumes exactly NTASKS processes */
    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Send (0,1,2,...,7) everywhere */
    MPI_Bcast(sendbuf, 2 * NTASKS, MPI_INT, 0, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}
/* Fill the message buffers for this task.
 *
 * sendbuffer[i] = rank*buffersize + i, so the tasks together hold the
 * consecutive values 0 .. ntasks*buffersize-1 and every element is
 * globally unique.  recvbuffer is set to the sentinel -1 everywhere so
 * that received elements are easy to tell apart from untouched ones.
 */
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}
/* Gather every task's buffer to rank 0 and print them, one row per task.
 *
 * printbuffer  scratch space of at least ntasks*buffersize ints;
 *              contents are significant on rank 0 only
 * sendbuffer   this task's data to display
 * buffersize   number of ints per task
 *
 * Collective: every task must call this with the same buffersize.
 */
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    /* Collect all tasks' buffers into printbuffer on rank 0 */
    MPI_Gather(sendbuffer, buffersize, MPI_INT,
               printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
/* Solution c) of the collective communication exercise: gatherv.
 *
 * Tasks contribute 1, 1, 2 and 4 elements respectively (8 in total),
 * which are gathered into recvbuf on task 1; the remaining recvbuf
 * elements keep their sentinel value -1.
 *
 * Fixes vs. original: the unused locals `color` and `sub_comm` are
 * removed, and the displacements are derived from the counts (matching
 * the Fortran version) instead of being hard-coded — the resulting
 * offsets {0,1,2,4} are identical.
 */
int main(int argc, char *argv[])
{
    int ntasks, rank;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    /* gather target used by print_buffers: room for every task's row */
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The fixed data layout assumes exactly NTASKS processes */
    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Gather varying size data to task 1 */
    int counts[NTASKS] = { 1, 1, 2, 4 };   /* sums to 2*NTASKS == 8 */
    int offsets[NTASKS];
    offsets[0] = 0;
    for (int i = 1; i < NTASKS; i++) {
        offsets[i] = offsets[i - 1] + counts[i - 1];
    }
    MPI_Gatherv(sendbuf, counts[rank], MPI_INT, recvbuf, counts,
                offsets, MPI_INT, 1, MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}
/* Reset recvbuffer to the sentinel -1 and give sendbuffer globally
 * unique values rank*buffersize .. rank*buffersize + buffersize - 1,
 * so the tasks together hold consecutive, non-overlapping data. */
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int base = buffersize * rank;
    for (int idx = 0; idx < buffersize; idx++) {
        recvbuffer[idx] = -1;
        sendbuffer[idx] = base + idx;
    }
}
/* Gather every task's buffer to rank 0 and print them, one row per task.
 *
 * printbuffer  scratch space of at least ntasks*buffersize ints;
 *              contents are significant on rank 0 only
 * sendbuffer   this task's data to display
 * buffersize   number of ints per task
 *
 * Collective: every task must call this with the same buffersize.
 */
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int i, j, rank, ntasks;

    /* Collect all tasks' buffers into printbuffer on rank 0 */
    MPI_Gather(sendbuffer, buffersize, MPI_INT,
               printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank == 0) {
        for (j = 0; j < ntasks; j++) {
            printf("Task %i:", j);
            for (i = 0; i < buffersize; i++) {
                printf(" %2i", printbuffer[i + buffersize * j]);
            }
            printf("\n");
        }
        printf("\n");
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
/* Solution b) of the collective communication exercise: scatter.
 *
 * Rank 0's eight-element sendbuf is distributed in two-element chunks:
 * task i receives elements 2*i and 2*i+1 at the start of its recvbuf;
 * the remaining recvbuf elements keep their sentinel value -1.
 *
 * Fix vs. original: the unused locals `color` and `sub_comm` (leftovers
 * from a sub-communicator variant) are removed.
 */
int main(int argc, char *argv[])
{
    int ntasks, rank;
    int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
    /* gather target used by print_buffers: room for every task's row */
    int printbuf[2 * NTASKS * NTASKS];

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* The fixed data layout assumes exactly NTASKS processes */
    if (ntasks != NTASKS) {
        if (rank == 0) {
            fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
        }
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    /* Initialize message buffers */
    init_buffers(sendbuf, recvbuf, 2 * NTASKS);

    /* Print data that will be sent */
    print_buffers(printbuf, sendbuf, 2 * NTASKS);

    /* Scatter the elements from task 0 */
    MPI_Scatter(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0,
                MPI_COMM_WORLD);

    /* Print data that was received */
    print_buffers(printbuf, recvbuf, 2 * NTASKS);

    MPI_Finalize();
    return 0;
}
/* Fill the message buffers for this task.
 *
 * sendbuffer[i] = rank*buffersize + i, so the tasks together hold the
 * consecutive values 0 .. ntasks*buffersize-1 and every element is
 * globally unique.  recvbuffer is set to the sentinel -1 everywhere so
 * that received elements are easy to tell apart from untouched ones.
 */
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
    int rank, i;

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    for (i = 0; i < buffersize; i++) {
        recvbuffer[i] = -1;
        sendbuffer[i] = i + buffersize * rank;
    }
}
/* Collect every task's buffer on rank 0 and print one line per task.
 * printbuffer is scratch space (>= ntasks*buffersize ints), used on
 * rank 0 only.  Collective: all tasks must call with the same size. */
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
    int rank, ntasks;

    MPI_Gather(sendbuffer, buffersize, MPI_INT,
               printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);

    if (rank != 0) {
        return;
    }

    for (int task = 0; task < ntasks; task++) {
        const int *row = printbuffer + task * buffersize;
        printf("Task %i:", task);
        for (int k = 0; k < buffersize; k++) {
            printf(" %2i", row[k]);
        }
        printf("\n");
    }
    printf("\n");
}
! Skeleton for the collective communication exercise (Fortran version).
!
! Each of the four tasks owns eight integers (task 0: 0..7,
! task 1: 8..15, ...) in sendbuf, and an eight-element recvbuf
! initialized to the sentinel -1.  The TODO below is to be replaced
! with a single collective call.
! NOTE: this skeleton does not compile as-is — the second
! print_buffers call still carries the `...` placeholder argument.
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  ! color/sub_comm are provided for a sub-communicator variant
  integer :: ntasks, rank, ierr, i, color, sub_comm
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  ! Gather target used by print_buffers: room for all tasks' data
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! The fixed data layout only works with exactly four tasks
  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! TODO: use a single collective communication call (and maybe prepare
  ! some parameters for the call)

  ! Print data that was received
  ! TODO: add correct buffer
  call print_buffers(...)

  call mpi_finalize(ierr)

contains

  ! Fill sendbuf with globally unique values (rank*8 .. rank*8+7) and
  ! reset recvbuf to the sentinel -1.
  subroutine init_buffers
    implicit none
    integer :: i
    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
    end do
  end subroutine init_buffers

  ! Gather every task's buffer to rank 0 and print one line per task.
  ! Collective: all tasks must participate in every call.
  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    ! Build a run-time format string such as (A4,I2,":",  8I3)
    write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         & printbuf, bufsize, MPI_INTEGER, &
         & 0, MPI_COMM_WORLD, ierr)
    if (rank == 0) then
       do i = 1, ntasks
          write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
! Solution d) of the collective communication exercise: all-to-all.
!
! Each of the four tasks owns eight integers (0..31 overall).  After
! mpi_alltoall every task has received a two-element segment from every
! task, i.e. the data is transposed across the tasks.
!
! Fix vs. original: the unused program-level variables i, color and
! sub_comm (leftovers from a sub-communicator variant) are removed; the
! internal subroutines declare their own loop counters.
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  ! Gather target used by print_buffers: room for all tasks' data
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! The fixed data layout only works with exactly four tasks
  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! Carry out the all-to-all pattern: two integers go to and come from
  ! every task
  call mpi_alltoall(sendbuf, 2, MPI_INTEGER, recvbuf, 2, MPI_INTEGER, &
       & MPI_COMM_WORLD, ierr)

  ! Print data that was received
  call print_buffers(recvbuf)

  call mpi_finalize(ierr)

contains

  ! Fill sendbuf with globally unique values (rank*8 .. rank*8+7) and
  ! reset recvbuf to the sentinel -1.
  subroutine init_buffers
    implicit none
    integer :: i
    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
    end do
  end subroutine init_buffers

  ! Gather every task's buffer to rank 0 and print one line per task.
  ! Collective: all tasks must participate in every call.
  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    ! Build a run-time format string such as (A4,I2,":",  8I3)
    write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         & printbuf, bufsize, MPI_INTEGER, &
         & 0, MPI_COMM_WORLD, ierr)
    if (rank == 0) then
       do i = 1, ntasks
          write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
! Solution a) of the collective communication exercise: broadcast.
!
! Rank 0's sendbuf (values 0..7) is broadcast to every task; the
! broadcast overwrites sendbuf in place on the non-root tasks.
!
! Fix vs. original: the unused program-level variables i, color and
! sub_comm (leftovers from a sub-communicator variant) are removed; the
! internal subroutines declare their own loop counters.
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  ! Gather target used by print_buffers: room for all tasks' data
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! The fixed data layout only works with exactly four tasks
  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! Send (0,1,..,7) everywhere
  call mpi_bcast(sendbuf, 2*ntasks, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)

  ! Print data that was received
  call print_buffers(sendbuf)

  call mpi_finalize(ierr)

contains

  ! Fill sendbuf with globally unique values (rank*8 .. rank*8+7) and
  ! reset recvbuf to the sentinel -1.
  subroutine init_buffers
    implicit none
    integer :: i
    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
    end do
  end subroutine init_buffers

  ! Gather every task's buffer to rank 0 and print one line per task.
  ! Collective: all tasks must participate in every call.
  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    ! Build a run-time format string such as (A4,I2,":",  8I3)
    write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         & printbuf, bufsize, MPI_INTEGER, &
         & 0, MPI_COMM_WORLD, ierr)
    if (rank == 0) then
       do i = 1, ntasks
          write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
! Solution c) of the collective communication exercise: gatherv.
!
! Tasks contribute 1, 1, 2 and 4 elements respectively (8 in total),
! which are gathered into recvbuf on task 1; the remaining recvbuf
! elements keep their sentinel value -1.
!
! Fix vs. original: the unused program-level variables color and
! sub_comm (leftovers from a sub-communicator variant) are removed.
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  integer :: ntasks, rank, ierr, i
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  ! Gather target used by print_buffers: room for all tasks' data
  integer, dimension(2*n_mpi_tasks**2) :: printbuf
  integer, dimension(n_mpi_tasks) :: offsets, counts

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! The fixed data layout only works with exactly four tasks
  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! Gather varying size data to task 1.  counts sums to 8 = size of
  ! recvbuf; displacements are derived from the counts.
  counts = (/1,1,2,4/)
  offsets(1) = 0
  do i = 2, ntasks
     offsets(i) = offsets(i-1) + counts(i-1)
  end do
  call mpi_gatherv(sendbuf, counts(rank+1), MPI_INTEGER, recvbuf, counts, &
       & offsets, MPI_INTEGER, 1, MPI_COMM_WORLD, ierr)

  ! Print data that was received
  call print_buffers(recvbuf)

  call mpi_finalize(ierr)

contains

  ! Fill sendbuf with globally unique values (rank*8 .. rank*8+7) and
  ! reset recvbuf to the sentinel -1.
  subroutine init_buffers
    implicit none
    integer :: i
    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
    end do
  end subroutine init_buffers

  ! Gather every task's buffer to rank 0 and print one line per task.
  ! Collective: all tasks must participate in every call.
  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    ! Build a run-time format string such as (A4,I2,":",  8I3)
    write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         & printbuf, bufsize, MPI_INTEGER, &
         & 0, MPI_COMM_WORLD, ierr)
    if (rank == 0) then
       do i = 1, ntasks
          write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
! Solution b) of the collective communication exercise: scatter.
!
! Rank 0's eight-element sendbuf is distributed in two-element chunks:
! task i receives two elements at the start of its recvbuf; the
! remaining recvbuf elements keep their sentinel value -1.
program coll_exer
  use mpi
  implicit none

  integer, parameter :: n_mpi_tasks = 4

  ! NOTE(review): i, color and sub_comm are unused in this variant
  integer :: ntasks, rank, ierr, i, color, sub_comm
  integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
  ! Gather target used by print_buffers: room for all tasks' data
  integer, dimension(2*n_mpi_tasks**2) :: printbuf

  call mpi_init(ierr)
  call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
  call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! The fixed data layout only works with exactly four tasks
  if (ntasks /= n_mpi_tasks) then
     if (rank == 0) then
        print *, "Run this program with ", n_mpi_tasks, " tasks."
     end if
     call mpi_abort(MPI_COMM_WORLD, -1, ierr)
  end if

  ! Initialize message buffers
  call init_buffers

  ! Print data that will be sent
  call print_buffers(sendbuf)

  ! Scatter the elements from task 0
  call mpi_scatter(sendbuf, 2, MPI_INTEGER, recvbuf, 2, MPI_INTEGER, &
       & 0, MPI_COMM_WORLD, ierr)

  ! Print data that was received
  call print_buffers(recvbuf)

  call mpi_finalize(ierr)

contains

  ! Fill sendbuf with globally unique values (rank*8 .. rank*8+7) and
  ! reset recvbuf to the sentinel -1.
  subroutine init_buffers
    implicit none
    integer :: i
    do i = 1, 2*n_mpi_tasks
       recvbuf(i) = -1
       sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
    end do
  end subroutine init_buffers

  ! Gather every task's buffer to rank 0 and print one line per task.
  ! Collective: all tasks must participate in every call.
  subroutine print_buffers(buffer)
    implicit none
    integer, dimension(:), intent(in) :: buffer
    integer, parameter :: bufsize = 2*n_mpi_tasks
    integer :: i
    character(len=40) :: pformat

    ! Build a run-time format string such as (A4,I2,":",  8I3)
    write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'

    call mpi_gather(buffer, bufsize, MPI_INTEGER, &
         & printbuf, bufsize, MPI_INTEGER, &
         & 0, MPI_COMM_WORLD, ierr)
    if (rank == 0) then
       do i = 1, ntasks
          write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
       end do
       print *
    end if
  end subroutine print_buffers

end program coll_exer
# Skeleton for the collective communication exercise (mpi4py version).
#
# Each of the four tasks owns eight globally unique values
# (rank*8 .. rank*8+7) in `data` and an eight-element receive buffer
# `buff` filled with the sentinel -1.
from __future__ import print_function
from mpi4py import MPI
import numpy
from sys import stdout

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# The fixed data layout assumes exactly four tasks
assert size == 4, 'Number of MPI tasks has to be 4.'

# Data vector: task 0 has 0..7, task 1 has 8..15, etc.
data = numpy.arange(8) + rank*8
# Receive buffer, initialized to -1 so received values stand out
buff = numpy.zeros(8, int)
buff[:] = -1

# ... wait for every rank to finish ...
stdout.flush()
comm.barrier()
if rank == 0:
    print('')
    print('-' * 32)
    print('')
    print('Data vectors:')
# NOTE(review): indentation was lost in this copy — presumably each
# rank prints its own data vector at top level; verify against upstream.
print(' Task {0}: {1}'.format(rank, data))
stdout.flush()
comm.barrier()

# Implement the requested collective operation
from __future__ import print_function
from mpi4py import MPI
import numpy
from sys import stdout
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
assert size == 4, 'Number of MPI tasks has to be 4.'
data = numpy.arange(8) + rank*8
buff = numpy.zeros(8, int)
buff[:] = -1
# ... wait for every rank to finish ...
stdout.flush()
comm.barrier()
if rank == 0:
print('')
print('-' * 32)
print('')
print('Data vectors:')
print(' Task {0}: {1}'.format(rank, data))
stdout.flush()
comm.barrier()
if rank == 0:
print('')
print('Alltoall:')
# Gatherv
counts = (1, 1, 2, 4)
offsets = (0, 1, 2, 4)
comm.Alltoall(data, buff)
print(' Task {0}: {1}'.format(rank, buff))