MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/README.md

# Parallel programming with MPI
This repository contains exercises and examples on parallel programming with the Message Passing Interface (MPI).
A working MPI installation is needed for building the code. Simple cases can
be built and run as:
- `mpicc -o exe exercise.c ; mpirun -np xxx ./exe` (C)
- `mpif90 -o exe exercise.f90 ; mpirun -np xxx ./exe` (Fortran)
- `mpirun -np xxx python program.py` (Python)

where `mpicc`/`mpif90`/`mpirun` should be replaced by the correct commands for
the particular computer platform. For more complex cases a Makefile is
provided.
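For example, a minimal MPI "hello world" in C along these lines (an
illustrative sketch, not necessarily identical to the code provided in the
[hello-world](hello-world) exercise; the file name `hello.c` is used just for
illustration) can be built and run with the commands above:

```c
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int ntasks, rank;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Each task prints its rank and the total number of tasks */
    printf("Hello from task %d of %d\n", rank, ntasks);

    MPI_Finalize();
    return 0;
}
```

Compiled with `mpicc -o exe hello.c` and started with e.g. `mpirun -np 4
./exe`, each of the four tasks prints one line.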
## Exercises
- [Hello world](hello-world) Simplest possible MPI program (C, Fortran and
Python versions). Level: **basic**
- [Message exchange](message-exchange) Simple point-to-point communication
(C, Fortran and Python versions). Level: **basic**
- [Message chain](message-chain) Point-to-point communication in a
  one-dimensional aperiodic chain (C, Fortran and Python versions).
  Level: **intermediate**
- [Collective communication](collectives) Basic collective communication
patterns (C, Fortran and Python versions). Level: **basic/intermediate**
- [Parallel I/O](parallel-io) Simple parallel I/O using POSIX calls and
MPI I/O (C and Fortran versions). Level: **basic/intermediate**
- [User defined datatypes](datatypes) Communication of non-uniform data using
  user-defined datatypes (C, Fortran and Python versions).
  Level: **intermediate/advanced**
## Examples
- [Heat equation](heat-equation) A two-dimensional heat equation solver
  parallelized with MPI. The code features non-blocking point-to-point
  communication, user-defined datatypes, and parallel I/O with MPI I/O
(C, Fortran and Python versions). Level: **advanced**
## How to contribute
Any contributions (new exercises and examples, bug fixes, improvements, etc.) are
warmly welcome. In order to contribute, please follow the standard
GitLab workflow:
1. Fork the project into your personal space
2. Create a feature branch
3. Work on your contributions
4. Push the commit(s) to your fork
5. Submit a merge request to the master branch
As quality assurance, each merge request is reviewed by PRACE staff before it is accepted into the master branch.

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/LICENSE.txt

Copyright (C) 2018 CSC - IT Center for Science Ltd.
Licensed under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Code is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
A copy of the GNU General Public License can be obtained from
<http://www.gnu.org/licenses/>.

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/README.md

## Collective operations
In this exercise we test different routines for collective
communication. Write a program for four MPI processes, where each
process has a data vector containing the following values:
![](img/sendbuffer.png)
In addition, each task has a receive buffer for eight elements, with
all values in the buffer initialized to -1.
Implement communication that transfers the values from these data
vectors into the receive buffers using a single collective routine in
each case, so that the receive buffers end up with the following values:
a)
![](img/bcast.png)
b)
![](img/scatter.png)
c)
![](img/gatherv.png)
d)
![](img/alltoall.png)
You can start from scratch or use the skeleton code found in
[c/collective.c](c/collective.c),
[fortran/collective.F90](fortran/collective.F90) or
[python/collective.py](python/collective.py).
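As a hint, each of the four target patterns corresponds to a single MPI
collective. The C prototypes of the candidate routines are sketched below
(all are standard MPI calls; picking the right one and its parameters for
each case is the exercise):

```c
/* Candidate collectives for cases a)-d), in no particular order */
int MPI_Bcast(void *buffer, int count, MPI_Datatype datatype,
              int root, MPI_Comm comm);
int MPI_Scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                void *recvbuf, int recvcount, MPI_Datatype recvtype,
                int root, MPI_Comm comm);
int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                void *recvbuf, const int recvcounts[], const int displs[],
                MPI_Datatype recvtype, int root, MPI_Comm comm);
int MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
                 void *recvbuf, int recvcount, MPI_Datatype recvtype,
                 MPI_Comm comm);
```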

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/c/collective.c

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
int main(int argc, char *argv[])
{
    int ntasks, rank;
int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
int printbuf[2 * NTASKS * NTASKS];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ntasks != NTASKS) {
if (rank == 0) {
fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
}
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* Initialize message buffers */
init_buffers(sendbuf, recvbuf, 2 * NTASKS);
/* Print data that will be sent */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
/* TODO: use a single collective communication call (and maybe prepare
* some parameters for the call) */
/* Print data that was received */
/* TODO: add correct buffer */
print_buffers(printbuf, ..., 2 * NTASKS);
MPI_Finalize();
return 0;
}
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
int rank, i;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 0; i < buffersize; i++) {
recvbuffer[i] = -1;
sendbuffer[i] = i + buffersize * rank;
}
}
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
int i, j, rank, ntasks;
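    /* Gather every task's buffer to rank 0, which then prints them all
     * in rank order */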
MPI_Gather(sendbuffer, buffersize, MPI_INT,
printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (rank == 0) {
for (j = 0; j < ntasks; j++) {
printf("Task %i:", j);
for (i = 0; i < buffersize; i++) {
printf(" %2i", printbuffer[i + buffersize * j]);
}
printf("\n");
}
printf("\n");
}
}

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/c/solution/alltoall.c

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
int main(int argc, char *argv[])
{
    int ntasks, rank;
int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
int printbuf[2 * NTASKS * NTASKS];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ntasks != NTASKS) {
if (rank == 0) {
fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
}
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* Initialize message buffers */
init_buffers(sendbuf, recvbuf, 2 * NTASKS);
/* Print data that will be sent */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
/* Perform the all-to-all communication pattern */
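    /* Task i sends sendbuf[2*j] and sendbuf[2*j+1] to task j, and stores
     * what it receives from task j in recvbuf[2*j] and recvbuf[2*j+1] */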
MPI_Alltoall(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, MPI_COMM_WORLD);
/* Print data that was received */
print_buffers(printbuf, recvbuf, 2 * NTASKS);
MPI_Finalize();
return 0;
}
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
int rank, i;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 0; i < buffersize; i++) {
recvbuffer[i] = -1;
sendbuffer[i] = i + buffersize * rank;
}
}
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
int i, j, rank, ntasks;
MPI_Gather(sendbuffer, buffersize, MPI_INT,
printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (rank == 0) {
for (j = 0; j < ntasks; j++) {
printf("Task %i:", j);
for (i = 0; i < buffersize; i++) {
printf(" %2i", printbuffer[i + buffersize * j]);
}
printf("\n");
}
printf("\n");
}
}

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/c/solution/broadcast.c

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
int main(int argc, char *argv[])
{
    int ntasks, rank;
int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
int printbuf[2 * NTASKS * NTASKS];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ntasks != NTASKS) {
if (rank == 0) {
fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
}
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* Initialize message buffers */
init_buffers(sendbuf, recvbuf, 2 * NTASKS);
/* Print data that will be sent */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
/* Send (0,1,2,...,7) everywhere */
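    /* On rank 0 sendbuf is the source; on all other ranks the same
     * array acts as the receive buffer for the broadcast */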
MPI_Bcast(sendbuf, 2 * NTASKS, MPI_INT, 0, MPI_COMM_WORLD);
/* Print data that was received */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
MPI_Finalize();
return 0;
}
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
int rank, i;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 0; i < buffersize; i++) {
recvbuffer[i] = -1;
sendbuffer[i] = i + buffersize * rank;
}
}
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
int i, j, rank, ntasks;
MPI_Gather(sendbuffer, buffersize, MPI_INT,
printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (rank == 0) {
for (j = 0; j < ntasks; j++) {
printf("Task %i:", j);
for (i = 0; i < buffersize; i++) {
printf(" %2i", printbuffer[i + buffersize * j]);
}
printf("\n");
}
printf("\n");
}
}

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/c/solution/gatherv.c

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
int main(int argc, char *argv[])
{
    int ntasks, rank;
int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
int printbuf[2 * NTASKS * NTASKS];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ntasks != NTASKS) {
if (rank == 0) {
fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
}
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* Initialize message buffers */
init_buffers(sendbuf, recvbuf, 2 * NTASKS);
/* Print data that will be sent */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
/* Gather varying size data to task 1 */
int offsets[NTASKS] = { 0, 1, 2, 4 };
int counts[NTASKS] = { 1, 1, 2, 4 };
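    /* Task i contributes counts[i] elements; the root places them at
     * element offset offsets[i] in recvbuf. The counts sum to 8, so the
     * whole receive buffer is filled on task 1. */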
MPI_Gatherv(sendbuf, counts[rank], MPI_INT, recvbuf, counts,
offsets, MPI_INT, 1, MPI_COMM_WORLD);
/* Print data that was received */
print_buffers(printbuf, recvbuf, 2 * NTASKS);
MPI_Finalize();
return 0;
}
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
int rank, i;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 0; i < buffersize; i++) {
recvbuffer[i] = -1;
sendbuffer[i] = i + buffersize * rank;
}
}
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
int i, j, rank, ntasks;
MPI_Gather(sendbuffer, buffersize, MPI_INT,
printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (rank == 0) {
for (j = 0; j < ntasks; j++) {
printf("Task %i:", j);
for (i = 0; i < buffersize; i++) {
printf(" %2i", printbuffer[i + buffersize * j]);
}
printf("\n");
}
printf("\n");
}
}

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/c/solution/scatter.c

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#define NTASKS 4
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize);
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize);
int main(int argc, char *argv[])
{
    int ntasks, rank;
int sendbuf[2 * NTASKS], recvbuf[2 * NTASKS];
int printbuf[2 * NTASKS * NTASKS];
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
if (ntasks != NTASKS) {
if (rank == 0) {
fprintf(stderr, "Run this program with %i tasks.\n", NTASKS);
}
MPI_Abort(MPI_COMM_WORLD, -1);
}
/* Initialize message buffers */
init_buffers(sendbuf, recvbuf, 2 * NTASKS);
/* Print data that will be sent */
print_buffers(printbuf, sendbuf, 2 * NTASKS);
/* Scatter the elements from task 0 */
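    /* Every task, the root included, receives two consecutive elements:
     * task i gets elements 2*i and 2*i+1 of task 0's sendbuf */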
MPI_Scatter(sendbuf, 2, MPI_INT, recvbuf, 2, MPI_INT, 0,
MPI_COMM_WORLD);
/* Print data that was received */
print_buffers(printbuf, recvbuf, 2 * NTASKS);
MPI_Finalize();
return 0;
}
void init_buffers(int *sendbuffer, int *recvbuffer, int buffersize)
{
int rank, i;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
for (i = 0; i < buffersize; i++) {
recvbuffer[i] = -1;
sendbuffer[i] = i + buffersize * rank;
}
}
void print_buffers(int *printbuffer, int *sendbuffer, int buffersize)
{
int i, j, rank, ntasks;
MPI_Gather(sendbuffer, buffersize, MPI_INT,
printbuffer, buffersize, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
if (rank == 0) {
for (j = 0; j < ntasks; j++) {
printf("Task %i:", j);
for (i = 0; i < buffersize; i++) {
printf(" %2i", printbuffer[i + buffersize * j]);
}
printf("\n");
}
printf("\n");
}
}

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/fortran/collective.F90

program coll_exer
use mpi
implicit none
integer, parameter :: n_mpi_tasks = 4
  integer :: ntasks, rank, ierr, i
integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
integer, dimension(2*n_mpi_tasks**2) :: printbuf
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
if (ntasks /= n_mpi_tasks) then
if (rank == 0) then
print *, "Run this program with ", n_mpi_tasks, " tasks."
end if
call mpi_abort(MPI_COMM_WORLD, -1, ierr)
end if
! Initialize message buffers
call init_buffers
! Print data that will be sent
call print_buffers(sendbuf)
! TODO: use a single collective communication call (and maybe prepare
! some parameters for the call)
! Print data that was received
! TODO: add correct buffer
call print_buffers(...)
call mpi_finalize(ierr)
contains
subroutine init_buffers
implicit none
integer :: i
do i = 1, 2*n_mpi_tasks
recvbuf(i) = -1
sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
end do
end subroutine init_buffers
subroutine print_buffers(buffer)
implicit none
integer, dimension(:), intent(in) :: buffer
integer, parameter :: bufsize = 2*n_mpi_tasks
integer :: i
character(len=40) :: pformat
write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'
call mpi_gather(buffer, bufsize, MPI_INTEGER, &
& printbuf, bufsize, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
if (rank == 0) then
do i = 1, ntasks
write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
end do
print *
end if
end subroutine print_buffers
end program coll_exer

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/fortran/solution/alltoall.F90

program coll_exer
use mpi
implicit none
integer, parameter :: n_mpi_tasks = 4
  integer :: ntasks, rank, ierr, i
integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
integer, dimension(2*n_mpi_tasks**2) :: printbuf
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
if (ntasks /= n_mpi_tasks) then
if (rank == 0) then
print *, "Run this program with ", n_mpi_tasks, " tasks."
end if
call mpi_abort(MPI_COMM_WORLD, -1, ierr)
end if
! Initialize message buffers
call init_buffers
! Print data that will be sent
call print_buffers(sendbuf)
! Carry out the all-to-all pattern
call mpi_alltoall(sendbuf, 2, MPI_INTEGER, recvbuf, 2, MPI_INTEGER, &
& MPI_COMM_WORLD, ierr)
! Print data that was received
call print_buffers(recvbuf)
call mpi_finalize(ierr)
contains
subroutine init_buffers
implicit none
integer :: i
do i = 1, 2*n_mpi_tasks
recvbuf(i) = -1
sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
end do
end subroutine init_buffers
subroutine print_buffers(buffer)
implicit none
integer, dimension(:), intent(in) :: buffer
integer, parameter :: bufsize = 2*n_mpi_tasks
integer :: i
character(len=40) :: pformat
write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'
call mpi_gather(buffer, bufsize, MPI_INTEGER, &
& printbuf, bufsize, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
if (rank == 0) then
do i = 1, ntasks
write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
end do
print *
end if
end subroutine print_buffers
end program coll_exer

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/fortran/solution/broadcast.F90

program coll_exer
use mpi
implicit none
integer, parameter :: n_mpi_tasks = 4
  integer :: ntasks, rank, ierr, i
integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
integer, dimension(2*n_mpi_tasks**2) :: printbuf
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
if (ntasks /= n_mpi_tasks) then
if (rank == 0) then
print *, "Run this program with ", n_mpi_tasks, " tasks."
end if
call mpi_abort(MPI_COMM_WORLD, -1, ierr)
end if
! Initialize message buffers
call init_buffers
! Print data that will be sent
call print_buffers(sendbuf)
! Send (0,1,..,7) everywhere
call mpi_bcast(sendbuf, 2*ntasks, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
! Print data that was received
call print_buffers(sendbuf)
call mpi_finalize(ierr)
contains
subroutine init_buffers
implicit none
integer :: i
do i = 1, 2*n_mpi_tasks
recvbuf(i) = -1
sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
end do
end subroutine init_buffers
subroutine print_buffers(buffer)
implicit none
integer, dimension(:), intent(in) :: buffer
integer, parameter :: bufsize = 2*n_mpi_tasks
integer :: i
character(len=40) :: pformat
write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'
call mpi_gather(buffer, bufsize, MPI_INTEGER, &
& printbuf, bufsize, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
if (rank == 0) then
do i = 1, ntasks
write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
end do
print *
end if
end subroutine print_buffers
end program coll_exer

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/fortran/solution/gatherv.F90

program coll_exer
use mpi
implicit none
integer, parameter :: n_mpi_tasks = 4
  integer :: ntasks, rank, ierr, i
integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
integer, dimension(2*n_mpi_tasks**2) :: printbuf
integer, dimension(n_mpi_tasks) :: offsets, counts
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
if (ntasks /= n_mpi_tasks) then
if (rank == 0) then
print *, "Run this program with ", n_mpi_tasks, " tasks."
end if
call mpi_abort(MPI_COMM_WORLD, -1, ierr)
end if
! Initialize message buffers
call init_buffers
! Print data that will be sent
call print_buffers(sendbuf)
! Gather varying size data to task 1
counts = (/1,1,2,4/)
offsets(1) = 0
do i = 2, ntasks
offsets(i) = offsets(i-1) + counts(i-1)
end do
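  ! Task i-1 contributes counts(i) elements; the root (task 1) places
  ! them in recvbuf at the 0-based element displacement offsets(i)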
call mpi_gatherv(sendbuf, counts(rank+1), MPI_INTEGER, recvbuf, counts, &
& offsets, MPI_INTEGER, 1, MPI_COMM_WORLD, ierr)
! Print data that was received
call print_buffers(recvbuf)
call mpi_finalize(ierr)
contains
subroutine init_buffers
implicit none
integer :: i
do i = 1, 2*n_mpi_tasks
recvbuf(i) = -1
sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
end do
end subroutine init_buffers
subroutine print_buffers(buffer)
implicit none
integer, dimension(:), intent(in) :: buffer
integer, parameter :: bufsize = 2*n_mpi_tasks
integer :: i
character(len=40) :: pformat
write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'
call mpi_gather(buffer, bufsize, MPI_INTEGER, &
& printbuf, bufsize, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
if (rank == 0) then
do i = 1, ntasks
write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
end do
print *
end if
end subroutine print_buffers
end program coll_exer

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/fortran/solution/scatter.F90

program coll_exer
use mpi
implicit none
integer, parameter :: n_mpi_tasks = 4
  integer :: ntasks, rank, ierr, i
integer, dimension(2*n_mpi_tasks) :: sendbuf, recvbuf
integer, dimension(2*n_mpi_tasks**2) :: printbuf
call mpi_init(ierr)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, ierr)
call mpi_comm_rank(MPI_COMM_WORLD, rank, ierr)
if (ntasks /= n_mpi_tasks) then
if (rank == 0) then
print *, "Run this program with ", n_mpi_tasks, " tasks."
end if
call mpi_abort(MPI_COMM_WORLD, -1, ierr)
end if
! Initialize message buffers
call init_buffers
! Print data that will be sent
call print_buffers(sendbuf)
! Scatter the elements from task 0
call mpi_scatter(sendbuf, 2, MPI_INTEGER, recvbuf, 2, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
! Print data that was received
call print_buffers(recvbuf)
call mpi_finalize(ierr)
contains
subroutine init_buffers
implicit none
integer :: i
do i = 1, 2*n_mpi_tasks
recvbuf(i) = -1
sendbuf(i) = i + 2*n_mpi_tasks * rank - 1
end do
end subroutine init_buffers
subroutine print_buffers(buffer)
implicit none
integer, dimension(:), intent(in) :: buffer
integer, parameter :: bufsize = 2*n_mpi_tasks
integer :: i
character(len=40) :: pformat
write(pformat,'(A,I3,A)') '(A4,I2,":",', bufsize, 'I3)'
call mpi_gather(buffer, bufsize, MPI_INTEGER, &
& printbuf, bufsize, MPI_INTEGER, &
& 0, MPI_COMM_WORLD, ierr)
if (rank == 0) then
do i = 1, ntasks
write(*,pformat) 'Task', i - 1, printbuf((i-1)*bufsize+1:i*bufsize)
end do
print *
end if
end subroutine print_buffers
end program coll_exer

MPI-6f1b94fee54a4fa30bb92a221dca8b92e1829875/collectives/img/alltoall.png (binary PNG image, contents omitted)