Copyright (C) 2018 CSC - IT Center for Science Ltd.
Licensed under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Code is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Copy of the GNU General Public License can be obtained from
<http://www.gnu.org/licenses/>.
## Simple message exchange
a) Write a simple program where two processes send and receive a
message to/from each other using `MPI_Send` and `MPI_Recv`. The message
content is an integer array, where each element is initialized to the
rank of the process. After receiving a message, each process should
print its rank and the first element of the received array. You may
start from scratch or use the skeleton code in [exchange.c](exchange.c),
[exchange.F90](exchange.F90), or [exchange.py](exchange.py) as a starting point.
b) Increase the message size to 100,000, recompile and run. It is very likely
that the program will deadlock. Try to figure out the reason for this and
how to resolve it.
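The deadlock arises because both ranks call the blocking send first: small messages are typically buffered by the MPI library ("eager" protocol) and the send returns immediately, but a large message makes the send wait for a matching receive, so both ranks end up waiting on each other. The model solutions below fix this by letting one rank receive first. An alternative fix, sketched here in C under the assumption of exactly two ranks and the 100,000-element message from part b), is `MPI_Sendrecv`, which issues the send and the receive as a single call so the exchange cannot deadlock:

```c
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int i, myid, ntasks, other;
    int size = 100000;
    int *message, *receiveBuffer;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
    MPI_Comm_rank(MPI_COMM_WORLD, &myid);

    message = malloc(sizeof(int) * size);
    receiveBuffer = malloc(sizeof(int) * size);
    for (i = 0; i < size; i++) {
        message[i] = myid;
    }

    if (myid < 2) {
        other = 1 - myid;   /* partner rank: 0 <-> 1 */
        /* The send and the receive are paired inside one call, so
           neither rank can get stuck in a blocking send */
        MPI_Sendrecv(message, size, MPI_INT, other, 1,
                     receiveBuffer, size, MPI_INT, other, 1,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        printf("Rank %i received %i\n", myid, receiveBuffer[0]);
    }

    free(message);
    free(receiveBuffer);
    MPI_Finalize();
    return 0;
}
```

Non-blocking communication (`MPI_Isend`/`MPI_Irecv` followed by `MPI_Wait`) would work as well; the sketch above is just the shortest change to the original pattern.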
program exchange
use mpi
implicit none
integer, parameter :: size = 100
integer :: rc, myid, ntasks, count
integer :: status(MPI_STATUS_SIZE)
integer :: message(size)
integer :: receiveBuffer(size)
call mpi_init(rc)
call mpi_comm_rank(MPI_COMM_WORLD, myid, rc)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, rc)
message = myid
! TODO: Implement sending and receiving as defined in the assignment
if ( myid == 0 ) then
write(*,'(A10,I3,A10,I3)') 'Rank: ', myid, &
' received ', receiveBuffer(1)
else if (myid == 1) then
write(*,'(A10,I3,A10,I3)') 'Rank: ', myid, &
' received ', receiveBuffer(1)
end if
call mpi_finalize(rc)
end program exchange
#include<stdio.h>
#include<stdlib.h>
#include<mpi.h>
int main(int argc, char *argv[])
{
int i, myid, ntasks;
int size = 100;
int *message;
int *receiveBuffer;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
/* Allocate message */
message = malloc(sizeof(int) * size);
receiveBuffer = malloc(sizeof(int) * size);
/* Initialize message */
for (i = 0; i < size; i++) {
message[i] = myid;
}
/* TODO: */
/* Send and receive messages as defined in exercise */
if (myid == 0) {
printf("Rank %i received %i\n", myid, receiveBuffer[0]);
} else if (myid == 1) {
printf("Rank %i received %i\n", myid, receiveBuffer[0]);
}
free(message);
free(receiveBuffer);
MPI_Finalize();
return 0;
}
from __future__ import print_function
from mpi4py import MPI
import numpy
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Simple message exchange using numpy arrays
n = 100000
data = numpy.zeros(n, int) + rank
buff = numpy.empty(n, int)
# TODO:
# Send and receive messages as defined in exercise
if rank == 0:
    pass  # TODO: send 'data' to rank 1 and receive into 'buff'
elif rank == 1:
    pass  # TODO: receive into 'buff' and send 'data' to rank 0
print("Rank {0} received an array filled with {1}s.".format(rank, buff[0]))
# Simple message exchange of Python objects
meta = {'rank': rank}
# TODO:
# Send and receive messages as defined in exercise
if rank == 0:
    pass  # TODO: send 'meta' to rank 1 and receive into 'msg'
elif rank == 1:
    pass  # TODO: receive into 'msg' and send 'meta' to rank 0
print("Rank {0} received a message from rank {1}.".format(rank, msg['rank']))
program exchange
use mpi
implicit none
integer, parameter :: size = 100000
integer :: rc, myid, ntasks, count
integer :: status(MPI_STATUS_SIZE)
integer :: message(size)
integer :: receiveBuffer(size)
call mpi_init(rc)
call mpi_comm_rank(MPI_COMM_WORLD, myid, rc)
call mpi_comm_size(MPI_COMM_WORLD, ntasks, rc)
message = myid
! Send and receive as defined in the assignment
if ( myid == 0 ) then
call mpi_send(message, size, MPI_INTEGER, 1, &
1, MPI_COMM_WORLD, rc)
call mpi_recv(receiveBuffer, size, MPI_INTEGER, 1, &
2, MPI_COMM_WORLD, status, rc)
write(*,'(A10,I3,A10,I3)') 'Rank: ', myid, &
' received ', receiveBuffer(1)
else if (myid == 1) then
! One MPI task needs to start with the send and the other one with
! the receive; otherwise the program deadlocks with large message
! sizes in most MPI implementations
call mpi_recv(receiveBuffer, size, MPI_INTEGER, 0, &
1, MPI_COMM_WORLD, status, rc)
call mpi_send(message, size, MPI_INTEGER, 0, &
2, MPI_COMM_WORLD, rc)
write(*,'(A10,I3,A10,I3)') 'Rank: ', myid, &
' received ', receiveBuffer(1)
end if
call mpi_finalize(rc)
end program exchange
#include<stdio.h>
#include<stdlib.h>
#include<mpi.h>
int main(int argc, char *argv[])
{
int i, myid, ntasks;
int size = 100000;
int *message;
int *receiveBuffer;
MPI_Status status;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
/* Allocate message */
message = malloc(sizeof(int) * size);
receiveBuffer = malloc(sizeof(int) * size);
/* Initialize message */
for (i = 0; i < size; i++) {
message[i] = myid;
}
/* Send and receive messages as defined in exercise */
if (myid == 0) {
MPI_Send(message, size, MPI_INT, 1, 1, MPI_COMM_WORLD);
MPI_Recv(receiveBuffer, size, MPI_INT, 1, 2, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
printf("Rank %i received %i\n", myid, receiveBuffer[0]);
} else if (myid == 1) {
/* One MPI task needs to start with the send and the other one with
 * the receive; otherwise the program deadlocks with large message
 * sizes in most MPI implementations */
MPI_Recv(receiveBuffer, size, MPI_INT, 0, 1, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
MPI_Send(message, size, MPI_INT, 0, 2, MPI_COMM_WORLD);
printf("Rank %i received %i\n", myid, receiveBuffer[0]);
}
free(message);
free(receiveBuffer);
MPI_Finalize();
return 0;
}
from __future__ import print_function
from mpi4py import MPI
import numpy
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# Simple message exchange
meta = {'rank': rank}
if rank == 0:
comm.send(meta, dest=1)
msg = comm.recv(source=1)
elif rank == 1:
msg = comm.recv(source=0)
comm.send(meta, dest=0)
print("Rank {0} received a message from rank {1}.".format(rank, msg['rank']))
# Simple message exchange using numpy arrays
n = 100000
data = numpy.zeros(n, int) + rank
buff = numpy.empty(n, int)
if rank == 0:
comm.Send(data, dest=1)
comm.Recv(buff, source=1)
elif rank == 1:
# One MPI task needs to start with the send and the other one with
# the receive; otherwise the program deadlocks with large message
# sizes in most MPI implementations
comm.Recv(buff, source=0)
comm.Send(data, dest=0)
print("Rank {0} received an array filled with {1}s.".format(rank, buff[0]))
Copyright (C) 2018 CSC - IT Center for Science Ltd.
Licensed under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
Code is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
Copy of the GNU General Public License can be obtained from
<http://www.gnu.org/licenses/>.
## Parallel I/O
a) Write data from all MPI tasks to a single file using the spokesman
strategy. Gather data to a single MPI task and write it to a file. The
data should be kept in the order of the MPI ranks.
b) Verify the above write by reading the file using the spokesman
strategy. Use a different number of MPI tasks than was used for writing.
c) Implement the above write so that each MPI task writes to a separate
file. Skeleton codes are found in
[spokesman.c](c/spokesman.c) and
[spokesman_reader.c](c/spokesman_reader.c), or in
[spokesman.F90](fortran/spokesman.F90) and
[spokesman_reader.F90](fortran/spokesman_reader.F90)
d) Rewrite exercise a) so that all MPI tasks participate in writing to
and reading from a single file using MPI-I/O.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void mpiio_writer(int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, i, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
for (i = 0; i < localsize; i++) {
localvector[i] = i + 1 + localsize * my_id;
}
mpiio_writer(my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void mpiio_writer(int my_id, int *localvector, int localsize)
{
MPI_File fh;
MPI_Offset offset;
MPI_File_open(MPI_COMM_WORLD, "mpiio.dat",
MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
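/* Each task writes its own block of the file at a byte offset
   determined by its rank, so the file ends up in rank order */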
offset = my_id * localsize * sizeof(int);
MPI_File_write_at_all(fh, offset, localvector,
localsize, MPI_INT, MPI_STATUS_IGNORE);
MPI_File_close(&fh);
}
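The MPI-I/O solution above covers only the write path of exercise d). A minimal sketch of the corresponding read, assuming the `mpiio.dat` file and rank-ordered layout produced above, could use a collective read at the same rank-based offset (the function name `mpiio_reader` is only illustrative):

```c
#include <mpi.h>

/* Sketch: each task reads back its own block of "mpiio.dat" with a
   collective read at a byte offset computed from its rank */
void mpiio_reader(int my_id, int *localvector, int localsize)
{
    MPI_File fh;
    MPI_Offset offset;

    MPI_File_open(MPI_COMM_WORLD, "mpiio.dat", MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &fh);
    offset = (MPI_Offset) my_id * localsize * sizeof(int);
    MPI_File_read_at_all(fh, offset, localvector, localsize, MPI_INT,
                         MPI_STATUS_IGNORE);
    MPI_File_close(&fh);
}
```

The main program of the writer above and the `ordered_print` routine from the spokesman reader could be reused around this sketch to check that each task reads back the values it wrote.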
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void single_writer(int, int *, int);
void many_writers(int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, i, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
for (i = 0; i < localsize; i++) {
localvector[i] = i + 1 + localsize * my_id;
}
many_writers(my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void single_writer(int my_id, int *localvector, int localsize)
{
FILE *fp;
int *fullvector;
fullvector = (int *) malloc(DATASIZE * sizeof(int));
MPI_Gather(localvector, localsize, MPI_INT, fullvector, localsize,
MPI_INT, WRITER_ID, MPI_COMM_WORLD);
if (my_id == WRITER_ID) {
if ((fp = fopen("singlewriter.dat", "wb")) == NULL) {
fprintf(stderr, "Error: %d (%s)\n", errno, strerror(errno));
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
} else {
fwrite(fullvector, sizeof(int), DATASIZE, fp);
fclose(fp);
printf("Wrote %d elements to file singlewriter.dat\n", DATASIZE);
}
}
free(fullvector);
}
void many_writers(int my_id, int *localvector, int localsize)
{
FILE *fp;
char filename[64];
sprintf(filename, "manywriters-%d.dat", my_id);
if ((fp = fopen(filename, "wb")) == NULL) {
fprintf(stderr, "Error: %d (%s)\n", errno, strerror(errno));
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
} else {
fwrite(localvector, sizeof(int), localsize, fp);
fclose(fp);
printf("Wrote %d elements to file manywriters-%d.dat\n", localsize,
my_id);
}
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void single_writer(int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, i, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
for (i = 0; i < localsize; i++) {
localvector[i] = i + 1 + localsize * my_id;
}
single_writer(my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void single_writer(int my_id, int *localvector, int localsize)
{
FILE *fp;
int *fullvector;
fullvector = (int *) malloc(DATASIZE * sizeof(int));
MPI_Gather(localvector, localsize, MPI_INT, fullvector, localsize,
MPI_INT, WRITER_ID, MPI_COMM_WORLD);
if (my_id == WRITER_ID) {
if ((fp = fopen("singlewriter.dat", "wb")) == NULL) {
fprintf(stderr, "Error: %d (%s)\n", errno, strerror(errno));
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
} else {
fwrite(fullvector, sizeof(int), DATASIZE, fp);
fclose(fp);
printf("Wrote %d elements to file singlewriter.dat\n", DATASIZE);
}
}
free(fullvector);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void single_reader(int, int *, int);
void ordered_print(int, int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
single_reader(my_id, localvector, localsize);
ordered_print(ntasks, my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void single_reader(int my_id, int *localvector, int localsize)
{
FILE *fp;
int *fullvector, nread;
char *fname = "singlewriter.dat";
fullvector = (int *) malloc(DATASIZE * sizeof(int));
if (my_id == WRITER_ID) {
if ((fp = fopen(fname, "rb")) == NULL) {
fprintf(stderr, "Error: %d (%s)\n", errno, strerror(errno));
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
} else {
nread = fread(fullvector, sizeof(int), DATASIZE, fp);
fclose(fp);
if (nread != DATASIZE) {
fprintf(stderr, "Warning! The number of read elements is "
" incorrect.\n");
} else {
printf("Read %i numbers from file %s\n", nread, fname);
}
}
}
MPI_Scatter(fullvector, localsize, MPI_INT, localvector, localsize,
MPI_INT, WRITER_ID, MPI_COMM_WORLD);
free(fullvector);
}
/* Try to avoid this kind of pattern whenever possible.
   Here we use serialized output only to make debugging easier. */
void ordered_print(int ntasks, int rank, int *buffer, int n)
{
int task, i;
for (task = 0; task < ntasks; task++) {
if (rank == task) {
printf("Task %i received:", rank);
for (i = 0; i < n; i++) {
printf(" %2i", buffer[i]);
}
printf("\n");
}
MPI_Barrier(MPI_COMM_WORLD);
}
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void single_writer(int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, i, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
for (i = 0; i < localsize; i++) {
localvector[i] = i + 1 + localsize * my_id;
}
single_writer(my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void single_writer(int my_id, int *localvector, int localsize)
{
FILE *fp;
int *fullvector;
/* TODO: Implement a function that writes the data to a file so that
   a single process does the file I/O. Use rank WRITER_ID as the I/O rank */
free(fullvector);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <mpi.h>
#define DATASIZE 64
#define WRITER_ID 0
void single_reader(int, int *, int);
void ordered_print(int, int, int *, int);
int main(int argc, char *argv[])
{
int my_id, ntasks, localsize;
int *localvector;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &ntasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_id);
if (ntasks > 64) {
fprintf(stderr, "Maximum number of tasks is 64.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
if (DATASIZE % ntasks != 0) {
fprintf(stderr, "Datasize (64) should be divisible by number "
"of tasks.\n");
MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
}
localsize = DATASIZE / ntasks;
localvector = (int *) malloc(localsize * sizeof(int));
single_reader(my_id, localvector, localsize);
ordered_print(ntasks, my_id, localvector, localsize);
free(localvector);
MPI_Finalize();
return 0;
}
void single_reader(int my_id, int *localvector, int localsize)
{
FILE *fp;
int *fullvector, nread;
char *fname = "singlewriter.dat";
/* TODO: Implement a function that reads the data from a file so that
   a single process does the file I/O. Use rank WRITER_ID as the I/O rank */
free(fullvector);
}
/* Try to avoid this kind of pattern whenever possible.
   Here we use serialized output only to make debugging easier. */
void ordered_print(int ntasks, int rank, int *buffer, int n)
{
int task, i;
for (task = 0; task < ntasks; task++) {
if (rank == task) {
printf("Task %i received:", rank);
for (i = 0; i < n; i++) {
printf(" %2i", buffer[i]);
}
printf("\n");
}
MPI_Barrier(MPI_COMM_WORLD);
}
}
program pario
use mpi
use, intrinsic :: iso_fortran_env, only : error_unit, output_unit
implicit none
integer, parameter :: datasize = 64, writer_id = 0
integer :: rc, my_id, ntasks, localsize, i
integer, dimension(:), allocatable :: localvector
call mpi_init(rc)
call mpi_comm_size(mpi_comm_world, ntasks, rc)
call mpi_comm_rank(mpi_comm_world, my_id, rc)
if (ntasks > 64) then
write(error_unit, *) 'Maximum number of tasks is 64!'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
if (mod(datasize, ntasks) /= 0) then
write(error_unit,*) 'Datasize (64) should be divisible by number of tasks'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
localsize = datasize / ntasks
allocate(localvector(localsize))
localvector = [(i + my_id * localsize, i=1,localsize)]
call mpiio_writer()
deallocate(localvector)
call mpi_finalize(rc)
contains
subroutine mpiio_writer()
implicit none
integer :: fh, rc, dsize
integer(kind=MPI_OFFSET_KIND) :: offset
call mpi_type_size(MPI_INTEGER, dsize, rc)
offset = my_id * localsize * dsize
call mpi_file_open(MPI_COMM_WORLD, 'mpiio.dat', &
& MPI_MODE_CREATE+MPI_MODE_WRONLY, MPI_INFO_NULL, fh, rc)
call mpi_file_write_at_all(fh, offset, localvector, localsize, &
& MPI_INTEGER, MPI_STATUS_IGNORE, rc)
call mpi_file_close(fh, rc)
end subroutine mpiio_writer
end program pario
program pario
use mpi
use, intrinsic :: iso_fortran_env, only : error_unit, output_unit
implicit none
integer, parameter :: datasize = 64, writer_id = 0
integer :: rc, my_id, ntasks, localsize, i
integer, dimension(:), allocatable :: localvector
integer, dimension(datasize) :: fullvector
call mpi_init(rc)
call mpi_comm_size(mpi_comm_world, ntasks, rc)
call mpi_comm_rank(mpi_comm_world, my_id, rc)
if (ntasks > 64) then
write(error_unit, *) 'Maximum number of tasks is 64!'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
if (mod(datasize, ntasks) /= 0) then
write(error_unit,*) 'Datasize (64) should be divisible by number of tasks'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
localsize = datasize / ntasks
allocate(localvector(localsize))
localvector = [(i + my_id * localsize, i=1,localsize)]
call many_writers()
deallocate(localvector)
call mpi_finalize(rc)
contains
subroutine single_writer()
implicit none
call mpi_gather(localvector, localsize, mpi_integer, fullvector, &
& localsize, mpi_integer, writer_id, mpi_comm_world, rc)
if (my_id == writer_id) then
open(10, file='singlewriter.dat', status='replace', form='unformatted', &
& access='stream')
write(10, pos=1) fullvector
close (10)
write(output_unit,'(A,I0,A)') 'Wrote ', size(fullvector), &
& ' elements to file singlewriter.dat'
end if
end subroutine single_writer
subroutine many_writers()
implicit none
character(len=85) :: filename
write(filename, '(A,I0,A)') 'manywriters-', my_id, '.dat'
open(my_id+10, file=filename, status='replace', form='unformatted', &
& access='stream')
write(my_id+10, pos=1) localvector
close (my_id+10)
write(output_unit,'(A,I0,A,A)') 'Wrote ', size(localvector), &
& ' elements to file ', filename
end subroutine many_writers
end program pario
program pario
use mpi
use, intrinsic :: iso_fortran_env, only : error_unit, output_unit
implicit none
integer, parameter :: datasize = 64, writer_id = 0
integer :: rc, my_id, ntasks, localsize, i
integer, dimension(:), allocatable :: localvector
integer, dimension(datasize) :: fullvector
call mpi_init(rc)
call mpi_comm_size(mpi_comm_world, ntasks, rc)
call mpi_comm_rank(mpi_comm_world, my_id, rc)
if (ntasks > 64) then
write(error_unit, *) 'Maximum number of tasks is 64!'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
if (mod(datasize, ntasks) /= 0) then
write(error_unit,*) 'Datasize (64) should be divisible by number of tasks'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
localsize = datasize / ntasks
allocate(localvector(localsize))
localvector = [(i + my_id * localsize, i=1,localsize)]
call single_writer()
deallocate(localvector)
call mpi_finalize(rc)
contains
subroutine single_writer()
implicit none
call mpi_gather(localvector, localsize, mpi_integer, fullvector, &
& localsize, mpi_integer, writer_id, mpi_comm_world, rc)
if (my_id == writer_id) then
open(10, file='singlewriter.dat', status='replace', form='unformatted', &
& access='stream')
write(10, pos=1) fullvector
close (10)
write(output_unit,'(A,I0,A)') 'Wrote ', size(fullvector), &
& ' elements to file singlewriter.dat'
end if
end subroutine single_writer
end program pario
program pario
use mpi
use, intrinsic :: iso_fortran_env, only : error_unit, output_unit
implicit none
integer, parameter :: datasize = 64, writer_id = 0
integer :: rc, my_id, ntasks, localsize, i
integer, dimension(:), allocatable :: localvector
integer, dimension(datasize) :: fullvector
call mpi_init(rc)
call mpi_comm_size(mpi_comm_world, ntasks, rc)
call mpi_comm_rank(mpi_comm_world, my_id, rc)
if (ntasks > 64) then
write(error_unit, *) 'Maximum number of tasks is 64!'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
if (mod(datasize, ntasks) /= 0) then
write(error_unit, *) 'Datasize (64) should be divisible by number of tasks'
call mpi_abort(MPI_COMM_WORLD, -1, rc)
end if
localsize = datasize / ntasks
allocate(localvector(localsize))
localvector = [(i + my_id * localsize, i=1,localsize)]
call single_reader()
call ordered_print()
deallocate(localvector)
call mpi_finalize(rc)
contains
subroutine single_reader()
implicit none
if (my_id == writer_id) then
open(10, file='singlewriter.dat', status='old', form='unformatted', &
& access='stream')
read(10, pos=1) fullvector
close(10)
write(output_unit,'(A,I0,A)') 'Read ', size(fullvector), &
& ' elements from file singlewriter.dat'
end if
call mpi_scatter(fullvector, localsize, mpi_integer, localvector, &
& localsize, mpi_integer, writer_id, mpi_comm_world, rc)
end subroutine single_reader
subroutine ordered_print
implicit none
integer :: task
do task = 0, ntasks-1
if (my_id == task) then
write(output_unit, '(A,I0,A)', advance='no') 'Task ', &
& my_id, ' received:'
do i = 1, localsize
write(output_unit, '(I3)', advance='no') localvector(i)
end do
write(output_unit,*) ' '
end if
call mpi_barrier(MPI_COMM_WORLD, rc)
end do
end subroutine ordered_print
end program pario