Commit 5ed9bb3f authored by Jussi Enkovaara

Fortran version of 2D heat equation

parent f2f8f97c
FC=mpif90
CC=gcc
FCFLAGS=-O3 -Wall
CCFLAGS=-O3 -Wall
LDFLAGS=
LIBS=-lpng
EXE=heat_mpi
OBJS=main.o heat_mod.o core.o setup.o utilities.o io.o pngwriter_mod.o
OBJS_PNG=pngwriter.o
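# The object list above mirrors the Fortran module dependencies spelled out
# below; the pattern rules at the end compile any .F90 or .c source into an
# object file ($< is the first prerequisite, $@ is the target).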
all: $(EXE)

pngwriter.o: pngwriter.c pngwriter.h
core.o: core.F90 heat_mod.o
utilities.o: utilities.F90 heat_mod.o
io.o: io.F90 heat_mod.o pngwriter_mod.o
setup.o: setup.F90 heat_mod.o utilities.o io.o
pngwriter_mod.o: pngwriter_mod.F90 heat_mod.o
main.o: main.F90 heat_mod.o core.o io.o setup.o utilities.o

$(EXE): $(OBJS) $(OBJS_PNG)
	$(FC) $(FCFLAGS) $(OBJS) $(OBJS_PNG) -o $@ $(LDFLAGS) $(LIBS)

%.o: %.F90
	$(FC) $(FCFLAGS) -c $< -o $@

%.o: %.c
	$(CC) $(CCFLAGS) -c $< -o $@

.PHONY: clean
clean:
	-/bin/rm -f $(EXE) a.out *.o *.mod *.png *~
! Main solver routines for heat equation solver
module core
use heat
contains
! Initiate the exchange of boundary data between neighbouring MPI tasks
subroutine exchange_init(field0, parallel)
use mpi
implicit none
type(field), intent(inout) :: field0
type(parallel_data), intent(in) :: parallel
integer :: ierr
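! Eight non-blocking operations are posted here (one send and one receive
! per neighbour); they are completed later in exchange_finalize. The halos
! are the extra rows/columns at indices 0 and nx+1 / ny+1 of the data array.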
! Send to left, receive from right
call mpi_isend(field0%data(0, 1), 1, parallel%columntype, &
& parallel%nleft, 11, parallel%comm, parallel%requests(1), ierr)
call mpi_irecv(field0%data(0, field0%ny + 1), 1, parallel%columntype, &
& parallel%nright, 11, &
& parallel%comm, parallel%requests(2), ierr)
! Send to right, receive from left
call mpi_isend(field0%data(0, field0%ny), 1, parallel%columntype, &
& parallel%nright, 12, parallel%comm, parallel%requests(3), ierr)
call mpi_irecv(field0%data(0, 0), 1, parallel%columntype, &
& parallel%nleft, 12, &
& parallel%comm, parallel%requests(4), ierr)
! Send to up, receive from down
call mpi_isend(field0%data(1, 0), 1, parallel%rowtype, &
& parallel%nup, 13, parallel%comm, parallel%requests(5), ierr)
call mpi_irecv(field0%data(field0%nx+1, 0), 1, parallel%rowtype, &
& parallel%ndown, 13, parallel%comm, parallel%requests(6), ierr)
! Send to down, receive from up
call mpi_isend(field0%data(field0%nx, 0), 1, parallel%rowtype, &
& parallel%ndown, 14, parallel%comm, parallel%requests(7), ierr)
call mpi_irecv(field0%data(0, 0), 1, parallel%rowtype, &
& parallel%nup, 14, parallel%comm, parallel%requests(8), ierr)
end subroutine exchange_init
! Finalize the non-blocking communication
subroutine exchange_finalize(parallel)
use mpi
implicit none
type(parallel_data), intent(inout) :: parallel
integer :: ierr
call mpi_waitall(8, parallel%requests, mpi_statuses_ignore, ierr)
end subroutine exchange_finalize
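! A typical time-step loop (in main.F90, which is not shown here) can overlap
! the halo exchange with the interior update, for example:
!
!   call exchange_init(previous, parallelization)
!   call evolve_interior(current, previous, a, dt)
!   call exchange_finalize(parallelization)
!   call evolve_edges(current, previous, a, dt)
!
! The variable names above are only illustrative.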
! Compute one time step of temperature evolution
! Arguments:
! curr (type(field)): current temperature values
! prev (type(field)): values from previous time step
! a (real(dp)): update equation constant
! dt (real(dp)): time step value
subroutine evolve_interior(curr, prev, a, dt)
implicit none
type(field), intent(inout) :: curr, prev
real(dp), intent(in) :: a, dt
integer :: i, j, nx, ny
nx = curr%nx
ny = curr%ny
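! Five-point stencil of the explicit scheme:
!   u_new(i,j) = u(i,j) + a*dt * ( (u(i-1,j) - 2*u(i,j) + u(i+1,j)) / dx**2
!                                + (u(i,j-1) - 2*u(i,j) + u(i,j+1)) / dy**2 )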
do j = 2, ny - 1
do i = 2, nx - 1
curr%data(i, j) = prev%data(i, j) + a * dt * &
& ((prev%data(i-1, j) - 2.0 * prev%data(i, j) + &
& prev%data(i+1, j)) / curr%dx**2 + &
& (prev%data(i, j-1) - 2.0 * prev%data(i, j) + &
& prev%data(i, j+1)) / curr%dy**2)
end do
end do
end subroutine evolve_interior
! Compute one time step of temperature evolution
! Arguments:
! curr (type(field)): current temperature values
! prev (type(field)): values from previous time step
! a (real(dp)): update equation constant
! dt (real(dp)): time step value
! Update only the border-dependent part
subroutine evolve_edges(curr, prev, a, dt)
implicit none
type(field), intent(inout) :: curr, prev
real(dp), intent(in) :: a, dt
integer :: i, j, nx, ny
nx = curr%nx
ny = curr%ny
j = 1
do i = 1, nx
curr%data(i, j) = prev%data(i, j) + a * dt * &
& ((prev%data(i-1, j) - 2.0 * prev%data(i, j) + &
& prev%data(i+1, j)) / curr%dx**2 + &
& (prev%data(i, j-1) - 2.0 * prev%data(i, j) + &
& prev%data(i, j+1)) / curr%dy**2)
end do
j = ny
do i = 1, nx
curr%data(i, j) = prev%data(i, j) + a * dt * &
& ((prev%data(i-1, j) - 2.0 * prev%data(i, j) + &
& prev%data(i+1, j)) / curr%dx**2 + &
& (prev%data(i, j-1) - 2.0 * prev%data(i, j) + &
& prev%data(i, j+1)) / curr%dy**2)
end do
i = 1
do j = 1, ny
curr%data(i, j) = prev%data(i, j) + a * dt * &
& ((prev%data(i-1, j) - 2.0 * prev%data(i, j) + &
& prev%data(i+1, j)) / curr%dx**2 + &
& (prev%data(i, j-1) - 2.0 * prev%data(i, j) + &
& prev%data(i, j+1)) / curr%dy**2)
end do
i = nx
do j = 1, ny
curr%data(i, j) = prev%data(i, j) + a * dt * &
& ((prev%data(i-1, j) - 2.0 * prev%data(i, j) + &
& prev%data(i+1, j)) / curr%dx**2 + &
& (prev%data(i, j-1) - 2.0 * prev%data(i, j) + &
& prev%data(i, j+1)) / curr%dy**2)
end do
end subroutine evolve_edges
end module core
! Field metadata for heat equation solver
module heat
use iso_fortran_env, only : REAL64
implicit none
integer, parameter :: dp = REAL64
real(dp), parameter :: DX = 0.01, DY = 0.01 ! Fixed grid spacing
type :: field
integer :: nx ! local dimension of the field
integer :: ny
integer :: nx_full ! global dimension of the field
integer :: ny_full
real(dp) :: dx
real(dp) :: dy
real(dp), dimension(:,:), allocatable :: data
end type field
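! Note: the data array is allocated as data(0:nx+1, 0:ny+1); the extra
! first/last row and column hold the halo (ghost) values received from
! neighbouring tasks or the fixed boundary condition.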
type :: parallel_data
integer :: size
integer :: rank
integer :: nup, ndown, nleft, nright ! Ranks of neighbouring MPI tasks
integer :: comm
integer :: requests(8) ! Non-blocking communication handles
integer :: rowtype ! MPI Datatype for communication of
! rows
integer :: columntype ! MPI Datatype for communication of
! columns
integer :: subarraytype ! MPI Datatype for text I/O
integer :: restarttype ! MPI Datatype for restart I/O
integer :: filetype ! MPI Datatype for file view in
! restart I/O
end type parallel_data
contains
! Initialize the field type metadata
! Arguments:
! field0 (type(field)): field whose metadata is initialized
! nx, ny: global field dimensions
! parallel (type(parallel_data)): parallel domain decomposition
subroutine set_field_dimensions(field0, nx, ny, parallel)
use mpi
implicit none
type(field), intent(out) :: field0
integer, intent(in) :: nx, ny
type(parallel_data), intent(in) :: parallel
integer :: nx_local, ny_local
integer, dimension(2) :: dims, coords
logical :: periods(2)
integer :: ierr
call mpi_cart_get(parallel%comm, 2, dims, periods, coords, ierr)
nx_local = nx / dims(1)
ny_local = ny / dims(2)
field0%dx = DX
field0%dy = DY
field0%nx = nx_local
field0%ny = ny_local
field0%nx_full = nx
field0%ny_full = ny
end subroutine set_field_dimensions
subroutine parallel_setup(parallel, nx, ny)
use mpi
implicit none
type(parallel_data), intent(out) :: parallel
integer, intent(in), optional :: nx, ny
integer :: nx_local, ny_local
integer :: world_size
integer :: dims(2) = (/0, 0/)
logical :: periods(2) = (/.false., .false./)
integer :: coords(2)
integer, dimension(2) :: sizes, subsizes, offsets
integer :: ierr
call mpi_comm_size(MPI_COMM_WORLD, world_size, ierr)
! Set grid dimensions
call mpi_dims_create(world_size, 2, dims, ierr)
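! With dims initialized to zero, mpi_dims_create chooses a balanced
! two-dimensional factorization of the number of MPI tasks.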
nx_local = nx / dims(1)
ny_local = ny / dims(2)
! Ensure that the grid can be divided evenly among the MPI tasks
if (nx_local * dims(1) /= nx) then
write(*,*) 'Cannot divide the grid evenly among processors in the x-direction', &
& nx_local, dims(1), nx
call mpi_abort(MPI_COMM_WORLD, -2, ierr)
end if
if (ny_local * dims(2) /= ny) then
write(*,*) 'Cannot divide the grid evenly among processors in the y-direction', &
& ny_local, dims(2), ny
call mpi_abort(MPI_COMM_WORLD, -2, ierr)
end if
! Create cartesian communicator
call mpi_cart_create(MPI_COMM_WORLD, 2, dims, periods, .true., parallel%comm, ierr)
call mpi_cart_shift(parallel%comm, 0, 1, parallel%nup, parallel%ndown, ierr)
call mpi_cart_shift(parallel%comm, 1, 1, parallel%nleft, &
& parallel%nright, ierr)
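! With reorder=.true. MPI may renumber the ranks in the new communicator.
! At the non-periodic boundaries mpi_cart_shift returns MPI_PROC_NULL, so
! the corresponding halo sends and receives become no-ops.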
call mpi_comm_size(parallel%comm, parallel%size, ierr)
call mpi_comm_rank(parallel%comm, parallel%rank, ierr)
if (parallel%rank == 0) then
write(*,'(A, I3, A3, I3)') 'Using domain decomposition', dims(1), 'x', &
& dims(2)
write(*,'(A, I5, A3, I5)') 'Local domain size', nx_local, 'x', ny_local
end if
! Create datatypes for halo exchange
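! The array is stored in Fortran column-major order, so one column of the
! (0:nx+1, 0:ny+1) block is contiguous (nx_local + 2 elements), whereas one
! row consists of ny_local + 2 elements separated by a stride of nx_local + 2.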
call mpi_type_vector(ny_local + 2, 1, nx_local + 2, MPI_DOUBLE_PRECISION, &
& parallel%rowtype, ierr)
call mpi_type_contiguous(nx_local + 2, MPI_DOUBLE_PRECISION, &
& parallel%columntype, ierr)
call mpi_type_commit(parallel%rowtype, ierr)
call mpi_type_commit(parallel%columntype, ierr)
! Create datatype for subblock needed in I/O
! Rank 0 uses the datatype for receiving data into the full array, while
! the other ranks use it for sending the interior part of their local array
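! sizes describes the full array, subsizes the block that is transferred,
! and offsets its starting corner (zero-based) within the array.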
sizes(1) = nx_local + 2
sizes(2) = ny_local + 2
subsizes(1) = nx_local
subsizes(2) = ny_local
offsets(1) = 1
offsets(2) = 1
if (parallel%rank == 0) then
sizes(1) = nx
sizes(2) = ny
offsets(1) = 0
offsets(2) = 0
end if
call mpi_type_create_subarray(2, sizes, subsizes, offsets, &
& MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, parallel%subarraytype, &
& ierr)
call mpi_type_commit(parallel%subarraytype, ierr)
! Create datatypes for restart I/O
! For ranks on the edges of the decomposition, the ghost layer holding the
! physical boundary condition is also written
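! filetype describes where this rank's block is located in the global file;
! restarttype describes which part of the local array (interior plus any
! physical boundary layers) is written from or read into.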
call mpi_cart_coords(parallel%comm, parallel%rank, 2, coords, ierr)
sizes(1) = nx + 2
sizes(2) = ny + 2
offsets(1) = 1 + coords(1) * nx_local
offsets(2) = 1 + coords(2) * ny_local
if (coords(1) == 0) then
offsets(1) = offsets(1) - 1
subsizes(1) = subsizes(1) + 1
end if
if (coords(1) == dims(1) - 1) then
subsizes(1) = subsizes(1) + 1
end if
if (coords(2) == 0) then
offsets(2) = offsets(2) - 1
subsizes(2) = subsizes(2) + 1
end if
if (coords(2) == dims(2) - 1) then
subsizes(2) = subsizes(2) + 1
end if
call mpi_type_create_subarray(2, sizes, subsizes, offsets, &
& MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, parallel%filetype, ierr)
call mpi_type_commit(parallel%filetype, ierr)
sizes(1) = nx_local + 2
sizes(2) = ny_local + 2
offsets(1) = 1
offsets(2) = 1
if (coords(1) == 0) then
offsets(1) = 0
end if
if (coords(2) == 0) then
offsets(2) = 0
end if
call mpi_type_create_subarray(2, sizes, subsizes, offsets, &
& MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, parallel%restarttype, ierr)
call mpi_type_commit(parallel%restarttype, ierr)
end subroutine parallel_setup
end module heat
! I/O routines for heat equation solver
module io
use heat
use mpi
contains
! Output routine, saves the temperature distribution as a png image
! Arguments:
! curr (type(field)): variable with the temperature data
! iter (integer): index of the time step
subroutine write_field(curr, iter, parallel)
use pngwriter
implicit none
type(field), intent(in) :: curr
integer, intent(in) :: iter
type(parallel_data), intent(in) :: parallel
character(len=85) :: filename
integer :: stat
real(dp), dimension(:,:), allocatable, target :: full_data
integer :: coords(2)
integer :: ix, jy
integer :: p, ierr
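! Rank 0 gathers the interior blocks of all tasks: each block is received
! with subarraytype directly into its correct position in full_data, so no
! separate copying or reordering step is needed.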
if (parallel%rank == 0) then
allocate(full_data(curr%nx_full, curr%ny_full))
! Copy rank #0 data to the global array
full_data(1:curr%nx, 1:curr%ny) = curr%data(1:curr%nx, 1:curr%ny)
! Receive data from other ranks
do p = 1, parallel%size - 1
call mpi_cart_coords(parallel%comm, p, 2, coords, ierr)
ix = coords(1) * curr%nx + 1
jy = coords(2) * curr%ny + 1
call mpi_recv(full_data(ix, jy), 1, parallel%subarraytype, p, 22, &
& parallel%comm, MPI_STATUS_IGNORE, ierr)
end do
write(filename,'(A5,I4.4,A4)') 'heat_', iter, '.png'
stat = save_png(full_data, curr%nx_full, curr%ny_full, filename)
deallocate(full_data)
else
! Send data
call mpi_ssend(curr%data, 1, parallel%subarraytype, 0, 22, &
& parallel%comm, ierr)
end if
end subroutine write_field
! Reads the temperature distribution from an input file
! Arguments:
! field0 (type(field)): field variable that will store the
! read data
! filename (char): name of the input file
! Note that this version assumes the input data to be in C memory layout
subroutine read_field(field0, filename, parallel)
implicit none
type(field), intent(out) :: field0
character(len=85), intent(in) :: filename
type(parallel_data), intent(out) :: parallel
integer :: nx, ny, i, p, ierr
character(len=2) :: dummy
real(dp), dimension(:,:), allocatable :: full_data
integer :: coords(2)
integer :: ix, jy
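! Rank 0 reads the header and the full array, then scatters it: each rank's
! block is sent with subarraytype, and the receivers place it directly into
! the interior part of their local (halo-padded) array.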
open(10, file=filename)
! Read the header
read(10, *) dummy, nx, ny
call parallel_setup(parallel, nx, ny)
call set_field_dimensions(field0, nx, ny, parallel)
! The arrays for temperature field contain also a halo region
allocate(field0%data(0:field0%nx+1, 0:field0%ny+1))
if (parallel%rank == 0) then
allocate(full_data(nx, ny))
! Read the data
do i = 1, nx
read(10, *) full_data(i, 1:ny)
end do
! Copy own local part
field0%data(1:field0%nx, 1:field0%ny) = full_data(1:field0%nx, 1:field0%ny)
! Send to the other ranks
do p=1, parallel%size - 1
call mpi_cart_coords(parallel%comm, p, 2, coords, ierr)
ix = coords(1) * field0%nx + 1
jy = coords(2) * field0%ny + 1
call mpi_send(full_data(ix, jy), 1, parallel%subarraytype, p, 44, &
& parallel%comm, ierr)
end do
else
! Receive data
call mpi_recv(field0%data, 1, parallel%subarraytype, 0, 44, &
& parallel%comm, MPI_STATUS_IGNORE, ierr)
end if
! Set the boundary values
field0%data(1:field0%nx, 0) = field0%data(1:field0%nx, 1)
field0%data(1:field0%nx, field0%ny+1) = field0%data(1:field0%nx, field0%ny)
field0%data(0, 0:field0%ny+1) = field0%data(1, 0:field0%ny+1)
field0%data(field0%nx+1, 0:field0%ny+1) = field0%data(field0%nx, 0:field0%ny+1)
close(10)
if (parallel%rank == 0) then
deallocate(full_data)
end if
end subroutine read_field
! Write a restart checkpoint that contains field dimensions, current
! iteration number and temperature field.
subroutine write_restart(temp, parallel, iter)
implicit none
type(field), intent(in) :: temp
type(parallel_data), intent(in) :: parallel
integer, intent(in) :: iter
integer :: fp
integer(kind=MPI_OFFSET_KIND) :: disp
integer :: ierr
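! Restart file layout: three default integers (nx_full, ny_full, iter)
! followed by the full temperature field, written collectively using the
! file view defined by parallel%filetype.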
call mpi_file_open(MPI_COMM_WORLD, "HEAT_RESTART.dat", &
& MPI_MODE_CREATE + MPI_MODE_WRONLY, &
& MPI_INFO_NULL, fp, ierr)
! Write the restart header on rank #0
if (parallel%rank == 0) then
disp = 0
call mpi_file_write_at(fp, disp, temp%nx_full, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
disp = disp + sizeof(temp%nx_full)
call mpi_file_write_at(fp, disp, temp%ny_full, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
disp = disp + sizeof(temp%ny_full)
call mpi_file_write_at(fp, disp, iter, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
end if
! For simplicity, skip the three header integers at the beginning of the
! file in all tasks; the displacement of the file view is given in bytes
disp = 3 * sizeof(temp%nx_full)
call mpi_file_set_view(fp, disp, MPI_DOUBLE_PRECISION, parallel%filetype, &
& 'native', MPI_INFO_NULL, ierr)
call mpi_file_write_at_all(fp, 0_MPI_OFFSET_KIND, temp%data, &
& 1, parallel%restarttype, MPI_STATUS_IGNORE, ierr)
call mpi_file_close(fp, ierr)
end subroutine write_restart
! Read a restart checkpoint that contains field dimensions, current
! iteration number and temperature field.
subroutine read_restart(temp, parallel, iter)
implicit none
type(field), intent(inout) :: temp
type(parallel_data), intent(inout) :: parallel
integer, intent(out) :: iter
integer :: rows, cols
integer :: fp
integer(kind=MPI_OFFSET_KIND) :: disp
integer :: ierr
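! The file layout produced by write_restart is assumed here: three header
! integers followed by the full temperature field.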
call mpi_file_open(MPI_COMM_WORLD, "HEAT_RESTART.dat", MPI_MODE_RDONLY, &
& MPI_INFO_NULL, fp, ierr)
! Read the restart header (dimensions, number of preceding steps) on
! rank #0 and broadcast it to the other ranks
if (parallel%rank == 0) then
disp = 0
call mpi_file_read_at(fp, disp, rows, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
disp = disp + sizeof(rows)
call mpi_file_read_at(fp, disp, cols, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
disp = disp + sizeof(cols)
call mpi_file_read_at(fp, disp, iter, 1, MPI_INTEGER, MPI_STATUS_IGNORE, ierr)
end if
call mpi_bcast(rows, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
call mpi_bcast(cols, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
call mpi_bcast(iter, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
! Set up the MPI metadata with the correct dimensions
call parallel_setup(parallel, rows, cols)
! set local dimensions and allocate memory for the data
call set_field_dimensions(temp, rows, cols, parallel)
allocate(temp%data(0:temp%nx+1, 0:temp%ny+1))
temp%data(:,:) = 0.0
! read in restart data