Commit f2f8f97c authored by Jussi Enkovaara

C version of two dimensional heat equation solver

parent 95cd288e
CC=mpicc
CCFLAGS=-O3 -Wall
LDFLAGS=
LIBS=-lpng -lm
EXE=heat_mpi
OBJS=core.o setup.o utilities.o io.o main.o
OBJS_PNG=pngwriter.o
all: $(EXE)
pngwriter.o: pngwriter.c pngwriter.h
core.o: core.c heat.h
utilities.o: utilities.c heat.h
setup.o: setup.c heat.h
io.o: io.c heat.h
main.o: main.c heat.h
$(OBJS_PNG): C_COMPILER := $(CC)
$(OBJS): C_COMPILER := $(CC)
$(EXE): $(OBJS) $(OBJS_PNG)
	$(CC) $(CCFLAGS) $(OBJS) $(OBJS_PNG) -o $@ $(LDFLAGS) $(LIBS)
%.o: %.c
	$(C_COMPILER) $(CCFLAGS) -c $< -o $@
.PHONY: clean
clean:
	-/bin/rm -f $(EXE) a.out *.o *.png *~
/* Main solver routines for heat equation solver */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <mpi.h>
#include "heat.h"
/* Exchange the boundary values */
void exchange_init(field *temperature, parallel_data *parallel)
{
int ind, width;
width = temperature->ny + 2;
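/* The data array has one ghost row and one ghost column on each side, so
 * the row stride is ny + 2. Halo data is exchanged with the four Cartesian
 * neighbours using non-blocking sends and receives together with the
 * derived row and column datatypes; the requests are completed later in
 * exchange_finalize(). */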
// Send to the neighbour above, receive from the neighbour below
ind = idx(1, 0, width);
MPI_Isend(&temperature->data[ind], 1, parallel->rowtype,
parallel->nup, 11, parallel->comm, &parallel->requests[0]);
ind = idx(temperature->nx + 1, 0, width);
MPI_Irecv(&temperature->data[ind], 1, parallel->rowtype,
parallel->ndown, 11, parallel->comm, &parallel->requests[1]);
// Send to the neighbour below, receive from the neighbour above
ind = idx(temperature->nx, 0, width);
MPI_Isend(&temperature->data[ind], 1, parallel->rowtype,
parallel->ndown, 12, parallel->comm, &parallel->requests[2]);
ind = idx(0, 0, width);
MPI_Irecv(&temperature->data[ind], 1, parallel->rowtype,
parallel->nup, 12, parallel->comm, &parallel->requests[3]);
// Send to the left, receive from right
ind = idx(0, 1, width);
MPI_Isend(&temperature->data[ind], 1, parallel->columntype,
parallel->nleft, 13, parallel->comm, &parallel->requests[4]);
ind = idx(0, temperature->ny + 1, width);
MPI_Irecv(&temperature->data[ind], 1, parallel->columntype,
parallel->nright, 13, parallel->comm, &parallel->requests[5]);
// Send to the right, receive from left
ind = idx(0, temperature->ny, width);
MPI_Isend(&temperature->data[ind], 1, parallel->columntype,
parallel->nright, 14, parallel->comm, &parallel->requests[7]);
ind = 0;
MPI_Irecv(&temperature->data[ind], 1, parallel->columntype,
parallel->nleft, 14, parallel->comm, &parallel->requests[6]);
}
/* complete the non-blocking communication */
void exchange_finalize(parallel_data *parallel)
{
MPI_Waitall(8, &parallel->requests[0], MPI_STATUSES_IGNORE);
}
/* Update the temperature values using five-point stencil */
void evolve_interior(field *curr, field *prev, double a, double dt)
{
int i, j;
int ic, iu, id, il, ir; /* indices for center, up, down, left, right */
int width = curr->ny + 2;
double dx2, dy2;
/* Determine the temperature field at next time step
* As we have fixed boundary conditions, the outermost gridpoints
* are not updated. */
dx2 = prev->dx * prev->dx;
dy2 = prev->dy * prev->dy;
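/* Only the pure interior (i = 2..nx-1, j = 2..ny-1) is updated here: these
 * points do not depend on ghost data, so they can be computed while the
 * halo exchange started in exchange_init() is still in flight. The border
 * rows and columns are handled afterwards in evolve_edges(). The update is
 * the explicit Euler step
 *   u_new = u + a*dt*((u_up - 2u + u_down)/dx^2 + (u_right - 2u + u_left)/dy^2)
 */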
for (i = 2; i < curr->nx; i++) {
for (j = 2; j < curr->ny; j++) {
ic = idx(i, j, width);
iu = idx(i+1, j, width);
id = idx(i-1, j, width);
ir = idx(i, j+1, width);
il = idx(i, j-1, width);
curr->data[ic] = prev->data[ic] + a * dt *
((prev->data[iu] -
2.0 * prev->data[ic] +
prev->data[id]) / dx2 +
(prev->data[ir] -
2.0 * prev->data[ic] +
prev->data[il]) / dy2);
}
}
}
/* Update the temperature values using five-point stencil */
/* update only the border-dependent regions of the field */
void evolve_edges(field *curr, field *prev, double a, double dt)
{
int i, j;
int ic, iu, id, il, ir; /* indices for center, up, down, left, right */
int width = curr->ny + 2;
double dx2, dy2;
/* Determine the temperature field at next time step
* As we have fixed boundary conditions, the outermost gridpoints
* are not updated. */
dx2 = prev->dx * prev->dx;
dy2 = prev->dy * prev->dy;
i = 1;
for (j = 1; j < curr->ny + 1; j++) {
ic = idx(i, j, width);
iu = idx(i+1, j, width);
id = idx(i-1, j, width);
ir = idx(i, j+1, width);
il = idx(i, j-1, width);
curr->data[ic] = prev->data[ic] + a * dt *
((prev->data[iu] -
2.0 * prev->data[ic] +
prev->data[id]) / dx2 +
(prev->data[ir] -
2.0 * prev->data[ic] +
prev->data[il]) / dy2);
}
i = curr->nx;
for (j = 1; j < curr->ny + 1; j++) {
ic = idx(i, j, width);
iu = idx(i+1, j, width);
id = idx(i-1, j, width);
ir = idx(i, j+1, width);
il = idx(i, j-1, width);
curr->data[ic] = prev->data[ic] + a * dt *
((prev->data[iu] -
2.0 * prev->data[ic] +
prev->data[id]) / dx2 +
(prev->data[ir] -
2.0 * prev->data[ic] +
prev->data[il]) / dy2);
}
j = 1;
for (i = 1; i < curr->nx + 1; i++) {
ic = idx(i, j, width);
iu = idx(i+1, j, width);
id = idx(i-1, j, width);
ir = idx(i, j+1, width);
il = idx(i, j-1, width);
curr->data[ic] = prev->data[ic] + a * dt *
((prev->data[iu] -
2.0 * prev->data[ic] +
prev->data[id]) / dx2 +
(prev->data[ir] -
2.0 * prev->data[ic] +
prev->data[il]) / dy2);
}
j = curr->ny;
for (i = 1; i < curr->nx + 1; i++) {
ic = idx(i, j, width);
iu = idx(i+1, j, width);
id = idx(i-1, j, width);
ir = idx(i, j+1, width);
il = idx(i, j-1, width);
curr->data[ic] = prev->data[ic] + a * dt *
((prev->data[iu] -
2.0 * prev->data[ic] +
prev->data[id]) / dx2 +
(prev->data[ir] -
2.0 * prev->data[ic] +
prev->data[il]) / dy2);
}
}
#ifndef __HEAT_H__
#define __HEAT_H__
/* Datatype for temperature field */
typedef struct {
/* nx and ny are the true dimensions of the field. The array data
* contains also ghost layers, so it will have dimensions nx+2 x ny+2 */
int nx; /* Local dimensions of the field */
int ny;
int nx_full; /* Global dimensions of the field */
int ny_full; /* Global dimensions of the field */
double dx;
double dy;
double *data;
} field;
/* Datatype for basic parallelization information */
typedef struct {
int size; /* Number of MPI tasks */
int rank;
int nup, ndown, nleft, nright; /* Ranks of neighbouring MPI tasks */
MPI_Comm comm; /* Cartesian communicator */
MPI_Request requests[8]; /* Requests for non-blocking communication */
MPI_Datatype rowtype; /* MPI Datatype for communication of rows */
MPI_Datatype columntype; /* MPI Datatype for communication of columns */
MPI_Datatype subarraytype; /* MPI Datatype for communication in text I/O */
MPI_Datatype restarttype; /* MPI Datatype for communication in restart I/O */
MPI_Datatype filetype; /* MPI Datatype for file view in restart I/O */
} parallel_data;
/* We use here fixed grid spacing */
#define DX 0.01
#define DY 0.01
/* File name for restart checkpoints */
#define CHECKPOINT "HEAT_RESTART.dat"
/* Inline function for indexing the 2D arrays */
static inline int idx(int i, int j, int width)
{
return i * width + j;
}
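/* Example: for a field with local size nx x ny the stored array is
 * (nx + 2) x (ny + 2) including the ghost layers, so an interior point
 * (i, j) with 1 <= i <= nx, 1 <= j <= ny is accessed as
 * data[idx(i, j, ny + 2)]. */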
/* Function prototypes */
double *malloc_2d(int nx, int ny);
void free_2d(double *array);
void set_field_dimensions(field *temperature, int nx, int ny,
parallel_data *parallel);
void parallel_setup(parallel_data *parallel, int nx, int ny);
void initialize(int argc, char *argv[], field *temperature1,
field *temperature2, int *nsteps, parallel_data *parallel,
int *iter0);
void generate_field(field *temperature, parallel_data *parallel);
void exchange_init(field *temperature, parallel_data *parallel);
void exchange_finalize(parallel_data *parallel);
void evolve_interior(field *curr, field *prev, double a, double dt);
void evolve_edges(field *curr, field *prev, double a, double dt);
void write_field(field *temperature, int iter, parallel_data *parallel);
void read_field(field *temperature1, field *temperature2,
char *filename, parallel_data *parallel);
void write_restart(field *temperature, parallel_data *parallel, int iter);
void read_restart(field *temperature, parallel_data *parallel, int *iter);
void copy_field(field *temperature1, field *temperature2);
void swap_fields(field *temperature1, field *temperature2);
void allocate_field(field *temperature);
void deallocate_field(field *temperature);
void finalize(field *temperature1, field *temperature2,
parallel_data *parallel);
#endif /* __HEAT_H__ */
/* I/O related functions for heat equation solver */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <mpi.h>
#include "heat.h"
#include "pngwriter.h"
/* Output routine that prints out a picture of the temperature
* distribution. */
void write_field(field *temperature, int iter, parallel_data *parallel)
{
char filename[64];
/* The actual write routine takes only the inner data
 * (without ghost layers), so we need a separate array for it. */
int height, width;
double *full_data;
int coords[2];
int ix, jy;
int i, p;
height = temperature->nx_full;
width = temperature->ny_full;
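/* Rank 0 gathers the interior block of every rank into full_data using the
 * subarray datatype and writes a single PNG; all other ranks just send
 * their own block. */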
if (parallel->rank == 0) {
/* Copy the inner data */
full_data = malloc_2d(height, width);
for (i = 0; i < temperature->nx; i++)
memcpy(&full_data[idx(i, 0, width)],
&temperature->data[idx(i+1, 1, temperature->ny + 2)],
temperature->ny * sizeof(double));
/* Receive data from other ranks */
for (p = 1; p < parallel->size; p++) {
MPI_Cart_coords(parallel->comm, p, 2, coords);
ix = coords[0] * temperature->nx;
jy = coords[1] * temperature->ny;
MPI_Recv(&full_data[idx(ix, jy, width)], 1,
parallel->subarraytype, p, 22,
parallel->comm, MPI_STATUS_IGNORE);
}
/* Write out the data to a png file */
sprintf(filename, "%s_%04d.png", "heat", iter);
save_png(full_data, height, width, filename, 'c');
free_2d(full_data);
} else {
/* Send data */
MPI_Ssend(temperature->data, 1,
parallel->subarraytype, 0,
22, parallel->comm);
}
}
/* Read the initial temperature distribution from a file and
* initialize the temperature fields temperature1 and
* temperature2 to the same initial state. */
void read_field(field *temperature1, field *temperature2, char *filename,
parallel_data *parallel)
{
FILE *fp;
int nx, ny, i, j;
double *full_data;
int coords[2];
int ix, jy, p;
int count;
fp = fopen(filename, "r");
/* Read the header */
count = fscanf(fp, "# %d %d \n", &nx, &ny);
if (count < 2) {
fprintf(stderr, "Error while reading the input file!\n");
MPI_Abort(MPI_COMM_WORLD, -1);
}
parallel_setup(parallel, nx, ny);
set_field_dimensions(temperature1, nx, ny, parallel);
set_field_dimensions(temperature2, nx, ny, parallel);
/* Allocate arrays (including ghost layers) */
temperature1->data =
malloc_2d(temperature1->nx + 2, temperature1->ny + 2);
temperature2->data =
malloc_2d(temperature2->nx + 2, temperature2->ny + 2);
if (parallel->rank == 0) {
/* Full array */
full_data = malloc_2d(nx, ny);
/* Read the actual data */
for (i = 0; i < nx; i++) {
for (j = 0; j < ny; j++) {
count = fscanf(fp, "%lf", &full_data[idx(i, j, ny)]);
}
}
/* Copy to own local array */
for (i = 0; i < temperature1->nx; i++) {
memcpy(&temperature1->data[idx(i+1, 1, temperature1->ny + 2)],
&full_data[idx(i, 0, ny)], temperature1->ny * sizeof(double));
}
/* Send to other processes */
for (p = 1; p < parallel->size; p++) {
MPI_Cart_coords(parallel->comm, p, 2, coords);
ix = coords[0] * temperature1->nx;
jy = coords[1] * temperature1->ny;
MPI_Send(&full_data[idx(ix, jy, ny)], 1, parallel->subarraytype,
p, 44, parallel->comm);
}
} else {
/* Receive data */
MPI_Recv(temperature1->data, 1,
parallel->subarraytype, 0,
44, parallel->comm, MPI_STATUS_IGNORE);
}
/* Set the boundary values */
for (i = 0; i < temperature1->nx + 1; i++) {
temperature1->data[idx(i, 0, temperature1->ny + 2)] =
temperature1->data[idx(i, 1, temperature1->ny + 2)];
temperature1->data[idx(i, temperature1->ny + 1, temperature1->ny + 2)] =
temperature1->data[idx(i, temperature1->ny, temperature1->ny + 2)];
}
for (j = 0; j < temperature1->ny + 2; j++) {
temperature1->data[idx(0, j, temperature1->ny + 2)] =
temperature1->data[idx(1, j, temperature1->ny + 2)];
temperature1->data[idx(temperature1->nx + 1, j, temperature1->ny + 2)] =
temperature1->data[idx(temperature1->nx, j, temperature1->ny + 2)];
}
copy_field(temperature1, temperature2);
if (parallel->rank == 0) {
free_2d(full_data);
}
fclose(fp);
}
/* Write a restart checkpoint that contains field dimensions, current
* iteration number and temperature field. */
void write_restart(field *temperature, parallel_data *parallel, int iter)
{
MPI_File fp;
MPI_Offset disp;
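/* Checkpoint layout: rank 0 writes a three-integer header (nx_full,
 * ny_full, iter), after which every rank writes its own portion of the
 * temperature field collectively using the file view and restarttype. */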
// open the file and write the dimensions
MPI_File_open(MPI_COMM_WORLD, CHECKPOINT,
MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fp);
if (parallel->rank == 0) {
MPI_File_write(fp, &temperature->nx_full, 1, MPI_INT,
MPI_STATUS_IGNORE);
MPI_File_write(fp, &temperature->ny_full, 1, MPI_INT,
MPI_STATUS_IGNORE);
MPI_File_write(fp, &iter, 1, MPI_INT, MPI_STATUS_IGNORE);
}
disp = 3 * sizeof(int);
MPI_File_set_view(fp, 0, MPI_DOUBLE, parallel->filetype, "native",
MPI_INFO_NULL);
MPI_File_write_at_all(fp, disp, temperature->data,
1, parallel->restarttype, MPI_STATUS_IGNORE);
MPI_File_close(&fp);
}
/* Read a restart checkpoint that contains field dimensions, current
* iteration number and temperature field. */
void read_restart(field *temperature, parallel_data *parallel, int *iter)
{
MPI_File fp;
MPI_Offset disp;
int nx, ny;
// open the file
MPI_File_open(MPI_COMM_WORLD, CHECKPOINT, MPI_MODE_RDONLY,
MPI_INFO_NULL, &fp);
// read grid size and current iteration
MPI_File_read_all(fp, &nx, 1, MPI_INT, MPI_STATUS_IGNORE);
MPI_File_read_all(fp, &ny, 1, MPI_INT, MPI_STATUS_IGNORE);
MPI_File_read_all(fp, iter, 1, MPI_INT, MPI_STATUS_IGNORE);
// set correct dimensions to MPI metadata
parallel_setup(parallel, nx, ny);
// set local dimensions and allocate memory for the data
set_field_dimensions(temperature, nx, ny, parallel);
allocate_field(temperature);
disp = 3 * sizeof(int);
MPI_File_set_view(fp, 0, MPI_DOUBLE, parallel->filetype, "native",
MPI_INFO_NULL);
MPI_File_read_at_all(fp, disp, temperature->data,
1, parallel->restarttype, MPI_STATUS_IGNORE);
MPI_File_close(&fp);
}
/* Heat equation solver in 2D. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <mpi.h>
#include "heat.h"
int main(int argc, char **argv)
{
double a = 0.5; //!< Diffusion constant
field current, previous; //!< Current and previous temperature fields
double dt; //!< Time step
int nsteps; //!< Number of time steps
int image_interval = 500; //!< Image output interval
int restart_interval = 200; //!< Checkpoint output interval
parallel_data parallelization; //!< Parallelization info
int iter, iter0; //!< Iteration counter
double dx2, dy2; //!< delta x and y squared
double start_clock; //!< Time stamp
MPI_Init(&argc, &argv);
initialize(argc, argv, &current, &previous, &nsteps,
&parallelization, &iter0);
/* Output the initial field */
write_field(&current, iter0, &parallelization);
iter0++;
/* Largest stable time step */
dx2 = current.dx * current.dx;
dy2 = current.dy * current.dy;
dt = dx2 * dy2 / (2.0 * a * (dx2 + dy2));
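/* This is the stability limit of the explicit scheme:
 * dt <= dx^2 * dy^2 / (2 * a * (dx^2 + dy^2)). */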
/* Get the start time stamp */
start_clock = MPI_Wtime();
/* Time evolve */
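/* Each step overlaps communication with computation: the halo exchange is
 * started, the interior points (which need no ghost data) are updated,
 * the exchange is completed, and finally the border points are updated. */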
for (iter = iter0; iter < iter0 + nsteps; iter++) {
exchange_init(&previous, &parallelization);
evolve_interior(&current, &previous, a, dt);
exchange_finalize(&parallelization);
evolve_edges(&current, &previous, a, dt);
if (iter % image_interval == 0) {
write_field(&current, iter, &parallelization);
}
/* write a checkpoint now and then for easy restarting */
if (iter % restart_interval == 0) {
write_restart(&current, &parallelization, iter);
}
/* Swap the fields so that the current field is used
as the previous one in the next iteration step */
swap_fields(&current, &previous);
}
/* Determine the elapsed wall-clock time for the iteration */
if (parallelization.rank == 0) {
printf("Iteration took %.3f seconds.\n", (MPI_Wtime() - start_clock));
printf("Reference value at 5,5: %f\n",
previous.data[idx(5, 5, current.ny + 2)]);
}
write_field(&current, iter, &parallelization);
finalize(&current, &previous, &parallelization);
MPI_Finalize();
return 0;
}
#include <png.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include "pngwriter.h"
/* Datatype for RGB pixel */
typedef struct {
uint8_t red;
uint8_t green;
uint8_t blue;
} pixel_t;
void cmap(double value, const double scaling, const double maxval,
pixel_t *pix);
static int heat_colormap[256][3] = {
{59, 76, 192}, {59, 76, 192}, {60, 78, 194}, {61, 80, 195},
{62, 81, 197}, {64, 83, 198}, {65, 85, 200}, {66, 87, 201},
{67, 88, 203}, {68, 90, 204}, {69, 92, 206}, {71, 93, 207},
{72, 95, 209}, {73, 97, 210}, {74, 99, 211}, {75, 100, 213},
{77, 102, 214}, {78, 104, 215}, {79, 105, 217}, {80, 107, 218},
{82, 109, 219}, {83, 110, 221}, {84, 112, 222}, {85, 114, 223},
{87, 115, 224}, {88, 117, 225}, {89, 119, 227}, {90, 120, 228},
{92, 122, 229}, {93, 124, 230}, {94, 125, 231}, {96, 127, 232},
{97, 129, 233}, {98, 130, 234}, {100, 132, 235}, {101, 133, 236},
{102, 135