Jussi Enkovaara / MPI / Commits

Commit 7c82d98b
Authored Jul 02, 2018 by Jussi Enkovaara

    Python version of 2D heat equation solver

Parent: 137ef070
Changes: 9 files

heat-equation/python/bottle.dat  (new file, mode 100644)

    [Data file; too large to display in the diff, view the blob in the repository instead.]

heat-equation/python/evolve.py  (new file, mode 100644)

import numpy as np


# Time evolution for the inner part of the grid
def evolve_inner(u, u_previous, a, dt, dx2, dy2):
    """Explicit time evolution.
    u:          new temperature field
    u_previous: previous field
    a:          diffusion constant
    dt:         time step
    dx2:        grid spacing squared, i.e. dx^2
    dy2:          -- "" --          , i.e. dy^2"""

    u[2:-2, 2:-2] = u_previous[2:-2, 2:-2] + a * dt * (
        (u_previous[3:-1, 2:-2] - 2 * u_previous[2:-2, 2:-2] +
         u_previous[1:-3, 2:-2]) / dx2 +
        (u_previous[2:-2, 3:-1] - 2 * u_previous[2:-2, 2:-2] +
         u_previous[2:-2, 1:-3]) / dy2)


# Time evolution for the edges of the grid
def evolve_edges(u, u_previous, a, dt, dx2, dy2):
    """Explicit time evolution.
    u:          new temperature field
    u_previous: previous field
    a:          diffusion constant
    dt:         time step
    dx2:        grid spacing squared, i.e. dx^2
    dy2:          -- "" --          , i.e. dy^2"""

    u[1, 1:-1] = u_previous[1, 1:-1] + a * dt * (
        (u_previous[2, 1:-1] - 2 * u_previous[1, 1:-1] +
         u_previous[0, 1:-1]) / dx2 +
        (u_previous[1, 2:] - 2 * u_previous[1, 1:-1] +
         u_previous[1, :-2]) / dy2)
    u[-2, 1:-1] = u_previous[-2, 1:-1] + a * dt * (
        (u_previous[-1, 1:-1] - 2 * u_previous[-2, 1:-1] +
         u_previous[-3, 1:-1]) / dx2 +
        (u_previous[-2, 2:] - 2 * u_previous[-2, 1:-1] +
         u_previous[-2, :-2]) / dy2)
    u[1:-1, 1] = u_previous[1:-1, 1] + a * dt * (
        (u_previous[2:, 1] - 2 * u_previous[1:-1, 1] +
         u_previous[:-2, 1]) / dx2 +
        (u_previous[1:-1, 2] - 2 * u_previous[1:-1, 1] +
         u_previous[1:-1, 0]) / dy2)
    u[1:-1, -2] = u_previous[1:-1, -2] + a * dt * (
        (u_previous[2:, -2] - 2 * u_previous[1:-1, -2] +
         u_previous[:-2, -2]) / dx2 +
        (u_previous[1:-1, -1] - 2 * u_previous[1:-1, -2] +
         u_previous[1:-1, -3]) / dy2)
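
Both functions implement the standard explicit (forward-Euler in time, central-difference in space) update of the two-dimensional heat equation. Written out for a single grid point, the update used in evolve.py and evolve.pyx is:

    u^{n+1}_{i,j} = u^{n}_{i,j} + a\,\Delta t \left[
        \frac{u^{n}_{i+1,j} - 2 u^{n}_{i,j} + u^{n}_{i-1,j}}{\Delta x^2}
      + \frac{u^{n}_{i,j+1} - 2 u^{n}_{i,j} + u^{n}_{i,j-1}}{\Delta y^2} \right]

evolve_inner touches only points at least two cells away from the local array edges, so it never reads the ghost layer and can run while the halo exchange is still in flight; evolve_edges updates the remaining outermost interior rows and columns once the exchange has completed.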

heat-equation/python/evolve.pyx  (new file, mode 100644)

# cython: profile=True

import numpy as np
cimport numpy as cnp
import cython


# Time evolution for the inner part of the grid
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cpdef evolve_inner(cnp.ndarray[cnp.double_t, ndim=2] u,
                   cnp.ndarray[cnp.double_t, ndim=2] u_previous,
                   double a, double dt, double dx2, double dy2):
    """Explicit time evolution.
    u:          new temperature field
    u_previous: previous field
    a:          diffusion constant
    dt:         time step. """

    cdef int nx = u.shape[0] - 2
    cdef int ny = u.shape[1] - 2
    cdef int i, j

    # Multiplication is more efficient than division
    cdef double dx2inv = 1. / dx2
    cdef double dy2inv = 1. / dy2

    for i in range(2, nx):
        for j in range(2, ny):
            u[i, j] = u_previous[i, j] + a * dt * (
                (u_previous[i + 1, j] - 2 * u_previous[i, j] +
                 u_previous[i - 1, j]) * dx2inv +
                (u_previous[i, j + 1] - 2 * u_previous[i, j] +
                 u_previous[i, j - 1]) * dy2inv)


# Time evolution for the edges of the grid
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
cpdef evolve_edges(cnp.ndarray[cnp.double_t, ndim=2] u,
                   cnp.ndarray[cnp.double_t, ndim=2] u_previous,
                   double a, double dt, double dx2, double dy2):
    """Explicit time evolution.
    u:          new temperature field
    u_previous: previous field
    a:          diffusion constant
    dt:         time step
    dx2:        grid spacing squared, i.e. dx^2
    dy2:          -- "" --          , i.e. dy^2"""

    cdef int nx = u.shape[0] - 2
    cdef int ny = u.shape[1] - 2
    cdef int i, j

    # Multiplication is more efficient than division
    cdef double dx2inv = 1. / dx2
    cdef double dy2inv = 1. / dy2

    j = 1
    for i in range(1, nx + 1):
        u[i, j] = u_previous[i, j] + a * dt * (
            (u_previous[i + 1, j] - 2 * u_previous[i, j] +
             u_previous[i - 1, j]) * dx2inv +
            (u_previous[i, j + 1] - 2 * u_previous[i, j] +
             u_previous[i, j - 1]) * dy2inv)
    j = ny
    for i in range(1, nx + 1):
        u[i, j] = u_previous[i, j] + a * dt * (
            (u_previous[i + 1, j] - 2 * u_previous[i, j] +
             u_previous[i - 1, j]) * dx2inv +
            (u_previous[i, j + 1] - 2 * u_previous[i, j] +
             u_previous[i, j - 1]) * dy2inv)
    i = 1
    for j in range(1, ny + 1):
        u[i, j] = u_previous[i, j] + a * dt * (
            (u_previous[i + 1, j] - 2 * u_previous[i, j] +
             u_previous[i - 1, j]) * dx2inv +
            (u_previous[i, j + 1] - 2 * u_previous[i, j] +
             u_previous[i, j - 1]) * dy2inv)
    i = nx
    for j in range(1, ny + 1):
        u[i, j] = u_previous[i, j] + a * dt * (
            (u_previous[i + 1, j] - 2 * u_previous[i, j] +
             u_previous[i - 1, j]) * dx2inv +
            (u_previous[i, j + 1] - 2 * u_previous[i, j] +
             u_previous[i, j - 1]) * dy2inv)
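
evolve.pyx is a typed Cython translation of evolve.py: the slice arithmetic becomes explicit loops, bounds checking and wraparound are disabled, and the divisions are replaced by precomputed reciprocals. A hypothetical sanity check (not part of the commit) that compares the compiled evolve_inner against the NumPy slicing form on a small array; it assumes the extension has been built (see setup.py below) so that `import evolve` picks up the compiled module:

    import numpy as np
    import evolve  # the compiled Cython extension

    rng = np.random.default_rng(0)
    u_prev = rng.random((10, 12))
    a, dt, dx2, dy2 = 0.5, 1e-5, 0.01**2, 0.01**2

    # Cython version
    u_cy = u_prev.copy()
    evolve.evolve_inner(u_cy, u_prev, a, dt, dx2, dy2)

    # Pure NumPy reference (the slicing form from evolve.py)
    u_ref = u_prev.copy()
    u_ref[2:-2, 2:-2] = u_prev[2:-2, 2:-2] + a * dt * (
        (u_prev[3:-1, 2:-2] - 2 * u_prev[2:-2, 2:-2] + u_prev[1:-3, 2:-2]) / dx2 +
        (u_prev[2:-2, 3:-1] - 2 * u_prev[2:-2, 2:-2] + u_prev[2:-2, 1:-3]) / dy2)

    assert np.allclose(u_cy, u_ref)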

heat-equation/python/exchange.py  (new file, mode 100644)

from mpi4py.MPI import Request
import numpy as np


# Initiate the non-blocking halo exchange between neighbouring MPI tasks
def exchange_init(u, parallel):
    # Send to the up, receive from down
    parallel.requests[0] = parallel.comm.Isend((u[1, :], 1, parallel.rowtype),
                                               dest=parallel.nup)
    parallel.requests[1] = parallel.comm.Irecv((u[-1, :], 1, parallel.rowtype),
                                               source=parallel.ndown)
    # Send to the down, receive from up
    parallel.requests[2] = parallel.comm.Isend((u[-2, :], 1, parallel.rowtype),
                                               dest=parallel.ndown)
    parallel.requests[3] = parallel.comm.Irecv((u[0, :], 1, parallel.rowtype),
                                               source=parallel.nup)
    # Send to the left, receive from right
    parallel.requests[4] = parallel.comm.Isend((u.ravel()[1:], 1,
                                                parallel.columntype),
                                               dest=parallel.nleft)
    idx = u.shape[1] - 1  # ny + 1
    parallel.requests[5] = parallel.comm.Irecv((u.ravel()[idx:], 1,
                                                parallel.columntype),
                                               source=parallel.nright)
    # Send to the right, receive from left
    idx = u.shape[1] - 2  # ny
    parallel.requests[6] = parallel.comm.Isend((u.ravel()[idx:], 1,
                                                parallel.columntype),
                                               dest=parallel.nright)
    parallel.requests[7] = parallel.comm.Irecv((u, 1, parallel.columntype),
                                               source=parallel.nleft)


# Wait until all halo exchange requests have completed
def exchange_finalize(parallel):
    # MPI.Request.Waitall(parallel.requests)
    Request.Waitall(parallel.requests)
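
The row exchanges pass whole rows (u[1, :], u[-2, :] and the ghost rows u[0, :], u[-1, :]) with rowtype. The column exchanges instead pass offsets into the flattened buffer together with the strided columntype defined in parallel.py: for a C-contiguous local array of shape (nx_local + 2, ny_local + 2), column j starts at flat offset j and repeats every ny_local + 2 elements, so u.ravel()[1:] addresses the first real column, u.ravel()[ny_local:] the last real column, and offsets 0 and ny_local + 1 the ghost columns (u.ravel() is a view here because the field is contiguous). A small standalone illustration of that offset arithmetic, not part of the commit:

    import numpy as np

    nx_local, ny_local = 4, 3   # toy local grid size (without the ghost layer)
    u = np.arange((nx_local + 2) * (ny_local + 2),
                  dtype=float).reshape(nx_local + 2, ny_local + 2)

    flat = u.ravel()
    stride = ny_local + 2       # elements between consecutive entries of a column
    # columntype = MPI.DOUBLE.Create_vector(nx_local + 2, 1, ny_local + 2)
    # describes exactly this pattern: one double every `stride` elements.
    for j in (0, 1, ny_local, ny_local + 1):
        assert np.array_equal(flat[j::stride], u[:, j])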

heat-equation/python/heat.py  (new file, mode 100644)

from __future__ import print_function
import numpy as np
import time

from heat_setup import initialize
from evolve import evolve_inner, evolve_edges
from exchange import exchange_init, exchange_finalize
from heat_io import write_field, write_restart

# Basic parameters
a = 0.5                 # Diffusion constant
image_interval = 100    # Write frequency for png files
restart_interval = 200  # Write frequency for restart files

# Grid spacings
dx = 0.01
dy = 0.01
dx2 = dx**2
dy2 = dy**2

# For stability, this is the largest interval possible
# for the size of the time-step:
dt = dx2 * dy2 / (2 * a * (dx2 + dy2))


def main():
    # Initialize
    field, field0, parallel, iter0, timesteps = initialize()
    write_field(field, parallel, iter0)

    t0 = time.time()
    for iter in range(iter0, iter0 + timesteps):
        exchange_init(field0, parallel)
        evolve_inner(field, field0, a, dt, dx2, dy2)
        exchange_finalize(parallel)
        evolve_edges(field, field0, a, dt, dx2, dy2)
        if iter % image_interval == 0:
            write_field(field, parallel, iter)
        if iter % restart_interval == 0:
            write_restart(field, parallel, iter)
        field, field0 = field0, field
    t1 = time.time()

    if parallel.rank == 0:
        print("Running time: {0}".format(t1 - t0))


if __name__ == '__main__':
    main()
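
The time step chosen above is the stability limit of the explicit scheme:

    \Delta t = \frac{\Delta x^2 \, \Delta y^2}{2 a \left( \Delta x^2 + \Delta y^2 \right)}

The main loop overlaps communication with computation: exchange_init posts the non-blocking halo sends and receives, evolve_inner updates the halo-independent interior, exchange_finalize waits on the requests, and only then does evolve_edges update the rows and columns that read the ghost layer. The field and field0 arrays are swapped at the end of every step.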

heat-equation/python/heat_io.py  (new file, mode 100644)

from __future__ import print_function
import numpy as np
from mpi4py import MPI

from parallel import Parallel

try:
    import h5py
    use_hdf5 = True
except ImportError:
    use_hdf5 = False

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

# Set the colormap
plt.rcParams['image.cmap'] = 'BrBG'


def write_field(field, parallel, step):
    nx, ny = [i - 2 for i in field.shape]
    nx_full = parallel.dims[0] * nx
    ny_full = parallel.dims[1] * ny
    if parallel.rank == 0:
        field_full = np.zeros((nx_full, ny_full), float)
        field_full[:nx, :ny] = field[1:-1, 1:-1]
        # Receive data from other ranks
        for p in range(1, parallel.size):
            coords = parallel.comm.Get_coords(p)
            idx = coords[0] * nx * ny_full + coords[1] * ny
            parallel.comm.Recv((field_full.ravel()[idx:], 1,
                                parallel.subarraytype), source=p)
        # Write out the data
        plt.figure()
        plt.gca().clear()
        plt.imshow(field_full)
        plt.axis('off')
        plt.savefig('heat_{0:04d}.png'.format(step))
    else:
        # Send the data
        parallel.comm.Ssend((field, 1, parallel.subarraytype), dest=0)


def read_field(filename):
    # Read the initial temperature field from file
    rank = MPI.COMM_WORLD.Get_rank()
    if rank == 0:
        with open(filename) as f:
            line = f.readline().split()
            shape = [int(n) for n in line[1:3]]
    else:
        shape = None
    nx, ny = MPI.COMM_WORLD.bcast(shape, root=0)
    parallel = Parallel(nx, ny)

    nx_local = nx / parallel.dims[0]
    ny_local = ny / parallel.dims[1]

    field = np.zeros((nx_local + 2, ny_local + 2), dtype=float)
    if parallel.rank == 0:
        field_full = np.loadtxt(filename)
        field[1:-1, 1:-1] = field_full[:nx_local, :ny_local]
        # Send the data to other ranks
        for p in range(1, parallel.size):
            coords = parallel.comm.Get_coords(p)
            idx = coords[0] * nx_local * ny + coords[1] * ny_local
            parallel.comm.Send((field_full.ravel()[idx:], 1,
                                parallel.subarraytype), dest=p)
    else:
        parallel.comm.Recv((field, 1, parallel.subarraytype), source=0)
        # fo = np.empty((nx_local, ny_local), dtype=float)
        # parallel.comm.Recv(fo, source=0)
        # import pickle
        # pickle.dump(fo, open('fo_f{0}.pckl'.format(parallel.rank), 'w'))

    # Set the boundary values
    field[0, :] = field[1, :]
    field[-1, :] = field[-2, :]
    field[:, 0] = field[:, 1]
    field[:, -1] = field[:, -2]

    field0 = field.copy()

    return field, field0, parallel


def write_restart(field, parallel, iter):
    nx, ny = [i - 2 for i in field.shape]
    nx_full = parallel.dims[0] * nx
    ny_full = parallel.dims[1] * ny

    mode = MPI.MODE_CREATE | MPI.MODE_WRONLY
    fh = MPI.File.Open(parallel.comm, "HEAT_RESTART.dat", mode)
    if parallel.rank == 0:
        fh.Write(np.array((nx_full, ny_full, iter)))
    disp = 3 * 8  # Python ints are 8 bytes
    fh.Set_view(disp, MPI.DOUBLE, parallel.filetype)
    fh.Write_all((field, 1, parallel.restarttype))
    fh.Close()


def read_restart():
    mode = MPI.MODE_RDONLY
    fh = MPI.File.Open(MPI.COMM_WORLD, "HEAT_RESTART.dat", mode)
    buf = np.zeros(3, int)
    fh.Read_all(buf)
    nx, ny, iter = buf[0], buf[1], buf[2]

    parallel = Parallel(nx, ny)
    nx_local = nx / parallel.dims[0]
    ny_local = ny / parallel.dims[1]
    if parallel.rank == 0:
        print("Restarting from iteration ", iter)

    field = np.zeros((nx_local + 2, ny_local + 2), dtype=float)
    disp = 3 * 8  # Python ints are 8 bytes
    fh.Set_view(disp, MPI.DOUBLE, parallel.filetype)
    fh.Read_all((field, 1, parallel.restarttype))
    fh.Close()

    field0 = field.copy()

    return field, field0, parallel, iter
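
write_restart produces a flat binary file: a header of three default-size integers (nx_full, ny_full, iter) followed by the full (nx_full + 2) x (ny_full + 2) field in double precision, assembled collectively through parallel.filetype. A hypothetical serial reader of that file, not part of the commit (like the `disp = 3 * 8` comment, it assumes the default integer is 8 bytes):

    import numpy as np

    with open('HEAT_RESTART.dat', 'rb') as f:
        nx_full, ny_full, last_iter = np.fromfile(f, dtype=np.int64, count=3)
        data = np.fromfile(f, dtype=np.float64)

    field = data.reshape(nx_full + 2, ny_full + 2)
    print('restart written at iteration', last_iter, '; field shape', field.shape)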

heat-equation/python/heat_setup.py  (new file, mode 100644)

import numpy as np
import sys
from os.path import isfile

from parallel import Parallel
from heat_io import read_restart, read_field


def initialize():
    timesteps = 500  # Number of time-steps to evolve system
    nx = 1000
    ny = 1000

    if isfile('HEAT_RESTART.dat'):
        field, field0, parallel, iter0 = read_restart()
    else:
        if len(sys.argv) == 2:
            filename = sys.argv[1]
            field, field0, parallel = read_field(filename)
        elif len(sys.argv) == 3:
            filename = sys.argv[1]
            timesteps = int(sys.argv[2])
            field, field0, parallel = read_field(filename)
        elif len(sys.argv) == 4:
            nx = int(sys.argv[1])
            ny = int(sys.argv[2])
            timesteps = int(sys.argv[3])
            field, field0, parallel = generate_field(nx, ny)
        else:
            field, field0, parallel = generate_field(nx, ny)
        iter0 = 0

    return field, field0, parallel, iter0, timesteps


def generate_field(nx, ny):
    parallel = Parallel(nx, ny)
    nx_local = nx / parallel.dims[0]
    ny_local = ny / parallel.dims[1]

    field = np.zeros((nx_local + 2, ny_local + 2), dtype=float)
    # Square of the disk radius
    radius2 = (nx / 6.0)**2

    X, Y = np.meshgrid(np.arange(nx_local + 2), np.arange(ny_local + 2),
                       indexing='ij')
    ds2 = (X + parallel.coords[0] * nx_local - nx / 2.0)**2 + \
          (Y + parallel.coords[1] * ny_local - ny / 2.0)**2
    field[:] = np.where(ds2 < radius2, 5.0, 65.0)

    # Boundaries
    if parallel.coords[0] == 0:
        field[0, :] = 85.0
    if parallel.coords[0] == parallel.dims[0] - 1:
        field[-1, :] = 5.0
    if parallel.coords[1] == 0:
        field[:, 0] = 20.0
    if parallel.coords[1] == parallel.dims[1] - 1:
        field[:, -1] = 70.0

    field0 = field.copy()

    return field, field0, parallel
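
initialize() picks the problem setup from the command line, or resumes from HEAT_RESTART.dat when one exists. Under a typical MPI launcher the supported invocations are, for example:

    mpirun -np 4 python heat.py                    # generated 1000 x 1000 field, 500 steps
    mpirun -np 4 python heat.py bottle.dat         # initial field read from file
    mpirun -np 4 python heat.py bottle.dat 1000    # field from file, 1000 steps
    mpirun -np 4 python heat.py 2000 2000 500      # generated 2000 x 2000 field, 500 steps

The launcher name and process count are illustrative; the grid dimensions must divide evenly by the process grid chosen in parallel.py below.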

heat-equation/python/parallel.py  (new file, mode 100644)

from mpi4py import MPI


class Parallel(object):

    def __init__(self, nx, ny):
        self.requests = []
        world = MPI.COMM_WORLD
        world_size = world.Get_size()
        self.dims = MPI.Compute_dims(world_size, (0, 0))
        nx_local = nx / self.dims[0]
        ny_local = ny / self.dims[1]
        if nx_local * self.dims[0] != nx:
            print("Cannot divide grid evenly to processors in x-direction!")
            print("{0} x {1} != {2}".format(nx_local, self.dims[0], nx))
            raise RuntimeError('Invalid domain decomposition')
        if ny_local * self.dims[1] != ny:
            print("Cannot divide grid evenly to processors in y-direction!")
            print("{0} x {1} != {2}".format(ny_local, self.dims[1], ny))
            raise RuntimeError('Invalid domain decomposition')

        self.comm = world.Create_cart(self.dims, periods=None, reorder=True)
        self.nup, self.ndown = self.comm.Shift(0, 1)
        self.nleft, self.nright = self.comm.Shift(1, 1)
        self.size = self.comm.Get_size()
        self.rank = self.comm.Get_rank()
        self.coords = self.comm.coords

        if self.rank == 0:
            print("Using domain decomposition {0} x {1}".format(self.dims[0],
                                                                self.dims[1]))
            print("Local domain size {0} x {1}".format(nx_local, ny_local))

        # Maybe there is more elegant way to initialize the requests array?
        self.requests = [0, ] * 8

        # Datatypes for halo exchange
        self.rowtype = MPI.DOUBLE.Create_contiguous(ny_local + 2)
        self.rowtype.Commit()
        self.columntype = MPI.DOUBLE.Create_vector(nx_local + 2, 1,
                                                   ny_local + 2)
        self.columntype.Commit()

        # Datatypes for subblock needed in text I/O
        # Rank 0 uses datatype for receiving data into full array while
        # other ranks use datatype for sending the inner part of array
        subsizes = [nx_local, ny_local]
        if self.rank == 0:
            sizes = [nx, ny]
            offsets = [0, 0]
        else:
            sizes = [nx_local + 2, ny_local + 2]
            offsets = [1, 1]
        self.subarraytype = MPI.DOUBLE.Create_subarray(sizes, subsizes,
                                                       offsets)
        self.subarraytype.Commit()

        # Datatypes for restart I/O
        sizes = [nx + 2, ny + 2]
        offsets = [1 + self.coords[0] * nx_local,
                   1 + self.coords[1] * ny_local]
        if self.coords[0] == 0:
            offsets[0] -= 1
            subsizes[0] += 1
        if self.coords[0] == (self.dims[0] - 1):
            subsizes[0] += 1
        if self.coords[1] == 0:
            offsets[1] -= 1
            subsizes[1] += 1
        if self.coords[1] == (self.dims[1] - 1):
            subsizes[1] += 1
        self.filetype = MPI.DOUBLE.Create_subarray(sizes, subsizes, offsets)
        self.filetype.Commit()

        sizes = [nx_local + 2, ny_local + 2]
        offsets = [1, 1]
        if self.coords[0] == 0:
            offsets[0] = 0
        if self.coords[1] == 0:
            offsets[1] = 0
        self.restarttype = MPI.DOUBLE.Create_subarray(sizes, subsizes,
                                                      offsets)
        self.restarttype.Commit()
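
MPI.Compute_dims factors the total process count into a near-square 2D process grid, and the constructor then insists that nx and ny divide evenly by it. Note that nx_local and ny_local are computed with `/`, which is integer division under Python 2 (the era this code targets, as the `from __future__ import print_function` imports suggest); under Python 3 these would need to be `//`. A small standalone illustration of the factorization, not part of the commit (exact results may vary between MPI implementations):

    from mpi4py import MPI   # importing mpi4py initializes MPI

    for nprocs in (1, 2, 4, 6, 12, 16):
        print(nprocs, '->', MPI.Compute_dims(nprocs, [0, 0]))
    # Typically: 1 -> [1, 1], 2 -> [2, 1], 4 -> [2, 2],
    #            6 -> [3, 2], 12 -> [4, 3], 16 -> [4, 4]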

heat-equation/python/setup.py  (new file, mode 100644)

from distutils.core import setup, Extension
from Cython.Build import cythonize

setup(
    ext_modules=cythonize("evolve.pyx"),
)
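
The extension is typically built in place with `python setup.py build_ext --inplace`, after which `import evolve` in heat.py normally resolves to the compiled module rather than the pure-Python evolve.py. Because evolve.pyx does `cimport numpy`, some environments also need the NumPy headers passed explicitly; a possible variant, illustration only and not part of the commit:

    from distutils.core import setup
    from Cython.Build import cythonize
    import numpy

    setup(
        ext_modules=cythonize("evolve.pyx"),
        include_dirs=[numpy.get_include()],
    )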