Commit 653d505d
authored Oct 08, 2016 by Thomas Ponweiser

renamed 'wireworld' to 'cellular_automaton'; added first C code variant
parent 98b3ac44
Changes 46

cellular_automaton/c/mpitypes.c
0 → 100644
#include <stdlib.h>

#include "mpitypes.h"
#include "configuration.h"

void mpitype_conf_init(MPI_Datatype *new_type)
{
    conf_t dummy;
    int i;

    // CONF_T_* constants defined in configuration.h
    int blocklengths[] = { CONF_T_N_INT_MEMBERS, CONF_T_N_LONG_MEMBERS, CONF_T_N_CHAR_MEMBERS };
    MPI_Datatype types[] = { MPI_INT, MPI_LONG, MPI_CHAR };
    MPI_Aint displacements[3];
    MPI_Aint base;

    MPI_Get_address(&dummy, &base);
    MPI_Get_address(&dummy.CONF_T_FIRST_INT_MEMBER, &displacements[0]);
    MPI_Get_address(&dummy.CONF_T_FIRST_LONG_MEMBER, &displacements[1]);
    MPI_Get_address(&dummy.CONF_T_FIRST_CHAR_MEMBER, &displacements[2]);

    // Convert absolute addresses into offsets relative to the struct base.
    for (i = 0; i < 3; i++) displacements[i] -= base;

    // All three member blocks (int, long, char) form the derived type.
    MPI_Type_create_struct(3, blocklengths, displacements, types, new_type);
    MPI_Type_commit(new_type);
}

void mpitype_conf_free(MPI_Datatype *type)
{
    MPI_Type_free(type);
}

cellular_automaton/c/mpitypes.h
0 → 100644
#ifndef MPITYPES_H
#define MPITYPES_H

#include <mpi.h>

void mpitype_conf_init(MPI_Datatype *new_type);
void mpitype_conf_free(MPI_Datatype *type);

#endif
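
The configuration struct itself (conf_t, declared in configuration.h) is not shown on this page, so the following is only a minimal usage sketch of the API above: it assumes the common pattern in which rank 0 parses the configuration and broadcasts it to all ranks with the derived datatype. The function name broadcast_configuration is illustrative, not part of the commit.

// Hypothetical usage sketch; conf_t and its parsing live in configuration.h,
// which is not part of this page.
#include <mpi.h>
#include "mpitypes.h"
#include "configuration.h"

void broadcast_configuration(conf_t *conf)
{
    MPI_Datatype conf_type;
    mpitype_conf_init(&conf_type);
    // Rank 0's configuration is copied to all other ranks in one call.
    MPI_Bcast(conf, 1, conf_type, 0, MPI_COMM_WORLD);
    mpitype_conf_free(&conf_type);
}
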
cellular_automaton/c/simulation.c
0 → 100644
#include <mpi.h>

#include "simulation.h"

#define ELECTRON_HEAD '@'
#define ELECTRON_TAIL '~'
#define WIRE '#'

void wireworld_step(world_t *world, size_t i_start, size_t bx, size_t by);

void do_simulation(world_t *world, size_t n_generations)
{
    const size_t nx = world->local_size[0];
    const size_t ny = world->local_size[1];
    const size_t DOWN = nx + 2; // (+2 ... for halo cells)
    const size_t i_leftupper = 1 + DOWN;
    const size_t i_rightupper = nx + DOWN;
    const size_t i_leftlower = 1 + ny * DOWN;

    size_t g;
    char *tmp;

    // One block per neighbor; the subarray datatypes already encode the
    // location of each halo region, so all displacements are zero.
    const int counts[] = { 1, 1, 1, 1, 1, 1, 1, 1 };
    const MPI_Aint displs[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
    const transfer_t *transfer = &world->transfer;
    MPI_Request request;

    for (g = 0; g < n_generations; g++) {
        // Swap buffers: the last generation becomes the new "prev".
        tmp = world->cells_prev;
        world->cells_prev = world->cells_next;
        world->cells_next = tmp;

        // Start halo exchange
        MPI_Ineighbor_alltoallw(
            world->cells_prev, counts, displs, transfer->send_types,
            world->cells_prev, counts, displs, transfer->recv_types,
            transfer->graph_comm, &request
        );

        // Compute inner region
        wireworld_step(world, i_leftupper + 1 + DOWN, nx - 2, ny - 2);

        // Finish halo exchange
        MPI_Wait(&request, MPI_STATUS_IGNORE);

        // Compute boundary regions
        wireworld_step(world, i_leftupper, nx, 1);             // upper
        wireworld_step(world, i_leftlower, nx, 1);             // lower
        wireworld_step(world, i_leftupper + DOWN, 1, ny - 2);  // left
        wireworld_step(world, i_rightupper + DOWN, 1, ny - 2); // right

        /* Blocking variant:
        MPI_Neighbor_alltoallw(
            world->cells_prev, counts, displs, transfer->send_types,
            world->cells_prev, counts, displs, transfer->recv_types,
            transfer->graph_comm
        );
        wireworld_step(world, i_leftupper, nx, ny);
        */
    }
}

void wireworld_step(world_t *world, size_t i_start, size_t bx, size_t by)
{
    // Index offsets to neighboring cells; with unsigned size_t, -1 and -D
    // rely on modular wrap-around and still yield i - 1 and i - D below.
    const size_t L = -1, R = 1;
    const size_t D = world->local_size[0] + 2; // (+2 ... for halo cells)
    const size_t U = -D;

    size_t x, y, i;
    int nheads;

    char *prev = world->cells_prev;
    char *next = world->cells_next;

    for (y = 0; y < by; y++) {
        i = i_start;
        for (x = 0; x < bx; x++) {
            switch (prev[i]) {
            // Electron heads become electron tails.
            case ELECTRON_HEAD:
                next[i] = ELECTRON_TAIL;
                break;
            // Electron tails become copper.
            case ELECTRON_TAIL:
                next[i] = WIRE;
                break;
            // New electron head replacing copper,
            // if 1 or 2 electron heads are in neighborhood.
            case WIRE:
                nheads =
                    (prev[i + L + U] == ELECTRON_HEAD) +
                    (prev[i + U]     == ELECTRON_HEAD) +
                    (prev[i + R + U] == ELECTRON_HEAD) +
                    (prev[i + L]     == ELECTRON_HEAD) +
                    (prev[i + R]     == ELECTRON_HEAD) +
                    (prev[i + L + D] == ELECTRON_HEAD) +
                    (prev[i + D]     == ELECTRON_HEAD) +
                    (prev[i + R + D] == ELECTRON_HEAD);
                if (nheads == 1 || nheads == 2) {
                    next[i] = ELECTRON_HEAD;
                } else {
                    next[i] = WIRE;
                }
                break;
            default:
                break;
            }
            i++;
        }
        i_start += D;
    }
}

cellular_automaton/c/simulation.h
0 → 100644
#ifndef _AUTOMATON_H_
#define _AUTOMATON_H_

#include "world.h"

void do_simulation(world_t *world, size_t n_generations);

#endif
cellular_automaton/c/world.c
0 → 100644
#include <stdlib.h>
#include <string.h>

#include "world.h"

void world_init_io_type(world_t *world);
void world_free_io_type(world_t *world);
void world_init_neighborhood(world_t *world, MPI_Comm cart_comm, int nprocs[], int proc_coord[]);
void world_free_neighborhood(world_t *world);

void world_init(world_t *world, MPI_Comm cart_comm, size_t *global_size)
{
    int dim, lo, hi;
    int nprocs[2], periods[2], proc_coord[2];
    size_t storage_size;

    MPI_Cart_get(cart_comm, 2, nprocs, periods, proc_coord);

    for (dim = 0; dim < 2; dim++) {
        // Block distribution: process p owns [p*N/P, (p+1)*N/P).
        lo = (proc_coord[dim] + 0) * global_size[dim] / nprocs[dim];
        hi = (proc_coord[dim] + 1) * global_size[dim] / nprocs[dim];
        world->global_size[dim] = global_size[dim];
        world->local_size[dim] = hi - lo;
        world->local_start[dim] = lo;
    }

    storage_size = world_get_storage_size(world);
    world->cells_prev = malloc(storage_size);
    world->cells_next = malloc(storage_size);
    memset(world->cells_prev, ' ', storage_size);
    memset(world->cells_next, ' ', storage_size);

    world_init_io_type(world);
    world_init_neighborhood(world, cart_comm, nprocs, proc_coord);
}

void world_free(world_t *world)
{
    world_free_io_type(world);
    world_free_neighborhood(world);
    free(world->cells_prev);
    free(world->cells_next);
    world->cells_prev = NULL;
    world->cells_next = NULL;
}

void world_init_io_type(world_t *world)
{
    const int nx = world->local_size[0], ny = world->local_size[1];
    const int sizes[] = { nx + 2, ny + 2 };
    const int subsizes[] = { nx, ny };
    const int starts[] = { 1, 1 };

    // Subarray selecting the interior (non-halo) cells of the local buffer.
    MPI_Type_create_subarray(2, sizes, subsizes, starts,
        MPI_ORDER_FORTRAN, MPI_CHAR, &world->transfer.io_type);
    MPI_Type_commit(&world->transfer.io_type);
}

void world_free_io_type(world_t *world)
{
    MPI_Type_free(&world->transfer.io_type);
}

void world_init_neighborhood(world_t *world, MPI_Comm cart_comm, int nprocs[], int proc_coord[])
{
    const int px = proc_coord[0], py = proc_coord[1];
    const int npx = nprocs[0], npy = nprocs[1];
    const int nx = world->local_size[0], ny = world->local_size[1];

    struct halo_info_s {
        int proc_coord[2];
        int subsizes[2];
        int send_starts[2];
        int recv_starts[2];
    };

    const struct halo_info_s halo[] = {
        // Target Proc       | Subsize   | Send start | Recv start
        { { px - 1, py - 1 }, { 1,  1  }, { 1,  1  },  { 0,      0      } }, // left upper
        { { px,     py - 1 }, { nx, 1  }, { 1,  1  },  { 1,      0      } }, // upper
        { { px + 1, py - 1 }, { 1,  1  }, { nx, 1  },  { nx + 1, 0      } }, // right upper
        { { px - 1, py     }, { 1,  ny }, { 1,  1  },  { 0,      1      } }, // left
        { { px + 1, py     }, { 1,  ny }, { nx, 1  },  { nx + 1, 1      } }, // right
        { { px - 1, py + 1 }, { 1,  1  }, { 1,  ny },  { 0,      ny + 1 } }, // left lower
        { { px,     py + 1 }, { nx, 1  }, { 1,  ny },  { 1,      ny + 1 } }, // lower
        { { px + 1, py + 1 }, { 1,  1  }, { nx, ny },  { nx + 1, ny + 1 } }, // right lower
    };

    size_t i, n;
    const int sizes[] = { nx + 2, ny + 2 };
    int neighbor_ranks[8];
    int weights[8];
    MPI_Datatype *send_types = world->transfer.send_types;
    MPI_Datatype *recv_types = world->transfer.recv_types;

    n = 0;
    for (i = 0; i < 8; i++) {
        int x = halo[i].proc_coord[0];
        int y = halo[i].proc_coord[1];
        // Bounds check (valid neighbor?)
        if (x >= 0 && x < npx && y >= 0 && y < npy) {
            int neighbor_rank;

            // Create send and receive type
            MPI_Type_create_subarray(2, sizes, halo[i].subsizes, halo[i].send_starts,
                MPI_ORDER_FORTRAN, MPI_CHAR, &send_types[n]);
            MPI_Type_commit(&send_types[n]);
            MPI_Type_create_subarray(2, sizes, halo[i].subsizes, halo[i].recv_starts,
                MPI_ORDER_FORTRAN, MPI_CHAR, &recv_types[n]);
            MPI_Type_commit(&recv_types[n]);

            // Get rank of neighbor
            MPI_Cart_rank(cart_comm, halo[i].proc_coord, &neighbor_rank);
            neighbor_ranks[n] = neighbor_rank;
            weights[n] = halo[i].subsizes[0] * halo[i].subsizes[1];
            n++;
        }
    }
    world->transfer.n_neighbors = n;

    // Create graph communicator
    {
        const int allow_reorder = 0;
        MPI_Dist_graph_create_adjacent(
            cart_comm,
            n, neighbor_ranks, weights,
            n, neighbor_ranks, weights,
            MPI_INFO_NULL, allow_reorder,
            &world->transfer.graph_comm
        );
    }
}

void world_free_neighborhood(world_t *world)
{
    int i;
    const int n = world->transfer.n_neighbors;
    MPI_Datatype *send_types = world->transfer.send_types;
    MPI_Datatype *recv_types = world->transfer.recv_types;

    for (i = 0; i < n; i++) {
        MPI_Type_free(&send_types[i]);
        MPI_Type_free(&recv_types[i]);
    }
    MPI_Comm_free(&world->transfer.graph_comm);
}

size_t world_get_storage_size(const world_t *world)
{
    int nx = world->local_size[0], ny = world->local_size[1];
    return (nx + 2) * (ny + 2) * sizeof(char);
}
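
The io_type built in world_init_io_type selects only the interior (non-halo) cells of the local buffer. How the project actually performs I/O is not visible on this page; below is a hypothetical sketch of the standard MPI-IO pattern it could be combined with: a second subarray type places the local block inside the global grid and serves as the file view. The function name world_write and the file name are illustrative, not part of the commit.

// Hypothetical sketch only: writes the local interior cells into a shared
// binary file laid out as the global (column-major) grid.
#include <mpi.h>
#include "world.h"

void world_write(const world_t *world, MPI_Comm cart_comm, const char *filename)
{
    const int gsizes[] = { (int)world->global_size[0], (int)world->global_size[1] };
    const int lsizes[] = { (int)world->local_size[0],  (int)world->local_size[1]  };
    const int starts[] = { (int)world->local_start[0], (int)world->local_start[1] };
    MPI_Datatype filetype;
    MPI_File fh;

    // Placement of this rank's block within the global grid (the file side).
    MPI_Type_create_subarray(2, gsizes, lsizes, starts,
        MPI_ORDER_FORTRAN, MPI_CHAR, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File_open(cart_comm, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY,
        MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_CHAR, filetype, "native", MPI_INFO_NULL);
    // io_type strips the halo cells from the local buffer on the memory side.
    MPI_File_write_all(fh, world->cells_next, 1, world->transfer.io_type,
        MPI_STATUS_IGNORE);
    MPI_File_close(&fh);
    MPI_Type_free(&filetype);
}
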
cellular_automaton/c/world.h
0 → 100644
#ifndef _CELLS_H_
#define _CELLS_H_

#include <stddef.h>
#include <mpi.h>

typedef struct {
    size_t n_neighbors;
    MPI_Comm graph_comm;
    MPI_Datatype send_types[8];
    MPI_Datatype recv_types[8];
    MPI_Datatype io_type;
} transfer_t;

typedef struct {
    size_t global_size[2];
    size_t local_size[2];
    size_t local_start[2];
    char *cells_prev;
    char *cells_next;
    transfer_t transfer;
} world_t;

void world_init(world_t *world, MPI_Comm cart_comm, size_t *global_size);
size_t world_get_storage_size(const world_t *world);
void world_free(world_t *world);

#endif
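
The remaining files of this commit (including the main driver) are not shown on this page. The following minimal driver is a hypothetical sketch only, meant to illustrate how the world_t / do_simulation API fits together with a 2-D Cartesian communicator; the grid size and generation count are made up.

// Hypothetical driver sketch; the commit's real main and configuration
// handling are in the files not shown here.
#include <mpi.h>
#include "world.h"
#include "simulation.h"

int main(int argc, char *argv[])
{
    size_t global_size[] = { 640, 480 };  // illustrative grid size
    int dims[2] = { 0, 0 }, periods[2] = { 0, 0 }, nranks;
    MPI_Comm cart_comm;
    world_t world;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);
    MPI_Dims_create(nranks, 2, dims);
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);

    world_init(&world, cart_comm, global_size);
    // (The initial pattern would be loaded into world.cells_next here,
    //  since do_simulation swaps the buffers before each generation.)
    do_simulation(&world, 100);

    world_free(&world);
    MPI_Comm_free(&cart_comm);
    MPI_Finalize();
    return 0;
}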