diff --git a/dynamic_sparse_data_exchange/configuration.c b/dynamic_sparse_data_exchange/configuration.c new file mode 100755 index 0000000000000000000000000000000000000000..a97d717e36a6d3807dd6925dcbc9082d32865ece --- /dev/null +++ b/dynamic_sparse_data_exchange/configuration.c @@ -0,0 +1,182 @@ +#include +#include +#include +#include + +#include "configuration.h" + +const char *verbosity_levels[] = {"OFF", "INFO", "DEBUG", "TRACE"}; + +const char *transmission_modes[] = { + "Dynamic - MPI_Issend / MPI_Iprobe / MPI_Ibarrier", + "Collective - MPI_Alltoall / MPI_Alltoallw" +}; + +// --------------------------------------------- Helper function declarations + +const char* set_ncells(conf_t *c, int nprocs); + +long parse_long(const char *str); + +// ========================================================================== + +void conf_init(conf_t *c) +{ + c->verbosity_level = INFO; + c->ncells_x = 0; + c->ncells_y = 0; + c->debug_rank = -1; + c->use_cartesian_topology = 0; +#if MPI_VERSION >= 3 + c->transmission_mode = DYNAMIC; +#else + #pragma message("MPI_VERSION < 3: Default transmission mode will be collective.") + c->transmission_mode = COLLECTIVE; +#endif + + // Default values: + c->n_iterations = 100; + c->particles_per_cell_initial = 1<<14; // 16k particles + c->delta_t = 1.0; + c->min_mass = 1.0; + c->max_mass = 1.0; + c->max_force = 0.0; + c->max_velocity_initial = 1.0; // 1 cell width per iteration +} + +const char* conf_set_from_args(conf_t *c, int argc, char* argv[], int nprocs) +{ + struct option long_options[] = { + {"verbose", required_argument, NULL, 'v'}, + {"debug-rank", required_argument, NULL, 'g'}, + + {"use-cart-topo", no_argument, &c->use_cartesian_topology, 1}, + {"collective", no_argument, &c->transmission_mode, COLLECTIVE}, + {"dynamic", no_argument, &c->transmission_mode, DYNAMIC}, + + {"ncells-x", required_argument, NULL, 'x'}, + {"ncells-y", required_argument, NULL, 'y'}, + + {"iterations", required_argument, NULL, 'i'}, + {"particles-per-cell", required_argument, NULL, 'n'}, + {"delta-t", required_argument, NULL, 't'}, + {"min-mass", required_argument, NULL, 'm'}, + {"max-mass", required_argument, NULL, 'M'}, + {"max-force", required_argument, NULL, 'F'}, + {"max-velocity", required_argument, NULL, 'V'}, + {NULL, 0, NULL, 0} + }; + + int option; + while((option = getopt_long(argc, argv, "v:g:x:y:i:n:t:m:M:F:V:", long_options, NULL)) >= 0) { + switch(option) { + case 'v': c->verbosity_level = atoi(optarg); break; + case 'g': c->debug_rank = atoi(optarg); break; + case 'x': c->ncells_x = atoi(optarg); break; + case 'y': c->ncells_y = atoi(optarg); break; + case 'i': c->n_iterations = parse_long(optarg); break; + case 'n': c->particles_per_cell_initial = parse_long(optarg); break; + case 't': c->delta_t = atof(optarg); break; + case 'm': c->min_mass = atof(optarg); break; + case 'M': c->max_mass = atof(optarg); break; + case 'F': c->max_force = atof(optarg); break; + case 'V': c->max_velocity_initial = atof(optarg); break; + } + } + + return set_ncells(c, nprocs); +} + +void conf_print(const conf_t *c, FILE *f) +{ + int i = c->verbosity_level; + if(i < 0) i = 0; else if(i > 3) i = 3; + + fprintf(f, "Configuration:\n"); + fprintf(f, " * Verbosity level: %s (%d)\n", verbosity_levels[i], c->verbosity_level); + fprintf(f, " * Use MPI Cart. Topology: %s\n", c->use_cartesian_topology ? 
"YES" : "NO"); + fprintf(f, " * Transmission mode: %s\n", transmission_modes[c->transmission_mode]); + fprintf(f, " * Number of cells: %d x %d\n", c->ncells_x, c->ncells_y); + fprintf(f, " * Particles per cell: %d\n", c->particles_per_cell_initial); + fprintf(f, " * Init. Velocity max: %f\n", c->max_velocity_initial); + fprintf(f, " * Timestep: %f\n", c->delta_t); + fprintf(f, " * Particle mass (min-max): %f - %f\n", c->min_mass, c->max_mass); + fprintf(f, " * Max. random force: %f\n", c->max_force); + fprintf(f, "\n"); +} + +const char* set_ncells(conf_t *c, int nprocs) +{ + static char error[1024]; + + int nx = c->ncells_x; + int ny = c->ncells_y; + + if(nx > 0) { + if(ny > 0) { + if(nx * ny != nprocs) { + snprintf(error, sizeof(error), "Number of cells in x and y direction does not match the number of processes (%d * %d != %d).", nx, ny, nprocs); + return error; + } + } + if(nprocs % nx != 0) { + snprintf(error, sizeof(error), "Number of cells in x direction must divide the number of processes (%d %% %d != 0).", nprocs, nx); + return error; + } + ny = nprocs / nx; + } else if(ny > 0) { + if(nprocs % ny != 0) { + snprintf(error, sizeof(error), "Number of cells in y direction must divide the number of processes (%d %% %d != 0).", nprocs, ny); + return error; + } + nx = nprocs / ny; + } else { + snprintf(error, sizeof(error), "Number of cells in x and y directions not defined. Use option --ncells-x or --ncells-y."); + return error; + } + + c->ncells_x = nx; + c->ncells_y = ny; + + return NULL; +} + +int log_enabled(const conf_t *c, int lvl) +{ + static int rank = -1; + if(rank < 0) { + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + } + + return c->verbosity_level >= lvl && (rank == 0 || rank == c->debug_rank); +} + +int info_enabled(const conf_t *c) +{ + return log_enabled(c, INFO); +} + +int debug_enabled(const conf_t *c) +{ + return log_enabled(c, DEBUG); +} + +int trace_enabled(const conf_t *c) +{ + return log_enabled(c, TRACE); +} + +// --------------------------------------------------------- Helper functions + +long parse_long(const char *str) +{ + char* p; + long result = strtol(str, &p, 10); + if(*p == 'k') { + result <<= 10; + } else if(*p == 'M') { + result <<= 20; + } + return result; +} + diff --git a/dynamic_sparse_data_exchange/configuration.h b/dynamic_sparse_data_exchange/configuration.h new file mode 100755 index 0000000000000000000000000000000000000000..fb7f90b8e00f7a04c259fe2a24adcb2088c70d4f --- /dev/null +++ b/dynamic_sparse_data_exchange/configuration.h @@ -0,0 +1,57 @@ +#ifndef CONFIGURATION_H +#define CONFIGURATION_H + +#include + +enum verbosity_level_enum +{ + OFF = 0, + INFO = 1, + DEBUG = 2, + TRACE = 3, +}; + +enum transmission_mode_enum +{ + DYNAMIC = 0, + COLLECTIVE = 1, +}; + +typedef struct +{ + int verbosity_level; + int debug_rank; + int use_cartesian_topology; + int transmission_mode; + + int ncells_x; + int ncells_y; + int particles_per_cell_initial; + + int n_iterations; + double delta_t; + double min_mass; + double max_mass; + double max_force; + double max_velocity_initial; +} conf_t; + +// Meta-information for MPI-Datatype creation (see mpitypes.c) +#define CONF_T_N_INT_MEMBERS 8 +#define CONF_T_FIRST_INT_MEMBER verbosity_level +#define CONF_T_N_DOUBLE_MEMBERS 5 +#define CONF_T_FIRST_DOUBLE_MEMBER delta_t + +void conf_init(conf_t *c); + +const char* conf_set_from_args(conf_t *c, int argc, char* argv[], int nprocs); + +void conf_print(const conf_t *c, FILE *file); + +int info_enabled(const conf_t *c); +int warn_enabled(const conf_t *c); +int 
debug_enabled(const conf_t *c); +int trace_enabled(const conf_t *c); + +#endif + diff --git a/dynamic_sparse_data_exchange/main.c b/dynamic_sparse_data_exchange/main.c new file mode 100755 index 0000000000000000000000000000000000000000..255d384cb1c9d1414a2177a27b2031a3953ff54e --- /dev/null +++ b/dynamic_sparse_data_exchange/main.c @@ -0,0 +1,159 @@ +#include +#include +#include +#include + +#include "configuration.h" +#include "mpicomm.h" +#include "simulation.h" + +#ifdef DEBUG_ATTACH +#include +void wait_for_debugger(); +#endif + +void run_simulation(const conf_t *configuration); + +void print_simulation_info(const sim_t *simulation, const char* header, FILE *f); + +MPI_Comm get_mpi_communicator(const conf_t *configuration); + +int main(int argc, char *argv[]) +{ + int mpi_rank, nprocs; + const char* error; + + MPI_Init(&argc, &argv); + + MPI_Comm_size(MPI_COMM_WORLD, &nprocs); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); + + conf_t configuration; + conf_init(&configuration); + + if(mpi_rank == 0) { + // parse comand line args on root process only + error = conf_set_from_args(&configuration, argc, argv, nprocs); + if(error) { + fprintf(stderr, "%s\n", error); + MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE); + } + } + mpi_broadcast_configuration(&configuration); + + if(info_enabled(&configuration)) { + conf_print(&configuration, stdout); + } + +#ifdef DEBUG_ATTACH + if(mpi_rank == configuration.debug_rank) { + wait_for_debugger(); + } +#endif + + run_simulation(&configuration); + + MPI_Finalize(); + + return EXIT_SUCCESS; +} + +void run_simulation(const conf_t *configuration) +{ + sim_t s; + int i, n, rank; + MPI_Comm communicator = get_mpi_communicator(configuration); + + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + set_random_seed((rank + 17) * (int)time(NULL)); + + sim_init(&s, configuration, communicator); + + print_simulation_info(&s, "Simulation status (initial):", stdout); + + n = configuration->n_iterations; + for(i = 0; i < n; i++) { + if(trace_enabled(configuration)) { + printf("Iteration %d\n", i); + } + sim_calc_forces(&s); + sim_move_particles(&s); + sim_communicate_particles(&s); + } + + print_simulation_info(&s, "Simulation satus (final):", stdout); +} + +void print_simulation_info(const sim_t *s, const char* header, FILE *f) +{ + if(s->configuration->verbosity_level >= INFO) { + int rank; + sim_info_t info; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + + sim_info(s, &info); + if(rank == 0) { + fprintf(f, "\n%s\n", header); + fprintf(f, " * Number of particles (total): %ld\n", info.n_particles_total); + fprintf(f, " * Particles per cell (min-max): %d - %d\n", info.min_particles_per_cell, info.max_particles_per_cell); + fprintf(f, " * Max. 
velocity: %f\n", info.max_velocity);
+        }
+    }
+}
+
+int communicator_reordered(MPI_Comm new_comm)
+{
+    int comm_world_rank, new_rank;
+    int local_ranks_different;
+    int ranks_reordered = 0;
+
+    MPI_Comm_rank(MPI_COMM_WORLD, &comm_world_rank);
+    MPI_Comm_rank(new_comm, &new_rank);
+
+    local_ranks_different = comm_world_rank != new_rank;
+    MPI_Allreduce(
+        &local_ranks_different, &ranks_reordered, 1, MPI_INT,
+        MPI_LOR, MPI_COMM_WORLD
+    );
+    return ranks_reordered;
+}
+
+MPI_Comm get_mpi_communicator(const conf_t *configuration)
+{
+    int use_cart_topo = configuration->use_cartesian_topology;
+
+    if(use_cart_topo) {
+        MPI_Comm cart_comm;
+        int ndims = 2;
+        int dims[] = {configuration->ncells_x, configuration->ncells_y};
+        int isperiodic[] = {1, 1};
+        int allow_reorder = 1;
+        int reordered;
+
+        MPI_Cart_create(MPI_COMM_WORLD, ndims, dims, isperiodic, allow_reorder, &cart_comm);
+
+        reordered = communicator_reordered(cart_comm);
+        if(info_enabled(configuration)) {
+            printf("INFO: MPI reordered ranks: %s\n",
+                   reordered ? "YES" : "NO");
+        }
+        return cart_comm;
+    } else {
+        return MPI_COMM_WORLD;
+    }
+}
+
+#ifdef DEBUG_ATTACH
+void wait_for_debugger()
+{
+    int rank;
+    volatile int resume = 0;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+
+    printf("Rank %d (pid %d) waiting for debugger to attach...\n", rank, getpid());
+    while(!resume) {
+        sleep(1);
+    }
+}
+#endif
+
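Note on the Cartesian communicator: when --use-cart-topo is given, get_mpi_communicator() above creates a 2D periodic topology, and classify_particles() later resolves cell coordinates to owner ranks with MPI_Cart_rank. The following standalone sketch is not part of the diff (dims is hard-coded to 2 x 2; run with exactly 4 processes) and only illustrates the coords-to-rank round trip the example relies on:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int rank, lookup;
    int coords[2];
    int dims[2] = {2, 2};       /* stand-ins for ncells_x, ncells_y */
    int periods[2] = {1, 1};    /* periodic in both directions, as in get_mpi_communicator() */
    MPI_Comm cart;

    MPI_Init(&argc, &argv);
    /* run with exactly dims[0] * dims[1] = 4 processes */
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart);

    MPI_Comm_rank(cart, &rank);
    MPI_Cart_coords(cart, rank, 2, coords);  /* rank -> (x, y) cell coordinates */
    MPI_Cart_rank(cart, coords, &lookup);    /* (x, y) -> rank, as used in classify_particles() */

    printf("cart rank %d <-> cell (%d, %d), lookup %d\n", rank, coords[0], coords[1], lookup);

    MPI_Finalize();
    return 0;
}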
diff --git a/dynamic_sparse_data_exchange/mpicomm.c b/dynamic_sparse_data_exchange/mpicomm.c
new file mode 100755
index 0000000000000000000000000000000000000000..2fc80b6ef2e989528a7449fe2dd097653174c904
--- /dev/null
+++ b/dynamic_sparse_data_exchange/mpicomm.c
@@ -0,0 +1,264 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+#include "mpicomm.h"
+#include "mpitypes.h"
+
+// --------------------------------------------- Helper function declarations
+
+void ensure_capacity(part_t *particles, int nprocs, int* recv_counts);
+void receive_particles(sim_t *s, const MPI_Status *status);
+
+void sim_info_reduce_op(void *invec, void *inoutvec, int *len, MPI_Datatype *type);
+
+// ==========================================================================
+
+void mpi_broadcast_configuration(conf_t *configuration)
+{
+    MPI_Datatype conf_mpitype;
+    mpitype_conf_init(&conf_mpitype);
+    MPI_Bcast(configuration, 1, conf_mpitype, 0, MPI_COMM_WORLD);
+    mpitype_conf_free(&conf_mpitype);
+}
+
+void mpi_reduce_sim_info(const sim_info_t *local_info, sim_info_t *info)
+{
+    MPI_Datatype info_mpitype;
+    MPI_Op operation;
+    int commutative = 1;
+
+    mpitype_sim_info_init(&info_mpitype);
+    MPI_Op_create(sim_info_reduce_op, commutative, &operation);
+
+    MPI_Reduce(local_info, info, 1, info_mpitype, operation, 0, MPI_COMM_WORLD);
+
+    mpitype_sim_info_free(&info_mpitype);
+    MPI_Op_free(&operation);
+}
+
+void sim_info_reduce_op(void *invec, void *inoutvec, int *len, MPI_Datatype *type)
+{
+    int i;
+    const sim_info_t *other = invec;
+    sim_info_t *result = inoutvec;
+
+    for(i = 0; i < *len; ++i) {
+        result->n_particles_total += other->n_particles_total;
+
+        if(result->min_particles_per_cell > other->min_particles_per_cell) {
+            result->min_particles_per_cell = other->min_particles_per_cell;
+        }
+
+        if(result->max_particles_per_cell < other->max_particles_per_cell) {
+            result->max_particles_per_cell = other->max_particles_per_cell;
+        }
+
+        if(result->max_velocity < other->max_velocity) {
+            result->max_velocity = other->max_velocity;
+        }
+        ++other;
+        ++result;
+    }
+}
+
+void mpi_communicate_particles_dynamic(sim_t *s)
+{
+#if MPI_VERSION >= 3
+    // -----------------------------------------------------------------------
+    // PHASE 1: Post nonblocking synchronous sends for all outgoing particles
+    // -----------------------------------------------------------------------
+    MPI_Datatype *send_types;
+    int *send_counts, *receiver_ranks;
+    int n_receivers, i;
+    int tag = 0;
+    MPI_Request *send_requests;
+
+    // Create MPI Send Types
+    mpitype_part_init_send(&s->leaving_particles, &send_types, &send_counts, &receiver_ranks, &n_receivers);
+
+    send_requests = malloc(n_receivers * sizeof(*send_requests));
+    for(i = 0; i < n_receivers; i++) {
+        MPI_Issend(
+            MPI_BOTTOM, 1, send_types[i], receiver_ranks[i],
+            tag, s->communicator, &send_requests[i]
+        );
+    }
+
+    // Free MPI Send Types
+    mpitype_part_free_send(&send_types, &send_counts, &receiver_ranks, &n_receivers);
+
+    // -----------------------------------------------------------------------
+    // PHASE 2: Receive messages until all outgoing particles are transmitted
+    // -----------------------------------------------------------------------
+    int local_sends_finished = 0;
+    int message_pending = 0;
+    MPI_Status status;
+
+    while(!local_sends_finished) {
+        // Check for next pending message and receive
+        MPI_Iprobe(MPI_ANY_SOURCE, tag, s->communicator, &message_pending, &status);
+        if(message_pending) {
+            receive_particles(s, &status);
+        }
+
+        // Check for completion of local sends
+        MPI_Testall(n_receivers, send_requests, &local_sends_finished, MPI_STATUSES_IGNORE);
+    }
+    free(send_requests);
+
+    // -----------------------------------------------------------------------
+    // PHASE 3: Signal completion of local sends with non-blocking barrier
+    // -----------------------------------------------------------------------
+    MPI_Request barrier_request;
+    MPI_Ibarrier(s->communicator, &barrier_request);
+
+    // -----------------------------------------------------------------------
+    // PHASE 4: Receive messages until other processes have finished sending too
+    // -----------------------------------------------------------------------
+    int all_sends_finished = 0;
+
+    while(!all_sends_finished) {
+        // Check for next pending message and receive
+        MPI_Iprobe(MPI_ANY_SOURCE, tag, s->communicator, &message_pending, &status);
+        if(message_pending) {
+            receive_particles(s, &status);
+        }
+
+        // Check for completion of other processes' sends
+        MPI_Test(&barrier_request, &all_sends_finished, MPI_STATUS_IGNORE);
+    }
+#else
+    #pragma message("MPI_VERSION < 3 => Dynamic transmission mode will not be supported.")
+    fprintf(stderr, "MPI_VERSION < 3 => Cannot use dynamic transmission mode.\n");
+    MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
+#endif
+}
+
+void receive_particles(sim_t *s, const MPI_Status *status)
+{
+    part_t *particles = &s->local_particles;
+    int displacement = particles->count;
+
+    int recv_count = mpitype_part_get_count(status);
+    int new_count = particles->count + recv_count;
+    int new_capacity;
+
+    // Reserve space for new particles
+    if(particles->capacity < new_count) {
+        new_capacity = new_count + new_count / 8;
+        part_reserve(particles, new_capacity);
+    }
+    part_resize(particles, new_count);
+
+    // Create MPI Datatype and receive
+    MPI_Datatype recv_type;
+    mpitype_part_init_recv(particles, recv_count, displacement, &recv_type);
+
+    MPI_Recv(
+        MPI_BOTTOM, 1, recv_type,
+        status->MPI_SOURCE, status->MPI_TAG,
+        s->communicator, MPI_STATUS_IGNORE
+    );
+
+    // Free MPI Datatype
+    mpitype_part_free_recv(&recv_type);
+}
+
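The four phases above implement the dynamic sparse data exchange (NBX-style) protocol: nonblocking synchronous sends, probe-driven receives, and a nonblocking barrier for termination detection. As a reference, here is the same protocol stripped of the particle datatypes, sending a single int to each destination. sparse_exchange, dests, n_dests and payload are illustrative names, not part of the diff; the sketch assumes MPI >= 3:

#include <stdlib.h>
#include <mpi.h>

/* Minimal NBX sketch: send one int to every rank in dests[0..n_dests), receive
 * from unknown sources, and terminate once a nonblocking barrier (entered after
 * the local synchronous sends completed) has finished on all ranks. */
void sparse_exchange(const int *dests, int n_dests, int payload, MPI_Comm comm)
{
    MPI_Request *reqs = malloc(n_dests * sizeof(*reqs));
    MPI_Request barrier;
    MPI_Status status;
    int i, flag, value;
    int barrier_active = 0, done = 0;

    for(i = 0; i < n_dests; i++) {
        MPI_Issend(&payload, 1, MPI_INT, dests[i], 0, comm, &reqs[i]);
    }

    while(!done) {
        MPI_Iprobe(MPI_ANY_SOURCE, 0, comm, &flag, &status);
        if(flag) {
            MPI_Recv(&value, 1, MPI_INT, status.MPI_SOURCE, 0, comm, MPI_STATUS_IGNORE);
            /* ... process the received value ... */
        }

        if(!barrier_active) {
            MPI_Testall(n_dests, reqs, &flag, MPI_STATUSES_IGNORE);
            if(flag) {                      /* all local Issends were matched */
                MPI_Ibarrier(comm, &barrier);
                barrier_active = 1;
            }
        } else {
            MPI_Test(&barrier, &done, MPI_STATUS_IGNORE);
        }
    }
    free(reqs);
}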
+void mpi_communicate_particles_collective(sim_t *s)
+{
+    // Create MPI datatypes for sending particle data
+    MPI_Datatype *send_types_compact;
+    int *send_counts_compact, *receiver_ranks;
+    int n_receivers;
+
+    mpitype_part_init_send(&s->leaving_particles, &send_types_compact, &send_counts_compact, &receiver_ranks, &n_receivers);
+
+    // Prepare argument arrays for MPI_Alltoall{w}
+    int nprocs, rank, i, j, displacement;
+    MPI_Datatype *send_types, *recv_types;
+    int *send_counts, *recv_counts;
+    int *displacements;
+
+    MPI_Comm_size(s->communicator, &nprocs);
+    MPI_Comm_rank(s->communicator, &rank);
+
+    send_types = malloc(nprocs * sizeof(*send_types));
+    recv_types = malloc(nprocs * sizeof(*recv_types));
+    send_counts = malloc(nprocs * sizeof(*send_counts));
+    recv_counts = malloc(nprocs * sizeof(*recv_counts));
+    displacements = malloc(nprocs * sizeof(*displacements));
+
+    for(i = 0; i < nprocs; i++) {
+        send_types[i] = MPI_BYTE;
+        recv_types[i] = MPI_BYTE;
+        send_counts[i] = 0;
+        recv_counts[i] = 0;
+        displacements[i] = 0;
+    }
+
+    for(i = 0; i < n_receivers; i++) {
+        j = receiver_ranks[i];
+        send_types[j] = send_types_compact[i];
+        send_counts[j] = send_counts_compact[i];
+    }
+
+    // Transfer particle counts (determine number of particles to receive)
+    MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, s->communicator);
+
+    // Create MPI Datatypes for receiving particle data;
+    // adjust send/receive counts (transfer only one element of compound datatype)
+    part_t* particles = &s->local_particles;
+    displacement = particles->count;
+    ensure_capacity(particles, nprocs, recv_counts);
+
+    for(i = 0; i < nprocs; i++)
+    {
+        if(send_counts[i] > 0) {
+            send_counts[i] = 1;
+        }
+        if(recv_counts[i] > 0) {
+            mpitype_part_init_recv(particles, recv_counts[i], displacement, &recv_types[i]);
+            displacement += recv_counts[i];
+            recv_counts[i] = 1;
+        }
+    }
+
+    // Transfer particle data
+    MPI_Alltoallw(
+        MPI_BOTTOM, send_counts, displacements, send_types,
+        MPI_BOTTOM, recv_counts, displacements, recv_types,
+        s->communicator
+    );
+
+    // Free MPI Datatypes
+    mpitype_part_free_send(&send_types_compact, &send_counts_compact, &receiver_ranks, &n_receivers);
+    for(i = 0; i < nprocs; i++) {
+        if(recv_counts[i] > 0) {
+            mpitype_part_free_recv(&recv_types[i]);
+        }
+    }
+
+    // Cleanup
+    free(send_types);
+    free(recv_types);
+    free(send_counts);
+    free(recv_counts);
+    free(displacements);
+}
+
+// --------------------------------------------------------- Helper functions
+
+void ensure_capacity(part_t *particles, int nprocs, int* recv_counts)
+{
+    int new_count = particles->count;
+    int new_capacity, i;
+    for(i = 0; i < nprocs; i++) new_count += recv_counts[i];
+
+    if(particles->capacity < new_count) {
+        new_capacity = new_count + new_count / 8;
+        part_reserve(particles, new_capacity);
+    }
+    part_resize(particles, new_count);
+}
+
diff --git a/dynamic_sparse_data_exchange/mpicomm.h b/dynamic_sparse_data_exchange/mpicomm.h
new file mode 100755
index 0000000000000000000000000000000000000000..a47c389b70b6df2a50e5040e1a53e33cf7079a0f
--- /dev/null
+++ b/dynamic_sparse_data_exchange/mpicomm.h
@@ -0,0 +1,15 @@
+#ifndef MPICOMM_H
+#define MPICOMM_H
+
+#include "configuration.h"
+#include "simulation.h"
+
+void mpi_broadcast_configuration(conf_t *configuration);
+
+void mpi_reduce_sim_info(const sim_info_t *local_info, sim_info_t *info);
+
+void mpi_communicate_particles_dynamic(sim_t *s);
+
+void mpi_communicate_particles_collective(sim_t *s);
+
+#endif
diff --git a/dynamic_sparse_data_exchange/mpitypes.c b/dynamic_sparse_data_exchange/mpitypes.c
new file mode 100755
index
0000000000000000000000000000000000000000..8ff33f59906baba7318ca33eb5aebd3d5500a700 --- /dev/null +++ b/dynamic_sparse_data_exchange/mpitypes.c @@ -0,0 +1,233 @@ +#include +#include + +#include "mpitypes.h" + +#include "configuration.h" +#include "particles.h" +#include "simulation.h" + +// --------------------------------------------- Helper function declarations + +void mpitype_indexed_int(int count, int *displacements, MPI_Datatype *type); + +void mpitype_indexed_double(int count, int *displacemnts, MPI_Datatype *type); + +MPI_Aint* part_get_addresses(const part_t *particles, int displacement); + +// ========================================================================== + +void mpitype_conf_init(MPI_Datatype *new_type) +{ + conf_t dummy; + int i; + + // CONF_T_* constants defined in configuration.h + int blocklengths[] = {CONF_T_N_INT_MEMBERS, CONF_T_N_DOUBLE_MEMBERS}; + MPI_Datatype types[] = {MPI_INT, MPI_DOUBLE}; + MPI_Aint displacements[2]; + MPI_Aint base; + + MPI_Get_address(&dummy, &base); + MPI_Get_address(&dummy.CONF_T_FIRST_INT_MEMBER, &displacements[0]); + MPI_Get_address(&dummy.CONF_T_FIRST_DOUBLE_MEMBER, &displacements[1]); + + for(i = 0; i < 2; i++) displacements[i] -= base; + + MPI_Type_create_struct(2, blocklengths, displacements, types, new_type); + MPI_Type_commit(new_type); +} + +void mpitype_conf_free(MPI_Datatype *type) +{ + MPI_Type_free(type); +} + +void mpitype_sim_info_init(MPI_Datatype *new_type) +{ + sim_info_t dummy; + int i; + + int blocklengths[] = { + SIM_INFO_T_N_LONG_MEMBERS, + SIM_INFO_T_N_INT_MEMBERS, + SIM_INFO_T_N_DOUBLE_MEMBERS + }; + MPI_Datatype types[] = {MPI_LONG, MPI_INT, MPI_DOUBLE}; + MPI_Aint displacements[3]; + MPI_Aint base; + + MPI_Get_address(&dummy, &base); + MPI_Get_address(&dummy.SIM_INFO_T_FIRST_LONG_MEMBER, &displacements[0]); + MPI_Get_address(&dummy.SIM_INFO_T_FIRST_INT_MEMBER, &displacements[1]); + MPI_Get_address(&dummy.SIM_INFO_T_FIRST_DOUBLE_MEMBER, &displacements[2]); + + for(i = 0; i < 3; i++) displacements[i] -= base; + + MPI_Type_create_struct(3, blocklengths, displacements, types, new_type); + MPI_Type_commit(new_type); +} + +void mpitype_sim_info_free(MPI_Datatype *type) +{ + MPI_Type_free(type); +} + +MPI_Aint* part_get_addresses(const part_t *particles, int displacement) +{ + static MPI_Aint addresses[5]; + assert(displacement < particles->count); + + MPI_Get_address(particles->mass + displacement, &addresses[0]); + MPI_Get_address(particles->velocity.x + displacement, &addresses[1]); + MPI_Get_address(particles->velocity.y + displacement, &addresses[2]); + MPI_Get_address(particles->location.x + displacement, &addresses[3]); + MPI_Get_address(particles->location.y + displacement, &addresses[4]); + + return addresses; +} + +void mpitype_part_init_send(part_t *particles, MPI_Datatype *datatypes[], int *send_counts[], int *receiver_ranks[], int *n_receivers) +{ + int part_count = particles->count; + int *part_owner = particles->owner_rank; + + if(part_count == 0) { + // no particles are leaving local cell + *datatypes = NULL; + *send_counts = NULL; + *receiver_ranks = NULL; + *n_receivers = 0; + return; + } + + // Only 5 components of part_t structure need to be exchanged: + // mass, velocity.x, velocity.y, location.x, location.y + int blocklengths[] = {1, 1, 1, 1, 1}; + MPI_Aint *addresses = part_get_addresses(particles, 0); + MPI_Datatype struct_component_types[5]; + + // Group particles by owner rank; for each group create mpi send type + int n, i, j, k, recv_rank; + + int recv_capacity = 8; + MPI_Datatype *types = 
malloc(recv_capacity * sizeof(*types)); + int *counts = malloc(recv_capacity * sizeof(*counts)); + int *recv_ranks = malloc(recv_capacity * sizeof(*recv_ranks)); + int *part_indices = malloc(part_count * sizeof(*part_indices)); + MPI_Datatype indexed_double_type; + MPI_Datatype particle_struct_type; + + // Iterate over particles + for(i = 0, n = 0; i < part_count; i++) + { + recv_rank = part_owner[i]; + if(recv_rank < 0) continue; // Particle already processed + + // Find indices of particles with same owner + for(j = i, k = 0; j < part_count; j++) { + if(part_owner[j] == recv_rank) { + part_indices[k] = j; + k++; + part_owner[j] = -1; // Mark j-th particle as processed + } + } + // Create (temporary) MPI Datatypes for struct components (5 x indexed double) + mpitype_indexed_double(k, part_indices, &indexed_double_type); + struct_component_types[0] = indexed_double_type; // mass + struct_component_types[1] = indexed_double_type; // velocity.x + struct_component_types[2] = indexed_double_type; // velocity.y + struct_component_types[3] = indexed_double_type; // location.x + struct_component_types[4] = indexed_double_type; // location.y + + // Create (final) MPI Datatype for particle struct + MPI_Type_create_struct(5, blocklengths, addresses, struct_component_types, &particle_struct_type); + MPI_Type_commit(&particle_struct_type); + + // Free temporary MPI type + MPI_Type_free(&indexed_double_type); + + // Store type and rank of corresponding recevier + if(n >= recv_capacity) { + recv_capacity += recv_capacity / 2; + types = realloc(types, recv_capacity * sizeof(*types)); + counts = realloc(counts, recv_capacity * sizeof(*counts)); + recv_ranks = realloc(recv_ranks, recv_capacity * sizeof(*recv_ranks)); + } + types[n] = particle_struct_type; + counts[n] = k; + recv_ranks[n] = recv_rank; + n++; + } + // Set output args + *datatypes = types; + *send_counts = counts; + *receiver_ranks = recv_ranks; + *n_receivers = n; +} + +void mpitype_part_free_send(MPI_Datatype *datatypes[], int *send_counts[], int *receiver_ranks[], int *n_receivers) +{ + MPI_Datatype *types; + int *counts, *receivers; + int n = *n_receivers; + int i; + + types = *datatypes; *datatypes = NULL; + counts = *send_counts; *send_counts = NULL; + receivers = *receiver_ranks; *receiver_ranks = NULL; + + for(i = 0; i < n; i++) { + MPI_Type_free(&types[i]); + } + free(types); + free(counts); + free(receivers); + *n_receivers = 0; +} + +int mpitype_part_get_count(const MPI_Status *status) +{ + const int n_doubles_per_particle = 5; // mass, velocity.x, velocity.y, location.x, location.y + int n_doubles; + MPI_Get_count(status, MPI_DOUBLE, &n_doubles); + return n_doubles / n_doubles_per_particle; +} + +void mpitype_part_init_recv(const part_t* particles, int blocklength, int displacement, MPI_Datatype* type) +{ + int blocklengths[5]; + MPI_Aint *addresses = part_get_addresses(particles, displacement); + MPI_Datatype struct_component_types[5]; + int i; + + for(i = 0; i < 5; i++) blocklengths[i] = blocklength; + + struct_component_types[0] = MPI_DOUBLE; // mass + struct_component_types[1] = MPI_DOUBLE; // velocity.x + struct_component_types[2] = MPI_DOUBLE; // velocity.y + struct_component_types[3] = MPI_DOUBLE; // location.x + struct_component_types[4] = MPI_DOUBLE; // location.y + + MPI_Type_create_struct(5, blocklengths, addresses, struct_component_types, type); + MPI_Type_commit(type); +} + +void mpitype_part_free_recv(MPI_Datatype* type) +{ + MPI_Type_free(type); +} + +// --------------------------------------------------------- 
Helper functions + +void mpitype_indexed_int(int count, int *displacements, MPI_Datatype *type) +{ + MPI_Type_create_indexed_block(count, 1, displacements, MPI_INT, type); +} + +void mpitype_indexed_double(int count, int *displacements, MPI_Datatype *type) +{ + MPI_Type_create_indexed_block(count, 1, displacements, MPI_DOUBLE, type); +} + + diff --git a/dynamic_sparse_data_exchange/mpitypes.h b/dynamic_sparse_data_exchange/mpitypes.h new file mode 100755 index 0000000000000000000000000000000000000000..04a0440aad14d7296b95f609f8229933fbb407d4 --- /dev/null +++ b/dynamic_sparse_data_exchange/mpitypes.h @@ -0,0 +1,24 @@ +#ifndef MPITYPES_H +#define MPITYPES_H + +#include + +#include "particles.h" + +void mpitype_conf_init(MPI_Datatype *new_type); +void mpitype_conf_free(MPI_Datatype *type); + +void mpitype_sim_info_init(MPI_Datatype *new_type); +void mpitype_sim_info_free(MPI_Datatype *type); + +void mpitype_part_init_send(part_t *particles, MPI_Datatype *datatypes[], int *send_counts[], int *receiver_ranks[], int *n_receivers); +void mpitype_part_free_send(MPI_Datatype *datatypes[], int *send_counts[], int *receiver_ranks[], int *n_receivers); + +// returns the number of particles to be received +int mpitype_part_get_count(const MPI_Status *status); + +void mpitype_part_init_recv(const part_t* particles, int blocklength, int displacement, MPI_Datatype* type); +void mpitype_part_free_recv(MPI_Datatype* type); + +#endif + diff --git a/dynamic_sparse_data_exchange/particles.c b/dynamic_sparse_data_exchange/particles.c new file mode 100755 index 0000000000000000000000000000000000000000..22a455032686149187e820c5d952b9e9cb08b47d --- /dev/null +++ b/dynamic_sparse_data_exchange/particles.c @@ -0,0 +1,61 @@ +#include +#include +#include + +#include "particles.h" + +void part_init(part_t *particles, int capacity) +{ + particles->count = 0; + particles->capacity = 0; + + particles->owner_rank = NULL; + particles->mass = NULL; + vec_init(&particles->force, 0); + vec_init(&particles->velocity, 0); + vec_init(&particles->location, 0); + + if(capacity) { + part_reserve(particles, capacity); + } +} + +void part_reserve(part_t *particles, int capacity) +{ + assert(capacity >= particles->count); + particles->owner_rank = realloc(particles->owner_rank, capacity * sizeof(int)); + particles->mass = realloc(particles->mass, capacity * sizeof(double)); + vec_reserve(&particles->force, capacity); + vec_reserve(&particles->velocity, capacity); + vec_reserve(&particles->location, capacity); + + particles->capacity = capacity; +} + +void part_resize(part_t *particles, int count) +{ + if(count > particles->capacity) { + part_reserve(particles, count); + } + particles->count = count; +} + +void part_free(part_t *particles) +{ + part_resize(particles, 0); + part_reserve(particles, 0); +} + +void part_copy(part_t *dst, int dst_start, const part_t *src, int src_start, int count) +{ + if(dst->count < dst_start + count) { + part_resize(dst, dst_start + count); + } + + memmove(&dst->owner_rank[dst_start], &src->owner_rank[src_start], count * sizeof(int)); + memmove(&dst->mass[dst_start], &src->mass[src_start], count * sizeof(double)); + vec_copy(&dst->force, dst_start, &src->force, src_start, count); + vec_copy(&dst->velocity, dst_start, &src->velocity, src_start, count); + vec_copy(&dst->location, dst_start, &src->location, src_start, count); +} + diff --git a/dynamic_sparse_data_exchange/particles.h b/dynamic_sparse_data_exchange/particles.h new file mode 100755 index 
0000000000000000000000000000000000000000..ea00d9ed0bddddf6d0882f8366ab9bc929d66679
--- /dev/null
+++ b/dynamic_sparse_data_exchange/particles.h
@@ -0,0 +1,26 @@
+#ifndef PARTICLES_H
+#define PARTICLES_H
+
+#include "vector.h"
+
+typedef struct
+{
+    int count;
+    int capacity;
+
+    int *owner_rank;
+    double *mass;
+    vec_t force;
+    vec_t velocity;
+    vec_t location;
+} part_t;
+
+void part_init(part_t *particles, int capacity);
+void part_reserve(part_t *particles, int capacity);
+void part_resize(part_t *particles, int count);
+void part_free(part_t *particles);
+
+void part_copy(part_t *dst, int dst_start, const part_t *src, int src_start, int count);
+
+#endif
+
diff --git a/dynamic_sparse_data_exchange/random.c b/dynamic_sparse_data_exchange/random.c
new file mode 100755
index 0000000000000000000000000000000000000000..65ab93252b8791d517c88e76fe66ef68878c68a0
--- /dev/null
+++ b/dynamic_sparse_data_exchange/random.c
@@ -0,0 +1,55 @@
+#include <stdlib.h>
+#include <math.h>
+
+#include "random.h"
+
+void set_random_seed(int seed)
+{
+    srand(seed);
+}
+
+void array_set_random(double* array, int count, double min, double max)
+{
+    int i;
+    double scale = max - min;
+
+    if(scale > 0.) {
+        scale /= (double)RAND_MAX;
+
+        for(i = 0; i < count; i++) {
+            array[i] = min + (double)rand() * scale;
+        }
+    } else {
+        for(i = 0; i < count; i++) array[i] = min;
+    }
+}
+
+void vec_set_random(vec_t *vector, int count, double max_magnitude)
+{
+    double magnitude, angle;
+    double mag_scale, ang_scale;
+    int i;
+
+    if(max_magnitude > 0.) {
+        mag_scale = max_magnitude / (double)RAND_MAX;
+        ang_scale = 2 * M_PI / (double)RAND_MAX;
+
+        for(i = 0; i < count; i++) {
+            magnitude = mag_scale * (double)rand();
+            angle = ang_scale * (double)rand();
+
+            vector->x[i] = magnitude * cos(angle);
+            vector->y[i] = magnitude * sin(angle);
+        }
+    } else {
+        vec_set_zero(vector, count);
+    }
+}
+
+void vec_set_random_box(vec_t *vector, int count, double min_x, double max_x, double min_y, double max_y)
+{
+    array_set_random(vector->x, count, min_x, max_x);
+    array_set_random(vector->y, count, min_y, max_y);
+}
+
+
diff --git a/dynamic_sparse_data_exchange/random.h b/dynamic_sparse_data_exchange/random.h
new file mode 100755
index 0000000000000000000000000000000000000000..87edabf91266fda5c7bc08b803883eb8ce573589
--- /dev/null
+++ b/dynamic_sparse_data_exchange/random.h
@@ -0,0 +1,15 @@
+#ifndef RANDOM_H
+#define RANDOM_H
+
+#include "vector.h"
+
+void set_random_seed(int seed);
+
+void array_set_random(double* array, int count, double min, double max);
+
+void vec_set_random(vec_t *vector, int count, double max_magnitude);
+
+void vec_set_random_box(vec_t *vector, int count, double min_x, double max_x, double min_y, double max_y);
+
+#endif
+
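A small usage sketch of the containers and random helpers declared above (illustrative only; assumes it is compiled together with particles.c, vector.c and random.c from this diff):

#include <stdio.h>

#include "particles.h"
#include "random.h"

int main(void)
{
    part_t p;
    part_init(&p, 16);      /* allocate capacity for 16 particles */
    part_resize(&p, 16);    /* and use all of it */

    set_random_seed(42);
    array_set_random(p.mass, p.count, 1.0, 2.0);                  /* masses in [1, 2] */
    vec_set_random_box(&p.location, p.count, 0.0, 1.0, 0.0, 1.0); /* positions in the unit cell */
    vec_set_random(&p.velocity, p.count, 0.5);                    /* speeds up to 0.5 */

    printf("first particle: m = %f at (%f, %f)\n", p.mass[0], p.location.x[0], p.location.y[0]);

    part_free(&p);
    return 0;
}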
diff --git a/dynamic_sparse_data_exchange/simulation.c b/dynamic_sparse_data_exchange/simulation.c
new file mode 100755
index 0000000000000000000000000000000000000000..d60155d426e6e45097502b63ca87a9ccd194dedb
--- /dev/null
+++ b/dynamic_sparse_data_exchange/simulation.c
@@ -0,0 +1,235 @@
+#include <math.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+#include "simulation.h"
+#include "random.h"
+#include "mpicomm.h"
+
+void sim_info_local(const sim_t *s, sim_info_t *local_info);
+void allocate_memory(sim_t *s);
+void deallocate_memory(sim_t *s);
+void init_local_particles(sim_t *s);
+void classify_particles(sim_t *s);
+void relocate_particles(sim_t *s);
+
+void sim_init(sim_t *s, const conf_t* configuration, const MPI_Comm communicator)
+{
+    s->configuration = configuration;
+    s->communicator = communicator;
+
+    MPI_Comm_rank(communicator, &s->mpi_rank);
+
+    // allocate storage and initialize local particles with random values
+    allocate_memory(s);
+    init_local_particles(s);
+}
+
+void sim_free(sim_t *s)
+{
+    deallocate_memory(s);
+}
+
+void sim_info(const sim_t *s, sim_info_t *info)
+{
+    sim_info_t local_info;
+
+    sim_info_local(s, &local_info);
+    mpi_reduce_sim_info(&local_info, info);
+}
+
+void sim_info_local(const sim_t *s, sim_info_t *local_info)
+{
+    const part_t *particles = &s->local_particles;
+    int count = particles->count;
+
+    double *vx = particles->velocity.x;
+    double *vy = particles->velocity.y;
+    double *vsqr = malloc(count * sizeof(*vsqr));
+    double vsqr_max = 0.;
+    int i;
+
+    for(i = 0; i < count; i++) {
+        vsqr[i] = vx[i] * vx[i] + vy[i] * vy[i];
+    }
+    for(i = 0; i < count; i++) {
+        if(vsqr_max < vsqr[i]) vsqr_max = vsqr[i];
+    }
+    free(vsqr);
+
+    local_info->n_particles_total = s->local_particles.count;
+    local_info->min_particles_per_cell = count;
+    local_info->max_particles_per_cell = count;
+    local_info->max_velocity = sqrt(vsqr_max);
+}
+
+void sim_calc_forces(sim_t *s)
+{
+    part_t *particles = &(s->local_particles);
+    int count = particles->count;
+
+    // just use random values for demonstration purpose
+    double max_force = s->configuration->max_force;
+    vec_t* force = &(particles->force);
+    vec_set_random(force, count, max_force);
+}
+
+void sim_move_particles(sim_t *s)
+{
+    part_t *particles = &s->local_particles;
+    int count = particles->count;
+
+    const vec_t *force = &particles->force;
+    const double *mass = particles->mass;
+    double delta_t = s->configuration->delta_t;
+
+    vec_t tmp_vector;
+    vec_init(&tmp_vector, count);
+
+    vec_t *acceleration = &tmp_vector;
+    vec_t *velocity = &particles->velocity;
+    vec_t *location = &particles->location;
+
+    // acceleration = force / mass
+    vec_arr_divide(acceleration, force, mass, count);
+
+    // velocity += acceleration / delta_t
+    vec_add_scaled(velocity, acceleration, 1./delta_t, count);
+
+    // location += velocity / delta_t, taking care of periodicity
+    double modulus_x = s->configuration->ncells_x;
+    double modulus_y = s->configuration->ncells_y;
+    vec_add_scaled_mod(location, velocity, 1./delta_t, modulus_x, modulus_y, count);
+
+    vec_free(&tmp_vector);
+}
+
+void sim_communicate_particles(sim_t *s)
+{
+    int mode = s->configuration->transmission_mode;
+
+    classify_particles(s);
+    relocate_particles(s);
+
+    switch(mode) {
+    case DYNAMIC:
+        mpi_communicate_particles_dynamic(s); break;
+    case COLLECTIVE:
+        mpi_communicate_particles_collective(s); break;
+    }
+}
+
+// ---------------------------------------------------------- Helper functions
+
+void allocate_memory(sim_t *s)
+{
+    // set up storage for particles in local cell and temporary working vector
+    int count = s->configuration->particles_per_cell_initial;
+    int capacity = count + count / 4;
+
+    part_init(&s->local_particles, capacity);
+
+    // set up buffer for particles leaving the local cell
+    // (capacity will be increased on demand)
+    part_init(&s->leaving_particles, 0);
+}
+
+void deallocate_memory(sim_t *s)
+{
+    part_free(&s->local_particles);
+
+    part_free(&s->leaving_particles);
+}
+
+void init_local_particles(sim_t *s)
+{
+    int count = s->configuration->particles_per_cell_initial;
+    part_t *particles = &s->local_particles;
+    part_resize(particles, count);
+
+    // particle mass
+    double min_mass = s->configuration->min_mass;
+    double max_mass = s->configuration->max_mass;
+    array_set_random(particles->mass, count, min_mass, max_mass);
+
+    // particle location
+    int nx =
s->configuration->ncells_x; + int cell_x = s->mpi_rank % nx; + int cell_y = s->mpi_rank / nx; + double min_x = cell_x, max_x = min_x + 1.; + double min_y = cell_y, max_y = min_y + 1.; + vec_set_random_box(&particles->location, count, min_x, max_x, min_y, max_y); + + // particle velocity + double vmax = s->configuration->max_velocity_initial; + vec_set_random(&particles->velocity, count, vmax); + + // forces + vec_set_zero(&particles->force, count); +} + +void classify_particles(sim_t *s) +{ + int nx = s->configuration->ncells_x; + int ny = s->configuration->ncells_y; + + part_t *particles = &s->local_particles; + double *x = particles->location.x; + double *y = particles->location.y; + int *owner = particles->owner_rank; + int count = particles->count; + int i; + + if(s->configuration->use_cartesian_topology) { + int coords[2]; + for(i = 0; i < count; i++) { + coords[0] = (int)x[i]; + coords[1] = (int)y[i]; + MPI_Cart_rank(s->communicator, coords, &owner[i]); + } + } else { + for(i = 0; i < count; i++) { + owner[i] = (int)x[i] + nx * (int)y[i]; + } + } +} + +void relocate_particles(sim_t *s) +{ + int count = s->local_particles.count; + int my_rank = s->mpi_rank; + + part_t *particles = &s->local_particles; + int *owner = particles->owner_rank; + + int i, j, k; + int block_start, block_len; + + i = 0; j = 0; k = 0; + while(k < count) + { + // scan for particles leaving local cell + block_start = k; + for(; k < count && owner[k] != my_rank; k++); + block_len = k - block_start; + + if(block_len > 0) { + part_copy(&s->leaving_particles, j, &s->local_particles, block_start, block_len); + j += block_len; + } + + // scan for particles staying in local cell + block_start = k; + for(; k < count && owner[k] == my_rank; k++); + block_len = k - block_start; + if(block_len > 0) { + if(i != block_start) { + part_copy(&s->local_particles, i, &s->local_particles, block_start, block_len); + } + i += block_len; + } + } + s->local_particles.count = i; + s->leaving_particles.count = j; +} + diff --git a/dynamic_sparse_data_exchange/simulation.h b/dynamic_sparse_data_exchange/simulation.h new file mode 100755 index 0000000000000000000000000000000000000000..c845f778d73f7b6f64d43f85a3ed4bf3391a6774 --- /dev/null +++ b/dynamic_sparse_data_exchange/simulation.h @@ -0,0 +1,47 @@ +#ifndef SIMULATION_H +#define SIMULATION_H + +#include + +#include "configuration.h" +#include "particles.h" + +typedef struct +{ + const conf_t* configuration; + MPI_Comm communicator; + int mpi_rank; + + part_t local_particles; + part_t leaving_particles; +} sim_t; + +typedef struct +{ + long n_particles_total; + int min_particles_per_cell; + int max_particles_per_cell; + + double max_velocity; +} sim_info_t; + +// Meta-information for MPI-Datatype creation +#define SIM_INFO_T_N_LONG_MEMBERS 1 +#define SIM_INFO_T_FIRST_LONG_MEMBER n_particles_total +#define SIM_INFO_T_N_INT_MEMBERS 2 +#define SIM_INFO_T_FIRST_INT_MEMBER min_particles_per_cell +#define SIM_INFO_T_N_DOUBLE_MEMBERS 1 +#define SIM_INFO_T_FIRST_DOUBLE_MEMBER max_velocity + +void sim_init(sim_t *s, const conf_t* configuration, MPI_Comm communicator); + +void sim_info(const sim_t *s, sim_info_t *info); + +void sim_calc_forces(sim_t *s); + +void sim_move_particles(sim_t *s); + +void sim_communicate_particles(sim_t *s); + +#endif + diff --git a/dynamic_sparse_data_exchange/vector.c b/dynamic_sparse_data_exchange/vector.c new file mode 100755 index 0000000000000000000000000000000000000000..35c8044b5909a5c76b4ae9a1e044c51c845565bb --- /dev/null +++ 
b/dynamic_sparse_data_exchange/vector.c @@ -0,0 +1,110 @@ +#include +#include +#include + +#include "vector.h" + +void vec_init(vec_t *vector, int capacity) +{ + vector->x = NULL; + vector->y = NULL; + + if(capacity > 0) { + vec_reserve(vector, capacity); + } +} + +void vec_reserve(vec_t *vector, int capacity) +{ + vector->x = realloc(vector->x, capacity * sizeof(double)); + vector->y = realloc(vector->y, capacity * sizeof(double)); +} + +void vec_free(vec_t *vector) +{ + vec_reserve(vector, 0); +} + +void vec_copy(vec_t *dst, int dst_start, const vec_t *src, int src_start, int count) +{ + memmove(&dst->x[dst_start], &src->x[src_start], count * sizeof(double)); + memmove(&dst->y[dst_start], &src->y[src_start], count * sizeof(double)); +} + +void vec_set_zero(vec_t *vector, int count) +{ + memset(vector->x, 0, count * sizeof(double)); + memset(vector->y, 0, count * sizeof(double)); +} + +void vec_add(vec_t *accumulator, const vec_t *operand, int count) +{ + vec_add_scaled(accumulator, operand, 1., count); +} + +void vec_add_scaled(vec_t *accumulator, const vec_t* operand, double factor, int count) +{ + int i; + double *a, *o; + + a = accumulator->x; + o = operand->x; + for(i = 0; i < count; i++) { + a[i] += o[i] * factor; + } + a = accumulator->y; + o = operand->y; + for(i = 0; i < count; i++) { + a[i] += o[i] * factor; + } +} + +void vec_add_scaled_mod(vec_t *accumulator, const vec_t* operand, double factor, double modx, double mody, int count) +{ + int i; + double *a, *o; + + a = accumulator->x; + o = operand->x; + for(i = 0; i < count; i++) { + a[i] = fmod(modx + fmod(a[i] + o[i] * factor, modx), modx); + } + a = accumulator->y; + o = operand->y; + for(i = 0; i < count; i++) { + a[i] = fmod(mody + fmod(a[i] + o[i] * factor, mody), mody); + } +} + +void vec_scale(vec_t *vector, double factor, int count) +{ + int i; + double *v; + + v = vector->x; + for(i = 0; i < count; i++) { + v[i] *= factor; + } + v = vector->y; + for(i = 0; i < count; i++) { + v[i] *= factor; + } +} + +void vec_arr_divide(vec_t *quotient, const vec_t *dividend, const double *divisor, int count) +{ + int i; + double *a, *b; + + a = quotient->x; + b = dividend->x; + for(i = 0; i < count; i++) { + a[i] = b[i] / divisor[i]; + } + a = quotient->y; + b = dividend->y; + for(i = 0; i < count; i++) { + a[i] = b[i] / divisor[i]; + } +} + diff --git a/dynamic_sparse_data_exchange/vector.h b/dynamic_sparse_data_exchange/vector.h new file mode 100755 index 0000000000000000000000000000000000000000..54ba37b316303adf2c25e0692a48e5022d5c14c3 --- /dev/null +++ b/dynamic_sparse_data_exchange/vector.h @@ -0,0 +1,25 @@ +#ifndef VECTOR_H +#define VECTOR_H + +typedef struct +{ + double *x; + double *y; +} vec_t; + +void vec_init(vec_t *vector, int capacity); +void vec_reserve(vec_t *vector, int capacity); +void vec_free(vec_t *vector); +void vec_copy(vec_t *dst, int dst_start, const vec_t *src, int src_start, int count); + +void vec_set_zero(vec_t *vector, int size); + +void vec_add(vec_t *accumulator, const vec_t *operand, int size); +void vec_add_scaled(vec_t *accumulator, const vec_t* operand, double factor, int size); +void vec_add_scaled_mod(vec_t *accumulator, const vec_t* operand, double factor, double modulus_x, double modulus_y, int size); +void vec_scale(vec_t *vector, double factor, int size); + +void vec_arr_divide(vec_t *quotient, const vec_t *dividend, const double *divisor, int count); + +#endif +
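For readers skimming the diff: mpitypes.c builds the datatype used by mpi_broadcast_configuration() with the MPI_Get_address / MPI_Type_create_struct pattern. Below is a compact sketch of that pattern for a hypothetical two-member struct; params_t and make_params_type are illustrative names, not part of the diff.

#include <mpi.h>

typedef struct { int n; double dt; } params_t;  /* hypothetical stand-in for conf_t */

void make_params_type(MPI_Datatype *new_type)
{
    params_t dummy;
    int blocklengths[2] = {1, 1};
    MPI_Datatype types[2] = {MPI_INT, MPI_DOUBLE};
    MPI_Aint displacements[2], base;

    /* displacements are measured relative to the start of the struct */
    MPI_Get_address(&dummy, &base);
    MPI_Get_address(&dummy.n, &displacements[0]);
    MPI_Get_address(&dummy.dt, &displacements[1]);
    displacements[0] -= base;
    displacements[1] -= base;

    MPI_Type_create_struct(2, blocklengths, displacements, types, new_type);
    MPI_Type_commit(new_type);
}

A committed type built this way can be passed straight to the communication calls, e.g. MPI_Bcast(&params, 1, type, 0, MPI_COMM_WORLD), which is how the parsed configuration is distributed from rank 0 to all other ranks in mpicomm.c.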