GADGET-4
shared_mem_handler.cc
Go to the documentation of this file.
1 /*******************************************************************************
2  * \copyright This file is part of the GADGET4 N-body/SPH code developed
3  * \copyright by Volker Springel. Copyright (C) 2014-2020 by Volker Springel
4  * \copyright (vspringel@mpa-garching.mpg.de) and all contributing authors.
5  *******************************************************************************/
6 
12 #include <hdf5.h>
13 #include <mpi.h>
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <algorithm>
17 #include <cstring>
18 
19 #include "../gravtree/gravtree.h"
20 #include "../ngbtree/ngbtree.h"
21 #include "../time_integration/driftfac.h"
22 
23 #include "../mpi_utils/shared_mem_handler.h"
24 
26 typedef ngbtree ntree;
27 
28 void shmem::prepare_offset_table(void *p, ptrdiff_t *&offset_tab) // called by ghost task
29 {
30  ptrdiff_t off = ((char *)p - Mem.Base);
31 
32  offset_tab = (ptrdiff_t *)Mem.mymalloc("offset_tab", Island_NTask * sizeof(ptrdiff_t));
33 
34  MPI_Gather(&off, sizeof(ptrdiff_t), MPI_BYTE, offset_tab, sizeof(ptrdiff_t), MPI_BYTE, Island_NTask - 1, SharedMemComm);
35 }
36 
37 void shmem::inform_offset_table(void *p) // called by worked tasks
38 {
39  ptrdiff_t off = ((char *)p - Mem.Base);
40 
41  MPI_Gather(&off, sizeof(ptrdiff_t), MPI_BYTE, NULL, sizeof(ptrdiff_t), MPI_BYTE, Island_NTask - 1, SharedMemComm);
42 }
43 
44 void shmem::free_offset_table(ptrdiff_t *&offset_tab) { Mem.myfree(offset_tab); }
45 
47 {
48  simparticles Dp{MPI_COMM_WORLD}; /* dummy needed to access drift functions for ngbtree */
49 
50  /* first, we wait for the parameter All.MaxMemSize, so that we can initialize the memory handler */
51  MPI_Bcast(All.get_data_ptr(), All.get_data_size(), MPI_BYTE, 0, MPI_COMM_WORLD);
53 
54  while(true)
55  {
56  /* wait for an incoming message */
57  MPI_Status status;
58  MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
59 
60  int source = status.MPI_SOURCE;
61  int tag = status.MPI_TAG;
62 
63  int length;
64  MPI_Get_count(&status, MPI_BYTE, &length);
65 
66  /* now pick it up */
67  char *message = (char *)Mem.mymalloc("message", length);
68  MPI_Recv(message, length, MPI_BYTE, source, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
69 
70  if(tag == TAG_METDATA) // signals that we are synchronizing addresses and values for tree access
71  {
72  int handle = *((int *)message);
73  Mem.myfree(message);
74 
75  MPI_Recv(&All.Ti_Current, sizeof(All.Ti_Current), MPI_BYTE, source, TAG_METDATA + 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
76  MPI_Recv(&tree_info[handle].Bd, sizeof(bookkeeping_data), MPI_BYTE, source, TAG_METDATA + 2, MPI_COMM_WORLD,
77  MPI_STATUS_IGNORE);
78 
79  intposconvert *convfac = &Dp;
80  MPI_Recv(convfac, sizeof(intposconvert), MPI_BYTE, source, TAG_METDATA + 3, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
81 
82  prepare_offset_table(NULL, tree_info[handle].TopNodes_offsets);
83  prepare_offset_table(NULL, tree_info[handle].Nodes_offsets);
84  prepare_offset_table(NULL, tree_info[handle].Nextnode_offsets);
85  prepare_offset_table(NULL, tree_info[handle].Points_offsets);
86  prepare_offset_table(NULL, tree_info[handle].P_offsets);
87  prepare_offset_table(NULL, tree_info[handle].SphP_offsets);
88  prepare_offset_table(NULL, tree_info[handle].Foreign_Nodes_offsets);
89  prepare_offset_table(NULL, tree_info[handle].Foreign_Points_offsets);
90  }
91  else if(tag == TAG_HEADER) // signals that we are freeing addresses we stored for tree access
92  {
93  int handle = *((int *)message);
94  Mem.myfree(message);
95 
96  free_offset_table(tree_info[handle].Foreign_Points_offsets);
97  free_offset_table(tree_info[handle].Foreign_Nodes_offsets);
98  free_offset_table(tree_info[handle].SphP_offsets);
99  free_offset_table(tree_info[handle].P_offsets);
100  free_offset_table(tree_info[handle].Points_offsets);
101  free_offset_table(tree_info[handle].Nextnode_offsets);
102  free_offset_table(tree_info[handle].Nodes_offsets);
103  free_offset_table(tree_info[handle].TopNodes_offsets);
104  }
105  else if(tag >= TAG_FETCH_SPH_DENSITY && tag < TAG_FETCH_SPH_DENSITY + MAX_TREE_INFOS) // fetch from SPH density tree
106  {
107  int handle = tag - TAG_FETCH_SPH_DENSITY;
108  deal_with_sph_node_request(message, length, source, handle, &Dp);
109  Mem.myfree(message);
110  }
111  else if(tag >= TAG_FETCH_SPH_HYDRO && tag < TAG_FETCH_SPH_HYDRO + MAX_TREE_INFOS) // fetch from SPH hydro tree
112  {
113  int handle = tag - TAG_FETCH_SPH_HYDRO;
114  deal_with_sph_node_request(message, length, source, handle, &Dp);
115  Mem.myfree(message);
116  }
117  else if(tag >= TAG_FETCH_SPH_TREETIMESTEP && tag < TAG_FETCH_SPH_TREETIMESTEP + MAX_TREE_INFOS) // fetch from SPH timesteptree
118  {
119  int handle = tag - TAG_FETCH_SPH_TREETIMESTEP;
120  deal_with_sph_node_request(message, length, source, handle, &Dp);
121  Mem.myfree(message);
122  }
123  else if(tag >= TAG_FETCH_GRAVTREE && tag < TAG_FETCH_GRAVTREE + MAX_TREE_INFOS) // fetch from gravity tree
124  {
125  int handle = tag - TAG_FETCH_GRAVTREE;
126  deal_with_gravity_node_request(message, length, source, handle);
127  Mem.myfree(message);
128  }
129  else if(tag == TAG_KEY) // request to terminate gracefully
130  {
131  H5Eset_auto(H5E_DEFAULT, NULL, NULL);
132  MPI_Finalize();
133  exit(0);
134  }
135  else if(tag == TAG_TABLE_ALLOC) // take over storage for TableData
136  {
137  size_t tab_len = *((size_t *)message);
138  Mem.myfree(message);
139 
140  TableData = (char *)Mem.mymalloc("table", tab_len);
141  MPI_Recv(TableData, tab_len, MPI_BYTE, source, TAG_DMOM, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
142  ptrdiff_t off = ((char *)TableData - Mem.Base);
143  MPI_Bcast(&off, sizeof(ptrdiff_t), MPI_BYTE, Island_ThisTask, SharedMemComm);
144  }
145  else if(tag == TAG_TABLE_FREE)
146  {
147  Mem.myfree(message);
148  Mem.myfree(TableData);
149  }
150  else if(tag == TAG_EWALD_ALLOC) // take over storage for EwaldData
151  {
152  size_t tab_len = *((size_t *)message);
153  Mem.myfree(message);
154 
155  EwaldData = (char *)Mem.mymalloc("table", tab_len);
156  MPI_Recv(EwaldData, tab_len, MPI_BYTE, source, TAG_DMOM, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
157  ptrdiff_t off = ((char *)EwaldData - Mem.Base);
158  MPI_Bcast(&off, sizeof(ptrdiff_t), MPI_BYTE, Island_ThisTask, SharedMemComm);
159  }
160  else if(tag == TAG_TOPNODE_ALLOC) // take over the storage of common top-level tree data
161  {
162  int handle = num_tree_info++;
164  Terminate("num_tree_info > MAX_TREE_INFOS");
165 
166  size_t *sizep = ((size_t *)message);
167  size_t sizes[4] = {sizep[0], sizep[1], sizep[2], sizep[3]};
168  Mem.myfree(message);
169 
170  tree_info[handle].NodeLevel_storage = (char *)Mem.mymalloc("NodeLevel_storage", sizes[0]);
171  tree_info[handle].NodeSibling_storage = (char *)Mem.mymalloc("NodeSibling_storage", sizes[1]);
172  tree_info[handle].NodeIndex_storage = (char *)Mem.mymalloc("NodeIndex_storage", sizes[2]);
173  tree_info[handle].TopNodes_storage = (char *)Mem.mymalloc("TopNodes_storage", sizes[3]);
174 
175  ptrdiff_t off[4] = {
176  ((char *)tree_info[handle].NodeLevel_storage - Mem.Base), ((char *)tree_info[handle].NodeSibling_storage - Mem.Base),
177  ((char *)tree_info[handle].NodeIndex_storage - Mem.Base), ((char *)tree_info[handle].TopNodes_storage - Mem.Base)};
178 
179  MPI_Send(off, 4 * sizeof(ptrdiff_t), MPI_BYTE, source, TAG_TOPNODE_OFFSET, MPI_COMM_WORLD);
180 
181  MPI_Send(&handle, 1, MPI_INT, source, TAG_N, MPI_COMM_WORLD);
182  }
183  else if(tag == TAG_TOPNODE_FREE) // free the top-level storage for a tree again
184  {
185  int handle = *((int *)message);
186  Mem.myfree(message);
187 
188  num_tree_info--;
189  if(handle != num_tree_info)
190  Terminate("unexpected handle");
191 
192  Mem.myfree(tree_info[handle].TopNodes_storage);
193  Mem.myfree(tree_info[handle].NodeIndex_storage);
194  Mem.myfree(tree_info[handle].NodeSibling_storage);
195  Mem.myfree(tree_info[handle].NodeLevel_storage);
196  }
197  else if(tag == TAG_DRIFT_INIT) // make the shared memory handler update All and init the local drift tables
198  {
199  memcpy(All.get_data_ptr(), message, All.get_data_size());
200  Mem.myfree(message);
202  }
203  else if(tag == TAG_ALL_UPDATE) // make the shared memory handler update the contents of the All structure
204  {
205  memcpy(All.get_data_ptr(), message, All.get_data_size());
206  Mem.myfree(message);
207  }
208  }
209 }
210 
/*! \brief Serve a batch of SPH tree-node fetch requests from a worker task.
 *
 *  The incoming \a message is an array of ntree::node_req entries. For every
 *  requested internal node, the children hanging below it (local particles,
 *  internal nodes, imported tree points) are read directly from the shared
 *  memory of the owning island rank and packed into export buffers, which
 *  are then sent back to \a source in three messages: per-request counts
 *  (TAG_N), particle data (TAG_PDATA) and node data (TAG_SPHDATA).
 *
 *  \param message raw request buffer (array of ntree::node_req)
 *  \param length  byte length of \a message
 *  \param source  rank in MPI_COMM_WORLD that the answer is sent to
 *  \param handle  index into tree_info[] selecting the tree instance
 *  \param Sp      particle-set dummy providing the drift operations
 *
 *  Compiled out entirely for LEAN builds.
 */
void shmem::deal_with_sph_node_request(char *message, int length, int source, int handle, simparticles *Sp)
{
#ifndef LEAN
  bookkeeping_data &Bdat = tree_info[handle].Bd;

  // we got the list of requested nodes
  ntree::node_req *node_req_recv = (ntree::node_req *)message;
  int nrecv = length / sizeof(ntree::node_req);

  /* as part of this message, we get the real actually targeted rank in
   * the simulation communicator. We will translate this to the rank in our shared memory
   * block
   */

  /* now prepare answer message by reading from shared memory */
  /******************* prepare tree answer ********************/

  ntree::node_count_info *node_info_recv =
      (ntree::node_count_info *)Mem.mymalloc("node_info_recv", nrecv * sizeof(ntree::node_count_info));

  /* first let's count how many nodes and particles are hanging below in each case */
  int n_recvpoints = 0;
  int n_recvnodes = 0;

  /* pass 1: walk each requested node's sub-tree only to count entries,
   * so that the export buffers can be allocated with the exact sizes */
  for(int i = 0; i < nrecv; i++)
    {
      node_info_recv[i].count_nodes = 0;
      node_info_recv[i].count_parts = 0;

      int no = node_req_recv[i].foreignnode;
      int task = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task]; // corresponding local shared memory rank, stays fixed below

      /* requests must refer to internal nodes, i.e. indices in [MaxPart, MaxPart + MaxNodes) */
      if(no < Bdat.MaxPart || no >= Bdat.MaxPart + Bdat.MaxNodes)
        Terminate("not an internal node");

      ngbnode *nop = ((ngbnode *)get_basenodep(no, shmrank, handle)) + no;

      /* traverse the node's children via the nextnode/sibling threading until
       * the walk returns to the node's own sibling */
      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < 0)
            Terminate("p=%d < 0", p);

          if(p < Bdat.MaxPart) /* a local particle */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              node_info_recv[i].count_nodes++;
              /* skip over the node's whole sub-tree by jumping to its sibling */
              p = (((ngbnode *)get_basenodep(p, shmrank, handle)) + p)->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset && p < Bdat.EndOfTreePoints) /* an imported tree point */
            {
              node_info_recv[i].count_parts++;
              /* imported points index the nextnode array with an offset of -MaxNodes */
              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
          else
            Terminate("p=%d MaxPart=%d MaxNodes=%d", p, Bdat.MaxPart, Bdat.MaxNodes);
        }

      if(node_info_recv[i].count_parts == 0 && node_info_recv[i].count_nodes == 0)
        Terminate("strange: we have we requested an empty node?\n");

      n_recvpoints += node_info_recv[i].count_parts;
      n_recvnodes += node_info_recv[i].count_nodes;
    }

  foreign_sphpoint_data *exportbuf_points = (foreign_sphpoint_data *)Mem.mymalloc_movable(
      &exportbuf_points, "exportbuf_points", n_recvpoints * sizeof(foreign_sphpoint_data));
  ngbnode *exportbuf_nodes = (ngbnode *)Mem.mymalloc_movable(&exportbuf_nodes, "exportbuf_nodes", n_recvnodes * sizeof(ngbnode));

  n_recvpoints = 0;
  n_recvnodes = 0;

  /* pass 2: repeat the identical walk, this time copying the data into the export buffers */
  for(int i = 0; i < nrecv; i++)
    {
      int no = node_req_recv[i].foreignnode;
      int task = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task]; // corresponding local shared memory rank, stays fixed below

      ngbnode *nop = ((ngbnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < Bdat.MaxPart) /* a local particle */
            {
              int off = n_recvpoints++;

              foreign_sphpoint_data *expoints = &exportbuf_points[off];

              particle_data *ptr = (particle_data *)get_Pp(shmrank, handle) + p;
              sph_particle_data *sph_ptr = (sph_particle_data *)get_SphPp(shmrank, handle) + p;

              particle_data p_copy;
              sph_particle_data sphp_copy;

              if(ptr->get_Ti_Current() != All.Ti_Current)
                {
                  /* Because of possible lightcone output, shared memory fetch not allowed to drift original,
                   * because this rank doesn't have a lightcone buffer. The original node needs to do it.
                   * We thus drift a copy of the particle without allowing lightcone access
                   */
#ifndef LEAN
                  while(ptr->access.test_and_set(std::memory_order_acquire))
                    ; // acquire spin lock
#endif
                  p_copy = *ptr;
                  sphp_copy = *sph_ptr;

#ifndef LEAN
                  ptr->access.clear(std::memory_order_release); // release spin lock
#endif
                  p_copy.access.clear(std::memory_order_release); // clear spin lock in copy

                  /* use the copy from now on */

                  ptr = &p_copy;
                  sph_ptr = &sphp_copy;

                  // the final flag tells the drift to not consider lightcone crossings
                  Sp->drift_particle(ptr, sph_ptr, All.Ti_Current, true);
                }

              expoints->IntPos[0] = ptr->IntPos[0];
              expoints->IntPos[1] = ptr->IntPos[1];
              expoints->IntPos[2] = ptr->IntPos[2];
              expoints->Mass = ptr->getMass();
              expoints->TimeBinHydro = ptr->TimeBinHydro;
              expoints->SphCore = *sph_ptr;

              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              int off = n_recvnodes++;

              ngbnode *sourcep = (((ngbnode *)get_basenodep(p, shmrank, handle)) + p);
              ngbnode *exnodes = &exportbuf_nodes[off];

              /* ngbnodes are drifted in place in shared memory, unlike particles above;
               * presumably node drifting has no lightcone interaction -- TODO confirm */
              if(sourcep->Ti_Current != All.Ti_Current)
                sourcep->drift_node(All.Ti_Current, Sp);

              *exnodes = *sourcep;

              /* mark the exported node so the receiver treats it as a remote leaf,
               * and record where it came from for potential follow-up fetches */
              exnodes->cannot_be_opened_locally = 1;
              exnodes->nextnode = -1;
              exnodes->sibling = -1;
              exnodes->OriginTask = task;
              exnodes->OriginNode = p;

              p = sourcep->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset) /* an imported treepoint particle */
            {
              /* pass 1 already classified these as count_parts, but for the SPH
               * case they are not expected during the copy pass */
              Terminate("not expected here");
            }
        }
    }

  /************************************************************/

  /* first tell the requester how many particles/nodes belong to each request... */
  MPI_Send(node_info_recv, nrecv * sizeof(ntree::node_count_info), MPI_BYTE, source, TAG_N, MPI_COMM_WORLD);

  /* now transfer the points and nodes */
  if(n_recvpoints > 0)
    MPI_Send(exportbuf_points, n_recvpoints * sizeof(foreign_sphpoint_data), MPI_BYTE, source, TAG_PDATA, MPI_COMM_WORLD);

  if(n_recvnodes > 0)
    MPI_Send(exportbuf_nodes, n_recvnodes * sizeof(ngbnode), MPI_BYTE, source, TAG_SPHDATA, MPI_COMM_WORLD);

  /* free in reverse order of allocation (stack-like memory manager) */
  Mem.myfree(exportbuf_nodes);
  Mem.myfree(exportbuf_points);
  Mem.myfree(node_info_recv);
#endif
}
394 
/*! \brief Serve a batch of gravity tree-node fetch requests from a worker task.
 *
 *  Counterpart of deal_with_sph_node_request() for the gravity tree: the
 *  incoming \a message is an array of gtree::node_req entries. The children
 *  hanging below each requested internal node (local particles, internal
 *  nodes, imported tree points) are read from the shared memory of the
 *  owning island rank and returned to \a source as per-request counts
 *  (TAG_N), particle data (TAG_PDATA) and node data (TAG_SPHDATA).
 *
 *  \param message raw request buffer (array of gtree::node_req)
 *  \param length  byte length of \a message
 *  \param source  rank in MPI_COMM_WORLD that the answer is sent to
 *  \param handle  index into tree_info[] selecting the tree instance
 */
void shmem::deal_with_gravity_node_request(char *message, int length, int source, int handle)
{
  bookkeeping_data &Bdat = tree_info[handle].Bd;

  // we got the list of requested nodes
  gtree::node_req *node_req_recv = (gtree::node_req *)message;
  int nrecv = length / sizeof(gtree::node_req);

  /* as part of this message, we get the real actually targeted rank in
   * the simulation communicator. We will translate this to the rank in our shared memory
   * block
   */

  /* now prepare answer message by reading from shared memory */
  /******************* prepare tree answer ********************/

  gtree::node_count_info *node_info_recv =
      (gtree::node_count_info *)Mem.mymalloc("node_info_recv", nrecv * sizeof(gtree::node_count_info));

  /* first let's count how many nodes and particles are hanging below in each case */
  int n_recvpoints = 0;
  int n_recvnodes = 0;

  /* pass 1: walk each requested node's sub-tree only to count entries,
   * so that the export buffers can be allocated with the exact sizes */
  for(int i = 0; i < nrecv; i++)
    {
      node_info_recv[i].count_nodes = 0;
      node_info_recv[i].count_parts = 0;

      int no = node_req_recv[i].foreignnode;
      int task = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task]; // corresponding local shared memory rank, stays fixed below

      /* requests must refer to internal nodes, i.e. indices in [MaxPart, MaxPart + MaxNodes) */
      if(no < Bdat.MaxPart || no >= Bdat.MaxPart + Bdat.MaxNodes)
        Terminate("not an internal node");

      gravnode *nop = ((gravnode *)get_basenodep(no, shmrank, handle)) + no;

      /* traverse the node's children via the nextnode/sibling threading until
       * the walk returns to the node's own sibling */
      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < 0)
            Terminate("p=%d < 0", p);

          if(p < Bdat.MaxPart) /* a local particle */
            {
              node_info_recv[i].count_parts++;
              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              node_info_recv[i].count_nodes++;
              /* skip over the node's whole sub-tree by jumping to its sibling */
              p = (((gravnode *)get_basenodep(p, shmrank, handle)) + p)->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset && p < Bdat.EndOfTreePoints) /* an imported tree point */
            {
              node_info_recv[i].count_parts++;
              /* imported points index the nextnode array with an offset of -MaxNodes */
              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
          else
            Terminate("p=%d MaxPart=%d MaxNodes=%d", p, Bdat.MaxPart, Bdat.MaxNodes);
        }

      if(node_info_recv[i].count_parts == 0 && node_info_recv[i].count_nodes == 0)
        Terminate("strange: we have we requested an empty node?\n");

      n_recvpoints += node_info_recv[i].count_parts;
      n_recvnodes += node_info_recv[i].count_nodes;
    }

  foreign_gravpoint_data *exportbuf_points = (foreign_gravpoint_data *)Mem.mymalloc_movable(
      &exportbuf_points, "exportbuf_points", n_recvpoints * sizeof(foreign_gravpoint_data));
  gravnode *exportbuf_nodes = (gravnode *)Mem.mymalloc_movable(&exportbuf_nodes, "exportbuf_nodes", n_recvnodes * sizeof(gravnode));

  n_recvpoints = 0;
  n_recvnodes = 0;

  /* pass 2: repeat the identical walk, this time copying the data into the export buffers */
  for(int i = 0; i < nrecv; i++)
    {
      int no = node_req_recv[i].foreignnode;
      int task = node_req_recv[i].foreigntask;
      int shmrank = GetShmRankForSimulCommRank[task]; // corresponding local shared memory rank, stays fixed below

      gravnode *nop = ((gravnode *)get_basenodep(no, shmrank, handle)) + no;

      int p = nop->nextnode;

      while(p != nop->sibling)
        {
          if(p < Bdat.MaxPart) /* a local particle */
            {
              int off = n_recvpoints++;

              foreign_gravpoint_data *expoints = &exportbuf_points[off];

              particle_data *ptr = (particle_data *)get_Pp(shmrank, handle) + p;

              expoints->IntPos[0] = ptr->IntPos[0];
              expoints->IntPos[1] = ptr->IntPos[1];
              expoints->IntPos[2] = ptr->IntPos[2];
              expoints->Mass = ptr->getMass();
              expoints->Type = ptr->getType();
              expoints->OldAcc = ptr->OldAcc;
#if NSOFTCLASSES > 1
              expoints->SofteningClass = ptr->getSofteningClass();
#endif
#if defined(PMGRID) && defined(PLACEHIGHRESREGION)
              expoints->InsideOutsideFlag = ptr->InsideOutsideFlag;
#endif
              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p];
            }
          else if(p < Bdat.MaxPart + Bdat.MaxNodes) /* an internal node */
            {
              int off = n_recvnodes++;

              gravnode *exnodes = &exportbuf_nodes[off];
              gravnode *sourcep = (((gravnode *)get_basenodep(p, shmrank, handle)) + p);

              memcpy(static_cast<void *>(exnodes), static_cast<void *>(sourcep),
                     sizeof(gravnode)); // cannot do a *exnodes = *sourcep; because out std::atomic_flag
                                        // has a deleted default copy operator

              /* mark the exported node so the receiver treats it as a remote leaf,
               * and record where it came from for potential follow-up fetches */
              exnodes->cannot_be_opened_locally = 1;
              exnodes->nextnode = -1;
              exnodes->sibling = -1;
              exnodes->OriginTask = task;
              exnodes->OriginNode = p;

              p = sourcep->sibling;
            }
          else if(p >= Bdat.ImportedNodeOffset) /* an imported Treepoint particle */
            {
              int off = n_recvpoints++;

              foreign_gravpoint_data *expoints = &exportbuf_points[off];
              /* translate the tree index into an index of the points array */
              int n = p - Bdat.ImportedNodeOffset;
              gravpoint_data *pointsp = ((gravpoint_data *)get_pointsp(shmrank, handle)) + n;

              expoints->IntPos[0] = pointsp->IntPos[0];
              expoints->IntPos[1] = pointsp->IntPos[1];
              expoints->IntPos[2] = pointsp->IntPos[2];
              expoints->Mass = pointsp->Mass;
              expoints->Type = pointsp->Type;
              expoints->OldAcc = pointsp->OldAcc;
#if NSOFTCLASSES > 1
              expoints->SofteningClass = pointsp->SofteningClass;
#endif
#if defined(PMGRID) && defined(PLACEHIGHRESREGION)
              expoints->InsideOutsideFlag = pointsp->InsideOutsideFlag;
#endif
              expoints->Nextnode = -1;

              p = get_nextnodep(shmrank, handle)[p - Bdat.MaxNodes];
            }
        }
    }

  /************************************************************/

  /* first tell the requester how many particles/nodes belong to each request... */
  MPI_Send(node_info_recv, nrecv * sizeof(gtree::node_count_info), MPI_BYTE, source, TAG_N, MPI_COMM_WORLD);

  /* now transfer the points and nodes */
  if(n_recvpoints > 0)
    MPI_Send(exportbuf_points, n_recvpoints * sizeof(foreign_gravpoint_data), MPI_BYTE, source, TAG_PDATA, MPI_COMM_WORLD);

  if(n_recvnodes > 0)
    MPI_Send(exportbuf_nodes, n_recvnodes * sizeof(gravnode), MPI_BYTE, source, TAG_SPHDATA, MPI_COMM_WORLD);

  /* free in reverse order of allocation (stack-like memory manager) */
  Mem.myfree(exportbuf_nodes);
  Mem.myfree(exportbuf_points);
  Mem.myfree(node_info_recv);
}
global_data_all_processes All
Definition: main.cc:40
void init_drift_table(void)
Definition: driftfac.cc:26
char * Base
Definition: mymalloc.h:49
void mymalloc_init(int maxmemsize, enum restart_options restartflag)
Initialize memory manager.
Definition: mymalloc.cc:54
char * get_basenodep(int no, unsigned char shmrank, int handle)
char * get_SphPp(unsigned char shmrank, int handle)
void prepare_offset_table(void *p, ptrdiff_t *&offset_tab)
void deal_with_gravity_node_request(char *message, int length, int source, int handle)
MPI_Comm SharedMemComm
int * get_nextnodep(unsigned char shmrank, int handle)
char * TableData
void deal_with_sph_node_request(char *message, int length, int source, int handle, simparticles *Sp)
void shared_memory_handler(void)
int Island_ThisTask
char * get_pointsp(unsigned char shmrank, int handle)
void inform_offset_table(void *p)
int Island_NTask
int * GetShmRankForSimulCommRank
tree_storage_info tree_info[MAX_TREE_INFOS]
char * get_Pp(unsigned char shmrank, int handle)
char * EwaldData
void free_offset_table(ptrdiff_t *&offset_tab)
int drift_particle(particle_data *P, sph_particle_data *SphP, integertime time1, bool ignore_light_cone=false)
This function drifts a particle i to time1.
Definition: predict.cc:313
@ RST_BEGIN
Definition: dtypes.h:313
#define Terminate(...)
Definition: macros.h:19
driftfac Driftfac
Definition: main.cc:41
#define TAG_SPHDATA
Definition: mpi_utils.h:28
#define TAG_HEADER
Definition: mpi_utils.h:26
#define TAG_TABLE_FREE
Definition: mpi_utils.h:24
#define TAG_FETCH_SPH_DENSITY
Definition: mpi_utils.h:103
#define TAG_FETCH_SPH_TREETIMESTEP
Definition: mpi_utils.h:105
#define TAG_ALL_UPDATE
Definition: mpi_utils.h:100
#define TAG_TOPNODE_FREE
Definition: mpi_utils.h:19
#define TAG_PDATA
Definition: mpi_utils.h:27
#define TAG_DMOM
Definition: mpi_utils.h:30
#define TAG_FETCH_GRAVTREE
Definition: mpi_utils.h:102
#define TAG_TOPNODE_ALLOC
Definition: mpi_utils.h:21
#define TAG_EWALD_ALLOC
Definition: mpi_utils.h:22
#define TAG_N
Definition: mpi_utils.h:25
#define TAG_KEY
Definition: mpi_utils.h:29
#define TAG_FETCH_SPH_HYDRO
Definition: mpi_utils.h:104
#define TAG_TABLE_ALLOC
Definition: mpi_utils.h:23
#define TAG_TOPNODE_OFFSET
Definition: mpi_utils.h:20
#define TAG_METDATA
Definition: mpi_utils.h:101
#define TAG_DRIFT_INIT
Definition: mpi_utils.h:99
memory Mem
Definition: main.cc:44
gravtree< simparticles > gtree
ngbtree ntree
#define MAX_TREE_INFOS
std::atomic< unsigned char > cannot_be_opened_locally
Definition: tree.h:70
int OriginNode
Definition: tree.h:62
int nextnode
Definition: tree.h:57
int OriginTask
Definition: tree.h:61
int sibling
Definition: tree.h:53
unsigned char Type
Definition: gravtree.h:134
unsigned char SofteningClass
Definition: gravtree.h:137
MyIntPosType IntPos[3]
Definition: gravtree.h:130
sph_particle_data_hydrocore SphCore
Definition: ngbtree.h:105
signed char TimeBinHydro
Definition: ngbtree.h:113
MyIntPosType IntPos[3]
Definition: ngbtree.h:108
char * get_data_ptr(void)
Definition: allvars.h:360
integertime Ti_Current
Definition: allvars.h:188
size_t get_data_size(void)
Definition: allvars.h:362
MyDouble Mass
Definition: gravtree.h:103
unsigned char Type
Definition: gravtree.h:107
unsigned char SofteningClass
Definition: gravtree.h:109
float OldAcc
Definition: gravtree.h:104
MyIntPosType IntPos[3]
Definition: gravtree.h:102
std::atomic< integertime > Ti_Current
Definition: ngbtree.h:56
void drift_node(integertime time1, simparticles *Sp)
Definition: ngbtree.h:58
std::atomic_flag access
Definition: particle_data.h:88
integertime get_Ti_Current(void)
MyDouble getMass(void)
unsigned char getSofteningClass(void)
unsigned char getType(void)
signed char TimeBinHydro
Definition: particle_data.h:73
MyIntPosType IntPos[3]
Definition: particle_data.h:53