Skip to content
Snippets Groups Projects
Commit ea293e81 authored by Paul Heinzlreiter's avatar Paul Heinzlreiter
Browse files

* debugging

parent 74a74c25
Branches
No related merge requests found
......@@ -56,14 +56,12 @@ namespace nbody {
void BarnesHutTree::split(Node* current) {
vector<Box> subboxes = BarnesHutTree::splitBB(current);
//cout << "splitting " << subboxes.size() << endl;
current->leaf = false;
Node* after = current->next;
for (vector<Box>::iterator it = subboxes.begin(); it != subboxes.end(); it++) {
Node* child = new Node(current->tree);
//cout << "new node" << endl;
pthread_rwlock_wrlock(&child->lock);
child->parent = current;
child->bb = *it;
......@@ -77,7 +75,6 @@ namespace nbody {
child->prev->afterSubtree = child;
}
//this->control.toProcess.push(child);
//cout << "storing " << child << endl;
}
after->prev->afterSubtree = current->afterSubtree;
current->bodies.clear();
......@@ -88,7 +85,6 @@ namespace nbody {
void BarnesHutTree::splitNode(Node* current) {
//this->control.toProcess.pop();
//cout << "storing " << current << endl;
if (current->isSplitable()) {
split(current);
}
......@@ -114,9 +110,8 @@ namespace nbody {
//this->control.toProcess.push(current);
//iterate over existing boxes and split if a box contains too many bodies
//this->numberOfNodes()
BarnesHutTree::splitSubtree(this->nodes->next);
//this->update();
this->update();
}
......@@ -173,18 +168,15 @@ namespace nbody {
bool toSplitLeft;
Node* current = root;
//cout << "split" << endl;
do {
toSplitLeft = false;
while (current != root->afterSubtree) {
if (current->isSplitable()) {
split(current);
//cout << "splitted" << endl;
toSplitLeft = true;
}
current = current->next;
}
//cout << "loop: " << current->tree->numberOfNodes() << endl;
} while (toSplitLeft);
}
}
......@@ -21,9 +21,8 @@ int main(int argc, char* argv[]) {
//local tree is built and correct domains are distributed
simulation.runStep();
}
simulation.cleanup();
*/
//simulation.cleanup();
MPI_Finalize();
return 0;
}
......@@ -72,6 +72,10 @@ namespace nbody {
MpiSimulation::~MpiSimulation() {
    // Tear down simulation-owned resources.
    delete[] this->domains;
    delete this->tree;
    // Each SendStore may still back an in-flight MPI_Isend (see send()).
    // Freeing the buffer while the transfer is pending is erroneous, so
    // complete the request first. MPI calls are only legal before
    // MPI_Finalize; MPI_Finalized() is explicitly callable at any time,
    // so use it to guard (the destructor may run after finalization).
    int finalized = 0;
    MPI_Finalized(&finalized);
    while (!this->sendStores.empty()) {
        SendStore& store = this->sendStores.back();
        if (!finalized) {
            // Waiting on an already-completed (or MPI_REQUEST_NULL)
            // request returns immediately, so this is always safe here.
            MPI_Wait(&store.request, MPI_STATUS_IGNORE);
        }
        delete[] store.bodies;
        this->sendStores.pop_back();
    }
}
int MpiSimulation::getNumberOfProcesses() {
......@@ -88,11 +92,14 @@ namespace nbody {
void MpiSimulation::send(vector<Body> bodies, int target) {
    // Non-blocking send of a body list to rank 'target'. The bodies are
    // staged into a reusable SendStore buffer so the caller's vector can
    // be destroyed while the MPI_Isend is still in flight; the matching
    // request is completed lazily in availableSendStore() / the destructor.
    int bodySize = bodies.size();
    SendStore* store = this->availableSendStore(bodySize);
    if (bodySize > 0) {
        // Guard the copy: &(bodies[0]) on an empty vector is undefined
        // behavior. A zero-count MPI_Isend below is still legal and keeps
        // the original "always send" protocol intact.
        memcpy(store->bodies, &(bodies[0]), bodySize * sizeof(Body));
    }
    MPI_Isend(store->bodies, bodySize, this->bodyType, target, 0, MPI_COMM_WORLD, &store->request);
}
void MpiSimulation::recv(vector<Body>& bodies, int source) {
void MpiSimulation::recv(vector<Body>& bodies) {
MPI_Status status;
int count;
Body* lb;
......@@ -145,7 +152,7 @@ namespace nbody {
this->send(nodes[i].getBodies(), i);
}
} else {
this->recv(this->bodies, 0);
this->recv(this->bodies);
}
}
......@@ -170,67 +177,87 @@ namespace nbody {
}
}
SendStore* MpiSimulation::availableSendStore(int numElems) {
    // Scan existing stores: recycle one whose previous transfer has
    // completed and whose buffer is large enough; discard completed
    // stores that are too small; leave in-flight stores untouched.
    for (vector<SendStore>::iterator cur = this->sendStores.begin();
         cur != this->sendStores.end();) {
        int done = 0;
        MPI_Test(&cur->request, &done, MPI_STATUS_IGNORE);
        if (!done) {
            ++cur;                              // still sending - keep as-is
        } else if (cur->size >= numElems) {
            return &(*cur);                     // completed and big enough - reuse
        } else {
            delete[] cur->bodies;               // completed but undersized - drop
            cur = this->sendStores.erase(cur);
        }
    }
    // Nothing reusable: allocate a fresh store sized for this message.
    // NOTE(review): the returned pointer aims into a vector; a later
    // push_back may invalidate it, so callers must use it immediately.
    SendStore fresh;
    fresh.bodies = new Body[numElems];
    fresh.size = numElems;
    this->sendStores.push_back(fresh);
    return &(this->sendStores.back());
}
void MpiSimulation::distributeLETs() {
// NOTE(review): this block appears to be a mangled diff merge (the scrape
// interleaves removed and added lines): the /* opened below is closed at
// the inner */, leaving 'received++' referencing a variable declared
// inside the comment and a stray trailing */. Reconstruct against the
// actual commit before relying on this text.
//send out locally essential trees (local bodies needed by remote simulations)
/*
for (int i = 0; i < this->parallelSize; i++) {
if (i != this->parallelRank) {
printBB(this->parallelRank, this->domains[i]);
//printBB(this->parallelRank, this->domains[i]);
vector<Body> refinements = this->tree->copyRefinements(this->domains[i]);
//cout << "ref " << refinements.size() << endl;
vector<MpiBodyComm>::iterator it = this->comms.begin();
while (it != this->comms.end() && !it->sendUnblocking(i, refinements)) {
it++;
}
if (it == this->comms.end()) {
this->comms.push_back(MpiBodyComm(&this->bodyType));
this->comms.back().sendUnblocking(i, refinements);
}
//cout << this->parallelRank << " -> " << i << ": " << refinements.size() << endl;
this->send(refinements, i);
}
}
//receive bodies and integrate them into local tree for simulation
int received = 0;
while (received < this->parallelSize - 1) {
vector<Body> refinements;
this->recv(refinements);
//this->tree->mergeLET(refinements);
/*
//integrate bodies in order of arrival to do communication/computation overlapping
this->comms[0].recvBlocking(MPI_ANY_SOURCE, refinements);
//cout << "recv: " << refinements.size() << endl;
this->tree->mergeLET(refinements);
//this->tree.getRootBB().print();
*/
// NOTE(review): from here on the code is active again but depends on
// 'received', which the comment block above swallowed - broken as shown.
received++;
}
if (!this->tree->isCorrect()) {
cout << "WRONG" << endl;
}
*/
}
void MpiSimulation::buildTree() {
    // (Re)build the local Barnes-Hut tree from the current body set.
    // When a valid global domain is known, it is used as the bounding
    // box; otherwise the tree derives its own bounds from the bodies.
    if (!this->bodies.empty()) {
        if (isValid(this->overallDomain)) {
            this->tree->build(this->bodies, this->overallDomain);
        } else {
            this->tree->build(this->bodies);
        }
    }
    // Sanity report: either flag a broken tree or log its node count.
    if (this->tree->isCorrect()) {
        cout << this->parallelRank << " tree: " << this->tree->numberOfNodes() << endl;
    } else {
        cout << "wrong tree" << endl;
    }
}
void MpiSimulation::rebuildTree() {
// Rebuild the existing tree in place, re-bounding it to the overall
// (global) simulation domain; the bodies already held by the tree are kept.
this->tree->rebuild(this->overallDomain);
//this->tree.getRootBB().print(this->parallelRank);
}
void MpiSimulation::runStep() {
this->distributeLETs();
/*
this->tree->computeForces();
this->distributeDomains(this->tree->advance());
this->rebuildTree();
*/
if (!this->tree->isCorrect()) {
cout << "WRONG" << endl;
......
......@@ -10,15 +10,23 @@
namespace nbody {
using namespace std;
// Staging buffer for one non-blocking body transfer (MPI_Isend): 'bodies'
// is an owned new[] array of capacity 'size' elements, and 'request' is
// the handle of the outstanding send against that buffer.
// Note: the former C-style 'typedef struct _SendStore' used the reserved
// identifier _SendStore (leading underscore + uppercase); in C++ a plain
// struct declares the same type name SendStore directly.
struct SendStore {
    Body* bodies;        // owned send buffer (freed with delete[])
    MPI_Request request; // pending MPI_Isend request for 'bodies'
    int size;            // capacity of 'bodies' in Body elements
};
class MpiSimulation : public Simulation {
protected:
MPI_Datatype bodyType;
MPI_Datatype boxType;
Box* domains;
Box overallDomain;
vector<SendStore> sendStores;
virtual SendStore* availableSendStore(int numElems);
virtual void send(vector<Body> bodies, int target);
virtual void recv(vector<Body>& bodies, int source);
virtual void recv(vector<Body>& bodies);
public:
MpiSimulation(int& argc, char**& argv);
virtual ~MpiSimulation();
......
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment