#ifndef RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H
#define RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H

#include "LocalStiffnessMatrices.h"

// Headers needed by the code below (some may also arrive transitively
// through LocalStiffnessMatrices.h):
#include <mpi.h>
#include <sys/time.h>

#include <algorithm>
#include <chrono>
#include <iostream>
#include <random>
#include <thread>
#include <vector>
// The extracted source shows this snippet at file scope; it is wrapped in a
// small helper here so the header stays valid C++. It sleeps for a random
// 0-2 seconds, e.g. to stagger process start-up while testing.
inline void randomSleep() {
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_int_distribution<> dis(0, 2);
    int sleepTime = dis(gen);
    std::this_thread::sleep_for(std::chrono::seconds(sleepTime));
}
enum LS_TAGS { TAG_READY_WORKER = 11, TAG_START_WORK = 12, TAG_TERMINATE_WORK = 13 };
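
// Dynamic master/worker distribution: rank 0 acts as the master. Each worker
// announces itself with TAG_READY_WORKER; the master answers with one Depth
// under TAG_START_WORK and, once all depths are handed out, with
// TAG_TERMINATE_WORK. The last number_processes_ ranks act as workers and
// compute the local stiffness matrices for the depths they receive.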
class LocalStiffnessMatricesDynamicDistribution : public LocalStiffnessMatrices {
public:
    LocalStiffnessMatricesDynamicDistribution(AdaptiveSparseGrid& sg,
                                              StencilTemplate& stencilClass,
                                              int number_processes_)
        : LocalStiffnessMatrices(sg, stencilClass, number_processes_) {
        int rank = 0, size = 1;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        // Guard reconstructed: the original bails out when MPI is not
        // available, but the exact check is not part of the extracted lines.
        if (size <= 1) {
            std::cout << "LocalStiffness only with MPI!" << std::endl;
            return;
        }
        MPI_Barrier(MPI_COMM_WORLD);
        if (rank == 0) {
            // Master branch; the loop body is not part of the extracted
            // lines. The actual distribution happens in sendWork() below.
            for (auto it = depthList.begin_all(); it != depthList.end_all(); ++it) {
                // ...
            }
            sendWork();      // call reconstructed; see the master loop below
        } else if (rank >= size - number_processes_) {
            receiveWork();   // call reconstructed; see the worker loop below
        }
        MPI_Barrier(MPI_COMM_WORLD);
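
        // After the distribution phase only the master knows which worker
        // computed which depth, so the depth->rank mapping is broadcast to
        // every rank before anyone proceeds.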
        for (auto it = depthList.begin_all(); it != depthList.end_all(); ++it) {
            Depth T = *it;   // reconstructed from context
            int node = 0;
            if (rank == 0)   // guard reconstructed: only the master has the mapping
                node = distributedDepthsHashtable.getNodeForDepth(T);
            MPI_Bcast(&node, 1, MPI_INT, 0, MPI_COMM_WORLD);
            distributedDepthsHashtable.setNodeDepth(node, T);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    // Master loop: answer each idle worker with the next depth, then shut
    // all workers down.
    void sendWork() {
        MPI_Status status;
        int message = 0;
        int d[DimensionSparseGrid];
        auto it = depthList.begin_all();
        for (; it != depthList.end_all(); ++it) {
            Depth T = *it;   // reconstructed from context
            // Wait until some worker reports ready, then receive its message.
            MPI_Probe(MPI_ANY_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            MPI_Recv(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            // Serialize the depth and send it to exactly that worker.
            for (int j = 0; j < DimensionSparseGrid; j++) d[j] = T.at(j);
            MPI_Send(d, DimensionSparseGrid, MPI_INT, status.MPI_SOURCE, TAG_START_WORK, MPI_COMM_WORLD);
            // Record which rank owns this depth.
            distributedDepthsHashtable.setNodeDepth(status.MPI_SOURCE, T);
        }
        // All depths are handed out: absorb each worker's final ready
        // message and answer it with the termination tag.
        int size;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        for (int i = size - number_processes; i < size; i++) {
            MPI_Probe(i, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            MPI_Recv(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            MPI_Send(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_TERMINATE_WORK, MPI_COMM_WORLD);
        }
    }
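
    // Design note: the final loop drains each worker's last TAG_READY_WORKER
    // message before answering with TAG_TERMINATE_WORK, so no ready message
    // is left unmatched at shutdown and every worker observes a stop signal.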
    // Worker loop: announce readiness, then compute or stop.
    void receiveWork() {
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Status status;
        int message = 1, d[DimensionSparseGrid];   // payload of ready message is irrelevant
        const int master_rank = 0;
        bool terminate = false;   // loop control reconstructed
        while (!terminate) {
            MPI_Send(&message, 1, MPI_INT, master_rank, TAG_READY_WORKER, MPI_COMM_WORLD);
            MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);   // tag decides the next step
            if (status.MPI_TAG == TAG_START_WORK) {
                MPI_Recv(d, DimensionSparseGrid, MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
                Depth D;   // reconstructed from context
                for (int j = 0; j < DimensionSparseGrid; j++) D.set(d[j], j);
                calculateEntries(D);   // call reconstructed
            }
            if (status.MPI_TAG == TAG_TERMINATE_WORK) {
                MPI_Recv(&message, 1, MPI_INT, 0, TAG_TERMINATE_WORK, MPI_COMM_WORLD, &status);
                terminate = true;
            }
        }
    }
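
    // Design note: MPI_Probe lets the worker inspect the tag before posting
    // the matching MPI_Recv, so one ready message can be answered either with
    // a depth payload or with an empty termination message.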
    void calculateEntries(Depth& depth);

    DistributedDepthsHashtable getDistributedDepthsHashtable() {
        return distributedDepthsHashtable;
    }
    // Rebuild every stored local matrix with an additional stencil.
    void addStencil(StencilTemplate& newStencil) {
        for (auto& item : pairedDepthsLocalStiffnessMatrices) {
            Depth T = item.first;
            newStencil.initialize(T);
            auto& matrices = item.second;
            int end = matrices.size();
#pragma omp parallel for
            for (int i = 0; i < end; i++) {
                auto& matrix = matrices.at(i);
                CellDimension cell = *matrix.getCell();
                LocalStiffnessMatrixFixedDepthSymmetric matrix_add(cell, grid, newStencil);
                // (Lines missing in the extracted source; presumably
                // matrix_add is accumulated onto matrix here.)
            }
        }
    }
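
    // Design note: the OpenMP loop parallelizes over distinct matrices, so
    // iterations do not write to shared state; this assumes that evaluating
    // newStencil is read-only once initialize(T) has been called.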
    // Depth-wise comparison against another LocalStiffnessMatrices object.
    bool operator==(LocalStiffnessMatrices& localStiffnessMatrices) {
        for (auto& item : pairedDepthsLocalStiffnessMatrices) {
            Depth T = item.first;
            // Find the entry with the same depth on the other side.
            auto it = std::find_if(
                localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->begin(),
                localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->end(),
                [&T](const std::pair<Depth, std::vector<LocalStiffnessMatrices::LocalStiffnessMatrixFixedDepthSymmetric>>& pair) {
                    return pair.first == T;
                });
            if (it != localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->end()) {
                std::vector<LocalStiffnessMatrices::LocalStiffnessMatrixFixedDepthSymmetric>& matrices_add = it->second;
                // Compare matrices belonging to the same cell.
                for (auto& matrix : item.second) {
                    for (auto& matrix_add : matrices_add) {
                        CellDimension cellCompare = *matrix.getCell();
                        if (cellCompare == *matrix_add.getCell()) {
                            if (!(matrix_add == matrix))
                                return false;   // closing logic reconstructed
                        }
                    }
                }
            }
        }
        return true;   // reconstructed: all compared pairs matched
    }
};
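
// calculateEntries builds all local stiffness matrices for one depth: it
// initializes the stencil once, then constructs one symmetric fixed-depth
// matrix per cell of that depth's grid, parallelized with OpenMP.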
void LocalStiffnessMatricesDynamicDistribution::calculateEntries(Depth &depth) {
    struct timeval begin_time, end_time;
    gettimeofday(&begin_time, 0);

    stencilClass.initialize(depth);
    std::vector<LocalStiffnessMatrixFixedDepthSymmetric> vector;
    Depth Tcell = depth;   // definition reconstructed; derived from depth in lines not shown
    const SingleDepthHashCellStructure &depthGrid = cellData.getGridForDepth(Tcell);
    const auto &map = depthGrid._map;
    const auto end = depthGrid.getNumberOfEntries();

#pragma omp parallel for schedule(dynamic)
    for (size_t i = 0; i < end; i++) {
        CellDimension cellDimension = map.getIndexOfTable(i);
        LocalStiffnessMatrixFixedDepthSymmetric localStiffnessMatrixFixedDepthSymmetric(
            cellDimension, grid, stencilClass);
#pragma omp critical   // guard reconstructed: push_back is not thread-safe
        vector.push_back(localStiffnessMatrixFixedDepthSymmetric);
    }
    pairedDepthsLocalStiffnessMatrices.emplace_back(depth, vector);

    gettimeofday(&end_time, 0);
    long seconds = end_time.tv_sec - begin_time.tv_sec;         // declaration reconstructed
    long microseconds = end_time.tv_usec - begin_time.tv_usec;  // declaration reconstructed
    double t = seconds + microseconds * 1e-6;   // elapsed time; presumably logged in lines not shown
}
#endif // RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H
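
// Usage sketch (illustrative, not from the original source): how this class
// is meant to be driven under MPI. Grid and stencil construction are
// assumptions; their real constructors live elsewhere in this project.
//
//   MPI_Init(&argc, &argv);
//   AdaptiveSparseGrid sg = /* build the adaptive sparse grid */;
//   StencilTemplate stencil = /* build the stencil */;
//   // The last 4 of the MPI ranks act as workers:
//   LocalStiffnessMatricesDynamicDistribution lsm(sg, stencil, 4);
//   MPI_Finalize();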