//
// LoAdSG: LocalStiffnessMatricesDynamicDistribution.h
// Created by to35jepo on 6/26/24.
//

#ifndef RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H
#define RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H


#include "LocalStiffnessMatrices.h"
#include "../mympi.h"
#include <sys/time.h>

#include <algorithm> // std::find_if (used by operator==)
#include <iostream>
#include <thread>
#include <chrono>
#include <random>

// Simulate a random amount of work by sleeping for a short time.
// Marked inline so the definition can live in this header without
// violating the one-definition rule.
inline void randomSleep() {
    // Seed with a real random value, if available
    std::random_device rd;
    // Initialize the random number generator
    std::mt19937 gen(rd());
    // Define the range for the random sleep time (0 to 2 seconds)
    std::uniform_int_distribution<> dis(0, 2);

    // Generate a random sleep duration
    int sleepTime = dis(gen);

    // Sleep for the randomly generated duration
    std::this_thread::sleep_for(std::chrono::seconds(sleepTime));
}

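// Tags for the master/worker handshake: a worker announces itself with
// TAG_READY_WORKER; the master replies either with a depth to process
// (TAG_START_WORK) or with a shutdown message (TAG_TERMINATE_WORK).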
enum LS_TAGS { TAG_READY_WORKER = 11, TAG_START_WORK = 12, TAG_TERMINATE_WORK = 13 };

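// Distributes the assembly of local stiffness matrices dynamically over MPI:
// rank 0 acts as master and hands out one depth at a time to whichever worker
// reports ready, so faster workers automatically pick up more depths.
//
// Minimal usage sketch (assumes an AdaptiveSparseGrid 'sg' and a concrete
// StencilTemplate 'MyStencil' exist in the surrounding project; both names
// are placeholders, not part of this header):
//
//   MyStencil stencil;
//   LocalStiffnessMatricesDynamicDistribution local(sg, stencil, numWorkers);
//   auto table = local.getDistributedDepthsHashtable();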
class LocalStiffnessMatricesDynamicDistribution : public LocalStiffnessMatrices {
public:
    LocalStiffnessMatricesDynamicDistribution(AdaptiveSparseGrid& sg, StencilTemplate& stencilClass, int number_processes_)
        : LocalStiffnessMatrices(sg, stencilClass, number_processes_) {
#ifdef MY_MPI_ON
        int rank;
        int size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        MPI_Barrier(MPI_COMM_WORLD);

        // Count the depths to distribute (currently unused; kept for debugging).
        int count = 0;
        for (auto it = depthList.begin_all(); it != depthList.end_all(); ++it) {
            count++;
        }

        if (rank == 0) {
            master();
        } else if (rank >= size - number_processes_) {
            worker();
        }

        MPI_Barrier(MPI_COMM_WORLD);

        // Rank 0 is the only rank that knows the full depth-to-worker mapping;
        // broadcast it so that every rank holds the complete table.
        for (auto it = depthList.begin_all(); it != depthList.end_all(); ++it) {
            Depth T = *it;
            int node;
            if (rank == 0) {
                node = distributedDepthsHashtable.getNodeForDepth(T);
            }

            MPI_Bcast(&node, 1, MPI_INT, 0, MPI_COMM_WORLD);
            if (rank > 0) {
                distributedDepthsHashtable.setNodeDepth(node, T);
            }
        }

        MPI_Barrier(MPI_COMM_WORLD);
#else
        std::cerr << "LocalStiffnessMatricesDynamicDistribution requires MPI!" << std::endl;
        exit(1);
#endif
    }

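    // Master loop (run on rank 0): wait for a ready message from any worker,
    // send it the next depth as a raw int array, and record the assignment in
    // distributedDepthsHashtable. Once all depths are handed out, answer each
    // worker's final ready message with TAG_TERMINATE_WORK.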
    void master() {
#ifdef MY_MPI_ON
        MPI_Status status;
        int d[DimensionSparseGrid];
        auto it = depthList.begin_all();
        int message = 0;
        for (; it != depthList.end_all(); ++it) {
            Depth T = *it;

            // Wait for any worker to report ready, then send it the next depth.
            MPI_Probe(MPI_ANY_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            MPI_Recv(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            for (int j = 0; j < DimensionSparseGrid; j++) d[j] = T.at(j);

            MPI_Send(d, DimensionSparseGrid, MPI_INT, status.MPI_SOURCE, TAG_START_WORK, MPI_COMM_WORLD);
            distributedDepthsHashtable.setNodeDepth(status.MPI_SOURCE, T);
        }

        // All depths are assigned: answer each worker's final ready message
        // with a termination message.
        int size;
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        for (int i = size - number_processes; i < size; i++) {
            MPI_Probe(i, TAG_READY_WORKER, MPI_COMM_WORLD, &status);
            MPI_Recv(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_READY_WORKER, MPI_COMM_WORLD, &status);

            MPI_Send(&message, 1, MPI_INT, status.MPI_SOURCE, TAG_TERMINATE_WORK, MPI_COMM_WORLD);
        }
#endif
    }

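    // Worker loop: repeatedly report ready to the master, then probe the tag
    // of the reply. TAG_START_WORK carries a depth whose local matrices are
    // assembled via calculateEntries(); TAG_TERMINATE_WORK ends the loop.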
    void worker() {
#ifdef MY_MPI_ON
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        while (1) {
            int message = 1;     // worker-ready message
            int master_rank = 0; // the master is rank 0

            // Send a message (with a tag) to the master indicating the worker is ready.
            MPI_Send(&message, 1, MPI_INT, master_rank, TAG_READY_WORKER, MPI_COMM_WORLD);

            int d[DimensionSparseGrid];
            Depth D;
            MPI_Status status;
            // Inspect the tag of the master's reply before receiving it.
            MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);

            if (status.MPI_TAG == TAG_START_WORK) {
                MPI_Recv(d, DimensionSparseGrid, MPI_INT, 0, TAG_START_WORK, MPI_COMM_WORLD, &status);
                for (int j = 0; j < DimensionSparseGrid; j++) D.set(d[j], j);
                calculateEntries(D);
            }
            if (status.MPI_TAG == TAG_TERMINATE_WORK) {
                MPI_Recv(&message, 1, MPI_INT, 0, TAG_TERMINATE_WORK, MPI_COMM_WORLD, &status);
                break;
            }
        }
#endif
    }

    // Assembles the local stiffness matrices for all cells of one depth;
    // defined below, outside the class.
    void calculateEntries(Depth& depth);

    // Returns the depth-to-rank table built during construction.
    DistributedDepthsHashtable getDistributedDepthsHashtable() { return distributedDepthsHashtable; }

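    // Adds the contribution of a second stencil to every stored local matrix:
    // per depth, the new stencil is initialized once, then a matrix for the
    // same cell is assembled with it and added onto the existing matrix.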
    void addStencil(StencilTemplate& newStencil) {
        for (auto& item : pairedDepthsLocalStiffnessMatrices) {
            Depth T = item.first;
            newStencil.initialize(T);
            auto& matrices = item.second;
            int end = matrices.size();

#pragma omp parallel for
            for (int i = 0; i < end; i++) {
                auto& matrix = matrices.at(i);
                CellDimension cell = *matrix.getCell();
                LocalStiffnessMatrixFixedDepthSymmetric matrix_add(cell, grid, newStencil);
                matrix += matrix_add;
            }
        }
    }

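    // Two collections compare equal if, for every depth stored here, all
    // matrices of matching cells compare equal in the other collection.
    // Depths absent from the other collection are not checked.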
    bool operator==(LocalStiffnessMatrices& localStiffnessMatrices) {
        for (auto& item : pairedDepthsLocalStiffnessMatrices) {
            Depth T = item.first;

            auto it = std::find_if(
                localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->begin(),
                localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->end(),
                [&T](const std::pair<Depth, std::vector<LocalStiffnessMatrices::LocalStiffnessMatrixFixedDepthSymmetric>>& pair) {
                    return pair.first == T;
                });

            if (it != localStiffnessMatrices.getPairedDepthsLocalStiffnessMatrices()->end()) {
                // Found the entry for the same depth; compare cell by cell.
                std::vector<LocalStiffnessMatrices::LocalStiffnessMatrixFixedDepthSymmetric>& matrices_add = it->second;

                for (auto& matrix : item.second) {
                    for (auto& matrix_add : matrices_add) {
                        CellDimension cellCompare = *matrix.getCell();
                        if (cellCompare == *matrix_add.getCell()) {
                            if (!(matrix_add == matrix)) {
                                return false;
                            }
                        }
                    }
                }
            }
        }
        return true;
    }
};

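// Assembles one LocalStiffnessMatrixFixedDepthSymmetric per cell of the given
// depth (the cell grid sits at the incremented depth Tcell), in parallel over
// the cells, and tracks the maximum assembly time in 'time'.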
void LocalStiffnessMatricesDynamicDistribution::calculateEntries(Depth& depth) {
    struct timeval begin_time, end_time;
    long seconds;
    long microseconds;
    gettimeofday(&begin_time, 0);

    stencilClass.initialize(depth);

    Depth Tcell = depth;
    ++Tcell;

    std::vector<LocalStiffnessMatrixFixedDepthSymmetric> matrices;

    const SingleDepthHashCellStructure& depthGrid = cellData.getGridForDepth(Tcell);
    const auto& map = depthGrid._map;
    const auto end = depthGrid.getNumberOfEntries();

#pragma omp parallel for schedule(dynamic)
    for (size_t i = 0; i < end; i++) {
        CellDimension cellDimension;
        cellDimension = map.getIndexOfTable(i);
        LocalStiffnessMatrixFixedDepthSymmetric localStiffnessMatrixFixedDepthSymmetric(
            cellDimension, grid, stencilClass);

        // Serialize both the push_back and the update of the shared counter;
        // numbercells is written by all threads.
#pragma omp critical
        {
            matrices.push_back(localStiffnessMatrixFixedDepthSymmetric);
            numbercells++;
        }
    }
    pairedDepthsLocalStiffnessMatrices.emplace_back(depth, matrices);

    // Keep the maximum assembly time over all depths processed by this rank.
    gettimeofday(&end_time, 0);
    seconds = end_time.tv_sec - begin_time.tv_sec;
    microseconds = end_time.tv_usec - begin_time.tv_usec;
    double t = seconds + microseconds * 1e-6;
    if (t > time) {
        time = t;
    }
}

#endif //RUN_LOCALSTIFFNESSMATRICESDYNAMICDISTRIBUTION_H