ADDLocalStiffnessMatrices.h
//
// Created by to35jepo on 6/24/24.
//

#ifndef RUN_ADDLOCALSTIFFNESSMATRICES_H
#define RUN_ADDLOCALSTIFFNESSMATRICES_H

#include "LocalStiffnessMatrices.h"

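// Combines two LocalStiffnessMatrices operators: each apply routine below
// forwards the call to matricesA and then to matricesB, combining the
// contributions of both operators (hence "ADD").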
class ADDLocalStiffnessMatrices{
public:
    // virtual placeholder functions (empty defaults)
    virtual inline void initialize(Depth &T_){};
    virtual inline void applyStencilOnCell(CellDimension& cell, VectorSparseG& input, VectorSparseG& output){};
    virtual inline double returnValue(const IndexDimension &Index, const MultiDimCompass &mc){return 0.0;};

    // apply both operators to the given indices on this node
    void applyLocalStiffnessMatricesOnIndices_onNode(VectorSparseG& input, VectorSparseG& output, Depth& D, std::vector<IndexDimension>& Indices){
        matricesA->applyLocalStiffnessMatricesOnIndices_onNode(input, output, D, Indices);
        matricesB->applyLocalStiffnessMatricesOnIndices_onNode(input, output, D, Indices);
    };

    ADDLocalStiffnessMatrices(LocalStiffnessMatrices* matricesA_, LocalStiffnessMatrices* matricesB_, AdaptiveSparseGrid& grid)
            : matricesA(matricesA_), matricesB(matricesB_), input_node(grid), output_node(grid){
        number_processes = matricesB_->getNumberProcesses();
        resetActiveWorkers();
    };

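    // Applies both operators for a fixed depth. If this rank owns the matrices
    // for the depth (rank == nodeA == nodeB), the work is done locally; otherwise
    // input, output and depth are sent to the owning node via MPI and the
    // updated vectors are received back.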
    void applyLocalStiffnessMatricesDepth(VectorSparseG& input, VectorSparseG& output, Depth& depth){
#ifdef MY_MPI_ON
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Status status;
        MPI_Request send_requests[3], recv_requests[2];

        int nodeA = matricesA->getNodeForDepth(depth);
        int nodeB = matricesB->getNodeForDepth(depth);

        if(nodeA != nodeB){
            cout << "ADDLocalStiffnessMatrices: adding local stiffness matrices is only implemented for MemoryDistribution" << endl;
            exit(1);
        }

        if(rank == nodeA){
            // this rank owns the matrices for the depth: apply directly
            matricesA->applyLocalStiffnessMatricesFixedDepth_onNode(input, output, depth);
            matricesB->applyLocalStiffnessMatricesFixedDepth_onNode(input, output, depth);
        }else{
            // send input, output and depth to the owning node ...
            int length = int(input.getSparseGrid()->getMaximalOccupiedSecondTable());

            MPI_Isend(input.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &send_requests[0]);
            MPI_Isend(output.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &send_requests[1]);

            int depth_node_int[DimensionSparseGrid];
            for(int d = 0; d < DimensionSparseGrid; d++) depth_node_int[d] = depth.at(d);
            int dim = int(DimensionSparseGrid);

            MPI_Isend(depth_node_int, dim, MPI_INT, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &send_requests[2]);

            MPI_Wait(&send_requests[0], &status);
            MPI_Wait(&send_requests[1], &status);
            MPI_Wait(&send_requests[2], &status);

            // Is this needed? This rank will never receive an apply request from nodeA.
            /*MPI_Iprobe(nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &flag, &status);
            if (flag) {
                receiveApplySend(nodeA);
            }*/

            // ... and receive the updated vectors back
            MPI_Irecv(input.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD, &recv_requests[0]);
            MPI_Irecv(output.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD, &recv_requests[1]);

            // wait for the receive operations to complete
            MPI_Wait(&recv_requests[0], &status);
            MPI_Wait(&recv_requests[1], &status);
        }
#endif
    };

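    // Worker-side counterpart of applyLocalStiffnessMatricesDepth: receives
    // input, output and depth from rank n, applies both operators on this node
    // and sends the updated vectors back.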
    void receiveApplySend(int n) {
#ifdef MY_MPI_ON
        int depth_node_int[DimensionSparseGrid];
        Depth depth_node;
        MPI_Status status;
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // receive input, output, depth
        int length = int(input_node.getSparseGrid()->getMaximalOccupiedSecondTable());
        MPI_Recv(input_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &status);
        MPI_Recv(output_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &status);

        int dim = int(DimensionSparseGrid);
        MPI_Recv(depth_node_int, dim, MPI_INT, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &status);

        for (int d = 0; d < DimensionSparseGrid; d++) depth_node.set(depth_node_int[d], d);

        // apply both operators on this node
        matricesA->applyLocalStiffnessMatricesFixedDepth_onNode(input_node, output_node, depth_node);
        matricesB->applyLocalStiffnessMatricesFixedDepth_onNode(input_node, output_node, depth_node);

        // send the updated vectors back
        MPI_Send(input_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD);
        MPI_Send(output_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD);
#endif
    }
    TypeMatrixVectorMultiplication getTypeMatrixVectorMultiplication(){return typeMatrixVectorMultiplication;};

    int getNodeForDepth(Depth& T){return matricesA->getNodeForDepth(T);};

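    // Applies both operators for a single index at a fixed depth and returns
    // the accumulated value.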
    double applyLocalStiffnessMatricesFixedDepthIndex_onNode(VectorSparseG &input, VectorSparseG &output,
                                                             Depth &depth, IndexDimension &Index) {
        double value = 0.0;
        value += matricesA->applyLocalStiffnessMatricesFixedDepthIndex_onNode(input, output, depth, Index);
        value += matricesB->applyLocalStiffnessMatricesFixedDepthIndex_onNode(input, output, depth, Index);

        return value;
    }

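    // Resets active_worker to the ranks 0 .. (num_tasks - number_processes - 1).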
    void resetActiveWorkers(){
#ifdef MY_MPI_ON
        int num_tasks = 1;
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
        active_worker.clear();
        for(int i = 0; i < num_tasks - number_processes; i++) active_worker.push_back(i);
#endif
    };

    int getNumberProcesses(){return number_processes;};

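    // Polls every rank in active_worker: pending apply requests
    // (TAG_LOCALSTIFFNESS_SEND) are served via receiveApplySend(), and ranks
    // signalling TAG_LOCALSTIFFNESS_END are removed from the list.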
    void receiveApplySendOnActiveWorkers() {
#ifdef MY_MPI_ON
        MPI_Status status;
        int flag;
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        for (auto it = active_worker.begin(); it != active_worker.end();){
            int n = *it;
            flag = 0;
            MPI_Iprobe(n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &flag, &status);
            if (flag) {
                receiveApplySend(n);
            }
            flag = 0;
            MPI_Iprobe(n, TAG_LOCALSTIFFNESS_END, MPI_COMM_WORLD, &flag, &status);
            if (flag) {
                int index;
                MPI_Recv(&index, 1, MPI_INT, status.MPI_SOURCE, TAG_LOCALSTIFFNESS_END, MPI_COMM_WORLD, &status);
                it = active_worker.erase(it);
            }else{
                ++it;
            }
        }
#endif
    }

    std::vector<int> active_worker;

private:
    LocalStiffnessMatrices* matricesA;
    LocalStiffnessMatrices* matricesB;
    TypeMatrixVectorMultiplication typeMatrixVectorMultiplication = StoreLocalStiffnessMatrix;

    int number_processes = 1;
    VectorSparseG input_node, output_node;
};

#endif //RUN_ADDLOCALSTIFFNESSMATRICES_H
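
A minimal usage sketch (not part of the header), assuming two LocalStiffnessMatrices instances built on the same AdaptiveSparseGrid; the names stiffnessA, stiffnessB, grid, u, Au and applySum are hypothetical, and only the constructor and apply call from the class above are used:

#include "ADDLocalStiffnessMatrices.h"

// Apply the sum of two local stiffness operators to u for one depth.
// The distribution of the operators over MPI nodes is handled inside
// applyLocalStiffnessMatricesDepth when MY_MPI_ON is defined.
void applySum(LocalStiffnessMatrices& stiffnessA, LocalStiffnessMatrices& stiffnessB,
              AdaptiveSparseGrid& grid, VectorSparseG& u, VectorSparseG& Au, Depth& depth) {
    ADDLocalStiffnessMatrices sum(&stiffnessA, &stiffnessB, grid);
    sum.applyLocalStiffnessMatricesDepth(u, Au, depth);
}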