5#ifndef RUN_ADDLOCALSTIFFNESSMATRICES_H
6#define RUN_ADDLOCALSTIFFNESSMATRICES_H
8#include "LocalStiffnessMatrices.h"
11class ADDLocalStiffnessMatrices{
14 virtual inline void initialize(Depth &T_){};
15 virtual inline void applyStencilOnCell(CellDimension& cell,VectorSparseG& input, VectorSparseG& output){};
16 virtual inline double returnValue(
const IndexDimension &Index,
const MultiDimCompass &mc){
return 0.0;};
17 void applyLocalStiffnessMatricesOnIndices_onNode(VectorSparseG& input, VectorSparseG& output, Depth& D, std::vector<IndexDimension>& Indices){
18 matricesA->applyLocalStiffnessMatricesOnIndices_onNode(input,output,D,Indices);
19 matricesB->applyLocalStiffnessMatricesOnIndices_onNode(input,output,D,Indices);
22 ADDLocalStiffnessMatrices(LocalStiffnessMatrices* matricesA_, LocalStiffnessMatrices* matricesB_,
AdaptiveSparseGrid& grid): matricesA(matricesA_),
23 matricesB(matricesB_), input_node(grid), output_node(grid){
24 number_processes=matricesB_->getNumberProcesses();
// Applies (A + B) for one fixed depth. Depending on which rank owns the
// matrices for `depth`, the work is either done locally or the data vectors
// are shipped to the owning node and the results received back.
//
// NOTE(review): this extracted listing is incomplete — the declarations of
// `rank`, `num_tasks` and `status`, and the if/else branch structure between
// the local-apply path and the send/receive path, are missing from the
// visible source. Comments below describe only what the visible lines show.
28 void applyLocalStiffnessMatricesDepth(VectorSparseG& input, VectorSparseG& output, Depth& depth){
33 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
34 MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
// Three sends (input, output, depth) and two receives (input, output).
37 MPI_Request send_requests[3], recv_requests[3];
40 int depth_node_int[DimensionSparseGrid];
// Both operands must map `depth` to the same owning rank; otherwise the
// additive combination is not supported for this memory distribution.
45 int nodeA = matricesA->getNodeForDepth(depth);
46 int nodeB = matricesB->getNodeForDepth(depth);
48 if(nodeA!=nodeB){cout <<
"ADDLocalStiffnessMatrices: ADD Local Stiffness Matrices only implemented for MemoryDistribution" << endl; exit(1);}
// Local path: this rank owns the matrices — apply A then B in place
// (presumably guarded by a rank==nodeA check lost in the listing).
53 matricesA->applyLocalStiffnessMatricesFixedDepth_onNode(input,output,depth);
54 matricesB->applyLocalStiffnessMatricesFixedDepth_onNode(input,output,depth);
// Remote path: ship the occupied part of both data tables to the owner.
62 int length = int(input.getSparseGrid()->getMaximalOccupiedSecondTable());
64 MPI_Isend(input.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &send_requests[0]);
65 MPI_Isend(output.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &send_requests[1]);
// Serialize the depth multi-index into a plain int array for transmission.
68 int depth_node_int2[DimensionSparseGrid];
69 for(
int d=0; d<DimensionSparseGrid; d++) depth_node_int2[d]=depth.at(d);
70 int dim = int(DimensionSparseGrid);
72 MPI_Isend(depth_node_int2, dim , MPI_INT, nodeA, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD,&send_requests[2]);
// Wait for all three sends to complete before reusing the buffers.
76 MPI_Wait(&send_requests[0], &status);
77 MPI_Wait(&send_requests[1], &status);
78 MPI_Wait(&send_requests[2], &status);
// Receive the updated vectors back from the owning node.
87 MPI_Irecv(input.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD, &recv_requests[0]);
88 MPI_Irecv(output.getDatatableVector(), length, MPI_DOUBLE, nodeA, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD, &recv_requests[1]);
93 MPI_Wait(&recv_requests[0], &status);
94 MPI_Wait(&recv_requests[1], &status);
// Worker-side counterpart of applyLocalStiffnessMatricesDepth: receive the
// data vectors and the depth from rank `n`, apply A and B locally, and send
// the updated vectors back.
//
// NOTE(review): this extracted listing is incomplete — the declarations of
// `rank`, `status` and `depth_node`, and the method's closing brace, are
// missing from the visible source.
111 void receiveApplySend(
int n) {
113 int depth_node_int[DimensionSparseGrid];
117 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Receive input and output data tables into the node-local work vectors.
122 int length = int(input_node.getSparseGrid()->getMaximalOccupiedSecondTable());
123 MPI_Recv(input_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD,&status);
124 MPI_Recv(output_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD,&status);
// Receive and deserialize the depth multi-index.
126 int dim = int(DimensionSparseGrid);
127 MPI_Recv(depth_node_int, dim, MPI_INT, n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD,&status);
129 for (
int d = 0; d < DimensionSparseGrid; d++) depth_node.set(depth_node_int[d], d);
// Apply both summand operators; each accumulates into output_node.
132 matricesA->applyLocalStiffnessMatricesFixedDepth_onNode(input_node, output_node, depth_node);
133 matricesB->applyLocalStiffnessMatricesFixedDepth_onNode(input_node, output_node, depth_node);
// Return the updated vectors to the requesting rank.
137 MPI_Send(input_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD);
138 MPI_Send(output_node.getDatatableVector(), length, MPI_DOUBLE, n, TAG_LOCALSTIFFNESS_SENDBACK, MPI_COMM_WORLD);
142 TypeMatrixVectorMultiplication getTypeMatrixVectorMultiplication(){
return typeMatrixVectorMultiplication;};
144 int getNodeForDepth(Depth& T){
return matricesA->getNodeForDepth(T);};
147 double applyLocalStiffnessMatricesFixedDepthIndex_onNode(VectorSparseG &input, VectorSparseG &output,
148 Depth &depth, IndexDimension &Index) {
151 value += matricesA->applyLocalStiffnessMatricesFixedDepthIndex_onNode(input,output,depth,Index);
152 value += matricesB->applyLocalStiffnessMatricesFixedDepthIndex_onNode(input,output,depth,Index);
162 void resetActiveWorkers(){
166 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
167 MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
168 active_worker.clear();
169 for(
int i=0; i< num_tasks-number_processes; i++) active_worker.push_back(i);
173 int getNumberProcesses(){
return number_processes;};
// Polls all active worker ranks: serves pending apply requests
// (TAG_LOCALSTIFFNESS_SEND) and removes workers that signalled completion
// (TAG_LOCALSTIFFNESS_END) from the active list.
//
// NOTE(review): this extracted listing is incomplete — the declarations of
// `rank`, `flag`, `status`, `index` and `n`, the branch bodies around the two
// probes, the iterator advance, and the closing braces are missing from the
// visible source. Comments below describe only what the visible lines show.
178 void receiveApplySendOnActiveWorkers() {
183 MPI_Comm_rank(MPI_COMM_WORLD, &rank);
// Iterate without the usual ++it so entries can be erased in place.
185 for (
auto it = active_worker.begin(); it != active_worker.end();){
// Non-blocking check for an apply request from worker n.
188 MPI_Iprobe(n, TAG_LOCALSTIFFNESS_SEND, MPI_COMM_WORLD, &flag, &status);
// Non-blocking check for the worker's end-of-work signal.
193 MPI_Iprobe(n, TAG_LOCALSTIFFNESS_END, MPI_COMM_WORLD, &flag, &status);
196 MPI_Recv(&index, 1, MPI_INT, status.MPI_SOURCE, TAG_LOCALSTIFFNESS_END, MPI_COMM_WORLD, &status);
// Worker is done: drop it; erase returns the next valid iterator.
198 it = active_worker.erase(it);
// Ranks still expected to send work/end signals (see resetActiveWorkers).
208 std::vector<int> active_worker;
// The two summand operators; not owned — lifetime managed by the caller.
210 LocalStiffnessMatrices* matricesA;
211 LocalStiffnessMatrices* matricesB;
// Multiplication strategy reported to callers via the getter.
212 TypeMatrixVectorMultiplication typeMatrixVectorMultiplication = StoreLocalStiffnessMatrix;
// Ranks reserved for matrix storage; taken from operand B in the constructor.
214 int number_processes=1;
// Node-local work vectors used by receiveApplySend.
215 VectorSparseG input_node,output_node;
// Doxygen cross-reference artifact from the extracted listing (not source code): Definition sparseGrid.h:277