mympi.h
/**********************************************************************************
 *
 **********************************************************************************/

// dummympi.h (include file for library)

#ifndef RUN_MPI
#define RUN_MPI

// Message tags used to distribute tasks and to exchange local stiffness matrices.
enum Type_of_task { TAG_TASK=0, TAG_TERMINATE=1, TAG_LOCALSTIFFNESS_SEND=2, TAG_LOCALSTIFFNESS_SENDBACK=3, TAG_LOCALSTIFFNESS_END=4 };

#ifdef _MPI_PARALLEL
#include <mpi.h>
#endif

#include <iostream>
#include <ctime>   // std::clock() and CLOCKS_PER_SEC for the serial MPI_Wtime()

#ifndef _MPI_PARALLEL

// Serial fallback: replace the MPI interface by no-op dummies so that the
// library compiles and runs on a single process without an MPI installation.
#ifndef DUMMYMPI_H_
#define DUMMYMPI_H_

#define MPI_MAX_PROCESSOR_NAME 6

// Serial stub: report an empty processor name of length zero.
inline int MPI_Get_processor_name(char s[MPI_MAX_PROCESSOR_NAME], int *i) {
  s[0] = '\0';
  *i = 0;
  return 0;
}

// MPI handle types collapse to plain ints in the serial build.
#define MPI_Comm int
#define MPI_Group int
#define MPI_COMM_WORLD 0
#define MPI_Request int
#define MPI_Datatype int
#define MPI_IN_PLACE double
#define MPI_UNDEFINED int
#define MPI_COMM_NULL 0
#define MPI_Status int

// Datatype, reduction and wildcard constants; their values are irrelevant
// because the dummy routines below ignore them.
#define MPI_INT 0
#define MPI_DOUBLE 0
#define MPI_LOR 0
#define MPI_SUM 0
#define MPI_MIN 0
#define MPI_BOOL 0
#define MPI_OR 0
#define MPI_DOUBLE_COMPLEX 0
#define MPI_BYTE 0
#define MPI_MAX 0
#define MPI_ANY_SOURCE 0

inline void Error_Dummy_mpi() {
  std::cout << " Error_Dummy_mpi! " << std::endl;
}

// On a single process the rank is always 0 and the communicator size is 1.
inline void MPI_Comm_rank(int, int *my_rank) { *my_rank = 0; }

inline void MPI_Comm_size(int, int *p) { *p = 1; }

// Point-to-point communication, collectives, synchronization and derived
// datatype handling are treated as no-ops in the serial build.
template<class A>
inline void MPI_Irecv(A *, int, int, int, int, int, int *) {}

template<class A>
inline void MPI_Isend(A *, int, int, int, int, int, int *) {}

template<class A>
inline void MPI_Recv(A *, int, int, int, int, int) {}

template<class A>
inline void MPI_Send(A *, int, int, int, int, int) {}

template<class A>
inline void MPI_Recv(A *, int, int, int, int, int, int *) {}

template<class A>
inline void MPI_Send(A *, int, int, int, int, int, int *) {}

inline void MPI_Waitall(int, int *, int *) {}

inline void MPI_Barrier(int) {}

inline void MPI_Gather(int *, int, int, int *, int, int, int, int) {}

inline void MPI_Type_commit(int *) {}

inline void MPI_Type_free(int *) {}

inline void MPI_Type_vector(int, int, int, int, int *) {}

template<class A>
inline void MPI_Bcast(A *, int, int, int, int *) {}

template<class A>
inline void MPI_Reduce(A *, A *, int, int, int, int, int *) {}

inline int MPI_Init(int *, char ***) {
  return 0;
}

// Timing stub: seconds of CPU time via std::clock(); the cast to double
// avoids integer division of clock_t by CLOCKS_PER_SEC.
inline double MPI_Wtime() {
  return static_cast<double>(std::clock()) / CLOCKS_PER_SEC;
}

// With a single process a reduction simply copies the input to the output.
inline void MPI_Allreduce(int *input, int *output, int, int, int, int) {
  *output = *input;
}

inline void MPI_Allreduce(double *input, double *output, int, int, int, int) {
  *output = *input;
}

inline void MPI_Allreduce(bool *input, bool *output, int, int, int, int) {
  *output = *input;
}

inline int MPI_Finalize() {
  std::cout << " This is the serial version of EXPDE on sparse grids! " << std::endl;
  return 0;
}
// Communicator and group management: in the serial build there is only one
// process, so these routines just pass the handles through and report success.
inline int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *My_Comm) { *My_Comm = comm; return 0; }
inline int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) { *group = 0; return 0; }
inline int MPI_Group_incl(MPI_Group group, int n, const int ranks[], MPI_Group *newgroup) { *newgroup = group; return 0; }
inline int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) { *newcomm = comm; return 0; }
inline int MPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm *newcomm) { *newcomm = comm; return 0; }

inline int MPI_Comm_free(MPI_Comm *My_Comm) { *My_Comm = MPI_COMM_NULL; return 0; }
#endif  // DUMMYMPI_H_
#endif  // !_MPI_PARALLEL
#endif  // RUN_MPI
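
For reference, a minimal usage sketch: a translation unit that includes this header compiles unchanged against the serial dummies above, and against the real <mpi.h> when _MPI_PARALLEL is defined. The file name main.cpp, the include name "mympi.h", and the build lines below are illustrative assumptions, not part of the library.

// main.cpp -- illustrative usage sketch, not part of the library.
#include "mympi.h"   // assumed include name, matching the file shown above

int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);

  int rank = 0, size = 1;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // serial dummy: always rank 0
  MPI_Comm_size(MPI_COMM_WORLD, &size);   // serial dummy: always size 1

  // Each process contributes 1; the serial dummy copies input to output,
  // a real MPI_Allreduce sums over all processes.
  int local = 1, global = 0;
  MPI_Allreduce(&local, &global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  double t = MPI_Wtime();
  if (rank == 0)
    std::cout << "processes: " << size << "  sum: " << global
              << "  t = " << t << " s" << std::endl;

  MPI_Finalize();
  return 0;
}

// Possible build lines (assumed; the actual build system may differ):
//   g++ -o expde_serial main.cpp                   (serial, uses the dummies)
//   mpicxx -D_MPI_PARALLEL -o expde_mpi main.cpp   (parallel, uses <mpi.h>)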