// Serial (dummy) replacements for the MPI interface used by EXPDE on sparse grids.
// Message tags for task distribution and local stiffness exchange:
enum Type_of_task { TAG_TASK=0, TAG_TERMINATE=1, TAG_LOCALSTIFFNESS_SEND=2,
                    TAG_LOCALSTIFFNESS_SENDBACK=3, TAG_LOCALSTIFFNESS_END=4 };
#define MPI_MAX_PROCESSOR_NAME 6

// Serial stub (assumed body): report a fixed dummy host name that fits into
// MPI_MAX_PROCESSOR_NAME characters including the terminating '\0'.
inline int MPI_Get_processor_name(char s[MPI_MAX_PROCESSOR_NAME], int *i) {
  s[0]='d'; s[1]='u'; s[2]='m'; s[3]='m'; s[4]='y'; s[5]='\0';
  *i = 5;
  return 0;
}
#define MPI_COMM_WORLD 0
#define MPI_Request int
#define MPI_Datatype int
#define MPI_IN_PLACE double
#define MPI_UNDEFINED int
#define MPI_COMM_NULL 0
#define MPI_DOUBLE_COMPLEX 0
#define MPI_ANY_SOURCE 0
inline void Error_Dummy_mpi() {
  std::cout << " Error_Dummy_mpi! " << std::endl;
}
// The serial build has exactly one process with rank 0.
inline void MPI_Comm_rank(int, int *my_rank) { *my_rank = 0; }
inline void MPI_Comm_size(int, int *p) { *p = 1; }
// Point-to-point communication stubs: no-ops in the serial build.  The buffer
// type A is assumed to be a template parameter, since the parameter lists use A*.
template <class A> inline void MPI_Irecv(A *, int, int, int, int, int, int *) {}
template <class A> inline void MPI_Isend(A *, int, int, int, int, int, int *) {}
template <class A> inline void MPI_Recv(A *, int, int, int, int, int) {}
template <class A> inline void MPI_Send(A *, int, int, int, int, int) {}
template <class A> inline void MPI_Recv(A *, int, int, int, int, int, int *) {}
template <class A> inline void MPI_Send(A *, int, int, int, int, int, int *) {}
// Collective, synchronization, and datatype stubs: all no-ops on a single process.
inline void MPI_Waitall(int, int *, int *) {}
inline void MPI_Barrier(int) {}
inline void MPI_Gather(int *, int, int, int *, int, int, int, int) {}
inline void MPI_Type_commit(int *) {}
inline void MPI_Type_free(int *) {}
inline void MPI_Type_vector(int, int, int, int, int *) {}
template <class A> inline void MPI_Bcast(A *, int, int, int, int *) {}
template <class A> inline void MPI_Reduce(A *, A *, int, int, int, int, int *) {}
inline int MPI_Init(int *, char ***) {
  return 0;
}

// Timer substitute based on CPU time; the cast avoids integer division of
// clock_t by CLOCKS_PER_SEC.
inline double MPI_Wtime() {
  return static_cast<double>(clock()) / CLOCKS_PER_SEC;
}
// Serial MPI_Allreduce: with a single process the "reduction" is just a copy of
// the input buffer to the output buffer (assumed bodies; the count argument,
// third parameter, is honoured, the datatype/op/comm arguments are ignored).
inline void MPI_Allreduce(int *input, int *output, int count, int, int, int) {
  for (int k = 0; k < count; ++k) output[k] = input[k];
}
inline void MPI_Allreduce(double *input, double *output, int count, int, int, int) {
  for (int k = 0; k < count; ++k) output[k] = input[k];
}
inline void MPI_Allreduce(bool *input, bool *output, int count, int, int, int) {
  for (int k = 0; k < count; ++k) output[k] = input[k];
}
inline int MPI_Finalize() {
  std::cout << " This is the serial version of EXPDE on sparse grids! " << std::endl;
  return 0;
}
// Communicator and group management.  MPI_Comm and MPI_Group are assumed to
// reduce to plain ints in the serial build, like MPI_Request and MPI_Datatype
// above.  With a single process, these stubs (assumed bodies) simply hand the
// input communicator or group back and report success.
inline int MPI_Comm_split(MPI_Comm comm, int color, int key, MPI_Comm *My_Comm) {
  *My_Comm = comm; return 0;
}
inline int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) {
  *group = comm; return 0;
}
inline int MPI_Group_incl(MPI_Group group, int n, const int ranks[], MPI_Group *newgroup) {
  *newgroup = group; return 0;
}
inline int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) {
  *newcomm = comm; return 0;
}
inline int MPI_Comm_create_group(MPI_Comm comm, MPI_Group group, int tag, MPI_Comm *newcomm) {
  *newcomm = comm; return 0;
}
inline int MPI_Comm_free(MPI_Comm *My_Comm) {
  *My_Comm = MPI_COMM_NULL; return 0;
}
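
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the dummy interface): code that
// is written against the real MPI calls compiles unchanged against these stubs
// and then runs as a single-rank serial program.  EXPDE_DUMMY_MPI_EXAMPLE is a
// hypothetical guard so that this example is never compiled into the header;
// the datatype/op arguments passed as 0 are ignored by the stubs above.
#ifdef EXPDE_DUMMY_MPI_EXAMPLE
#include <iostream>
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);   // always 0 with the dummy stubs
  MPI_Comm_size(MPI_COMM_WORLD, &size);   // always 1 with the dummy stubs
  double local = 3.0, global = 0.0;
  MPI_Allreduce(&local, &global, 1, 0, 0, MPI_COMM_WORLD);  // serial copy
  double t = MPI_Wtime();
  std::cout << "rank " << rank << " of " << size
            << ", allreduce result " << global << ", t = " << t << std::endl;
  MPI_Finalize();                         // prints the serial EXPDE banner
  return 0;
}
#endif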