Actual source code: mpi.h
petsc-3.6.3 2015-12-03
1: /*
2: This is a special set of bindings for uni-processor use of MPI by the PETSc library.
4: NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.
6: For example,
7: * Does not implement send to self.
8: * Does not implement attributes correctly.
9: */
11: /*
12: The following info is a response to one of the petsc-maint questions
13: regarding MPIUNI.
15: MPIUNI was developed with the aim of getting PETSc compiled and
16: usable in the absence of a full MPI implementation. With this, we
17: were able to provide PETSc on Windows and Windows64 even before any
18: MPI implementation was available on these platforms. [Or with certain
19: compilers - like Borland - that do not have a usable MPI
20: implementation.]
22: However, providing a sequential, standards-compliant MPI
23: implementation is *not* the goal of MPIUNI. The development strategy
24: was to make just enough changes so that the PETSc sources and examples
25: compile without errors and run in uni-processor mode. This is
26: why the individual functions are not documented.
28: PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
29: interface is also provided - to get PETSc Fortran examples with a
30: few MPI calls working.
32: One of the optimizations in MPIUNI is to avoid function call
33: overhead when possible. Hence most of the C functions are
34: implemented as macros. However, the function calls cannot be avoided
35: with Fortran usage.
37: Most PETSc objects have both sequential and parallel
38: implementations, which are separate. For example, we have two types of
39: sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
40: routines are used in the Seq part, but most of them are used in the
41: MPI part. The send/receive calls can be found mostly in the MPI
42: part.
44: When MPIUNI is used, only the Seq versions of the PETSc objects are
45: used, even though the MPI variants of the objects are compiled. Since
46: there are no send/receive calls in the Seq variants, PETSc works fine
47: with MPIUNI in sequential mode.
49: The reason some send/receive functions are defined to abort() is to
50: detect sections of code that use send/receive functions and get
51: executed in sequential mode (which shouldn't happen in the case of
52: PETSc).
54: A proper implementation of send/receive would involve writing a
55: function for each of them. Inside each of these functions, we would
56: have to check whether the send is to self or the receive is from self,
57: and then do the buffering accordingly (until the matching receive is
58: called) - or, if a nonblocking receive is called, do a copy, etc. Handling
59: the buffering aspects would be complicated enough that, in that
60: case, a proper implementation of MPI might as well be used. This is
61: the reason send-to-self is not implemented in MPIUNI, and never
62: will be.
64: Proper implementations of MPI [for example MPICH & OpenMPI] are
65: available for most machines. When these packages are available, it is
66: generally preferable to use one of them instead of MPIUNI - even if
67: the user is using PETSc sequentially.
69: - MPIUNI does not support all MPI functions [or functionality].
70: Hence it might not work with external packages or user code that
71: might have MPI calls in it.
73: - MPIUNI is not a standards-compliant implementation for np=1.
74: For example, if the user code has a send/recv to self, then it will
75: abort. [There are similar issues with a number of other MPI features.]
76: However, MPICH & OpenMPI are correct implementations of the MPI
77: standard for np=1.
79: - When user code uses multiple MPI-based packages that have their
80: own *internal* stubs equivalent to MPIUNI - in sequential mode -
81: these multiple implementations of MPI for np=1 invariably conflict
82: with each other. The correct thing to do is to make all such
83: packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
84: satisfy this requirement correctly [and hence are the correct choice].
86: - Using MPICH/OpenMPI sequentially should have minimal
87: disadvantages. [For example, these binaries can be run without
88: mpirun/mpiexec as ./executable, without requiring any extra
89: configuration for ssh/rsh/daemons etc.] This should not be a
90: reason to avoid these packages for sequential use.
92: Instructions for building standalone MPIUNI [e.g. linux/gcc+gfortran]; a small usage sketch follows the steps:
93: - extract include/mpiuni/mpi.h, mpif.f, src/sys/mpiuni/mpi.c from PETSc
94: - remove reference to petscconf.h from mpi.h
95: - gcc -c mpi.c -DPETSC_HAVE_STDLIB_H -DPETSC_HAVE_FORTRAN_UNDERSCORE
96: - ar cr libmpiuni.a mpi.o
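    As a quick sanity check (an illustrative sketch; the file and option names
    here are only an example, not part of the original instructions), a minimal
    program such as

       #include "mpi.h"
       #include <stdio.h>
       int main(int argc,char **argv)
       {
         int rank,size;
         MPI_Init(&argc,&argv);
         MPI_Comm_rank(MPI_COMM_WORLD,&rank);
         MPI_Comm_size(MPI_COMM_WORLD,&size);
         printf("Hello from rank %d of %d\n",rank,size);
         MPI_Finalize();
         return 0;
       }

    can then be compiled and linked against the standalone library with
    something like

       gcc -c main.c
       gcc -o main main.o -L. -lmpiuni

    and run directly as ./main, with no mpirun/mpiexec needed. (Depending on
    the platform, a few additional -D flags analogous to those used for mpi.c
    may be required.)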
98: */
100: #if !defined(__MPIUNI_H)
101: #define __MPIUNI_H
103: /* Required by abort() in mpi.c & for win64 */
104: #include <petscconf.h>
106: /* This is reproduced from petscsys.h so that mpi.h can be used standalone without first including petscsys.h */
107: #if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
108: # define MPIUni_ __declspec(dllexport)
109: # define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
110: #elif defined(PETSC_USE_VISIBILITY)
111: # define MPIUni_ __attribute__((visibility ("default")))
112: # define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
113: #else
114: # define MPIUni_
115: # define MPIUni_PETSC_DLLIMPORT
116: #endif
118: #if defined(petsc_EXPORTS)
119: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
120: #else /* Win32 users need this to import symbols from petsc.dll */
121: # define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
122: #endif
124: #if defined(__cplusplus)
125: #define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
126: #else
127: #define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
128: #endif
130: #if defined(__cplusplus)
131: extern "C" {
132: #endif
134: /* we require an integer type large enough to hold a pointer */
135: #if (PETSC_SIZEOF_LONG == PETSC_SIZEOF_VOID_P)
136: typedef long MPIUNI_INTPTR;
137: #elif (PETSC_SIZEOF_SIZE_T == PETSC_SIZEOF_VOID_P)
138: typedef size_t MPIUNI_INTPTR;
139: #else
140: typedef unknownuniptr MPIUNI_INTPTR;
141: #endif
143: /* old 32bit MS compiler does not support long long */
144: #if defined(PETSC_SIZEOF_LONG_LONG)
145: typedef long long MPIUNI_INT64;
146: #elif defined(PETSC_HAVE___INT64)
147: typedef _int64 MPIUNI_INT64;
148: #else
149: typedef unknownunint64 MPIUNI_INT64;
150: #endif
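/* Note: if none of the cases above applies, the intentionally undefined type
   names unknownuniptr and unknownunint64 force a compile error here rather
   than silently selecting an integer of the wrong width. */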
152: /*
154: MPIUNI_TMP is used in the macros below only to stop various C/C++ compilers
155: from generating warning messages about unused variables while compiling PETSc.
156: */
157: MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
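/*
   For illustration: a "do nothing" macro defined below, such as MPI_Barrier(comm),
   expands to the comma expression

       (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm), MPI_SUCCESS)

   which "uses" its argument through MPIUNI_TMP (so compilers do not warn about
   unused variables) and evaluates to MPI_SUCCESS.
*/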
159: #define MPI_COMM_SELF 1
160: #define MPI_COMM_WORLD 2
161: #define MPI_COMM_NULL 0
162: #define MPI_SUCCESS 0
163: #define MPI_IDENT 0
164: #define MPI_CONGRUENT 1
165: #define MPI_SIMILAR 2
166: #define MPI_UNEQUAL 3
167: #define MPI_ANY_SOURCE (-2)
168: #define MPI_KEYVAL_INVALID 0
169: #define MPI_ERR_UNKNOWN 18
170: #define MPI_ERR_INTERN 21
171: #define MPI_ERR_OTHER 1
172: #define MPI_TAG_UB 0
173: #define MPI_ERRORS_RETURN 0
174: #define MPI_UNDEFINED (-32766)
175: #define MPI_ERRORS_ARE_FATAL (-32765)
176: #define MPI_MAXLOC 5
177: #define MPI_MINLOC 6
180: /* External types */
181: typedef int MPI_Comm;
182: typedef void *MPI_Request;
183: typedef void *MPI_Group;
184: typedef struct {int MPI_TAG,MPI_SOURCE,MPI_ERROR;} MPI_Status;
185: typedef char *MPI_Errhandler;
186: typedef int MPI_Fint;
187: typedef int MPI_File;
188: typedef int MPI_Info;
189: typedef int MPI_Offset;
191: /* In order to handle datatypes, we encode them as "sizeof(raw-type)";
192: this allows us to do the MPIUNI_Memcpy()s easily */
193: #define MPI_Datatype int
194: #define MPI_FLOAT (1 << 16 | sizeof(float))
195: #define MPI_DOUBLE (1 << 16 | sizeof(double))
196: #define MPI_LONG_DOUBLE (1 << 16 | sizeof(long double))
198: #define MPI_COMPLEX (2 << 16 | 2*sizeof(float))
199: #define MPI_C_COMPLEX (2 << 16 | 2*sizeof(float))
200: #define MPI_C_DOUBLE_COMPLEX (2 << 16 | 2*sizeof(double))
202: #define MPI_CHAR (3 << 16 | sizeof(char))
203: #define MPI_BYTE (3 << 16 | sizeof(char))
204: #define MPI_UNSIGNED_CHAR (3 << 16 | sizeof(unsigned char))
206: #define MPI_INT (4 << 16 | sizeof(int))
207: #define MPI_LONG (4 << 16 | sizeof(long))
208: #define MPI_LONG_LONG_INT (4 << 16 | sizeof(MPIUNI_INT64))
209: #define MPI_SHORT (4 << 16 | sizeof(short))
211: #define MPI_UNSIGNED_SHORT (5 << 16 | sizeof(unsigned short))
212: #define MPI_UNSIGNED (5 << 16 | sizeof(unsigned))
213: #define MPI_UNSIGNED_LONG (5 << 16 | sizeof(unsigned long))
214: #define MPI_UNSIGNED_LONG_LONG (5 << 16 | sizeof(unsigned MPIUNI_INT64))
216: #define MPI_FLOAT_INT (10 << 16 | (sizeof(float) + sizeof(int)))
217: #define MPI_DOUBLE_INT (11 << 16 | (sizeof(double) + sizeof(int)))
218: #define MPI_LONG_INT (12 << 16 | (sizeof(long) + sizeof(int)))
219: #define MPI_SHORT_INT (13 << 16 | (sizeof(short) + sizeof(int)))
220: #define MPI_2INT (14 << 16 | (2* sizeof(int)))
222: #if defined(PETSC_USE_REAL___FLOAT128)
223: extern MPI_Datatype MPIU___FLOAT128;
224: #define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? 2*sizeof(double) : (datatype) & 0xff)
225: #else
226: #define MPI_sizeof(datatype) ((datatype) & 0xff)
227: #endif
228: MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);
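/*
   For example, with the encoding above

       MPI_sizeof(MPI_DOUBLE) == sizeof(double)
       MPI_sizeof(MPI_2INT)   == 2*sizeof(int)

   since the low byte of each datatype holds its size, while the high bits
   (e.g. MPI_DOUBLE >> 16 == 1) only distinguish families of types. This is what
   lets the collective macros below reduce to a single copy such as

       MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype))
*/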
230: #define MPI_MAX_PROCESSOR_NAME 1024
232: #define MPI_REQUEST_NULL ((MPI_Request)0)
233: #define MPI_GROUP_NULL ((MPI_Group)0)
234: #define MPI_INFO_NULL ((MPI_Info)0)
235: #define MPI_BOTTOM (void *)0
236: typedef int MPI_Op;
238: #define MPI_MODE_RDONLY 0
239: #define MPI_MODE_WRONLY 0
240: #define MPI_MODE_CREATE 0
242: #define MPI_SUM 1
243: #define MPI_MAX 2
244: #define MPI_MIN 3
245: #define MPI_REPLACE 4
246: #define MPI_PROD 5
247: #define MPI_LAND 6
248: #define MPI_BAND 7
249: #define MPI_LOR 8
250: #define MPI_BOR 9
251: #define MPI_LXOR 10
252: #define MPI_BXOR 11
253: #define MPI_ANY_TAG (-1)
254: #define MPI_DATATYPE_NULL 0
255: #define MPI_PACKED 0
256: #define MPI_MAX_ERROR_STRING 2056
257: #define MPI_STATUS_IGNORE (MPI_Status *)1
258: #define MPI_STATUSES_IGNORE (MPI_Status *)1
259: #define MPI_ORDER_FORTRAN 57
260: #define MPI_IN_PLACE (void *) -1
262: /*
263: Prototypes of some functions which are implemented in mpi.c
264: */
265: typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
266: typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
267: typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);
269: /*
270: In order that the PETSc MPIUNI can be used with another package that has its
271: own MPIUni, we map the following function names to unique PETSc names. These functions
272: are defined in mpi.c and put into the libpetscsys.a or libpetsc.a library.
274: Note that this does not work for the MPIUni Fortran symbols, which are explicitly in the
275: PETSc libraries unless the flag MPIUNI_AVOID_MPI_NAMESPACE is set.
276: */
277: #define MPI_Abort Petsc_MPI_Abort
278: #define MPI_Attr_get Petsc_MPI_Attr_get
279: #define MPI_Keyval_free Petsc_MPI_Keyval_free
280: #define MPI_Attr_put Petsc_MPI_Attr_put
281: #define MPI_Attr_delete Petsc_MPI_Attr_delete
282: #define MPI_Keyval_create Petsc_MPI_Keyval_create
283: #define MPI_Comm_free Petsc_MPI_Comm_free
284: #define MPI_Comm_dup Petsc_MPI_Comm_dup
285: #define MPI_Comm_create Petsc_MPI_Comm_create
286: #define MPI_Init Petsc_MPI_Init
287: #define MPI_Finalize Petsc_MPI_Finalize
288: #define MPI_Initialized Petsc_MPI_Initialized
289: #define MPI_Finalized Petsc_MPI_Finalized
290: #define MPI_Comm_size Petsc_MPI_Comm_size
291: #define MPI_Comm_rank Petsc_MPI_Comm_rank
292: #define MPI_Wtime Petsc_MPI_Wtime
294: /* identical C bindings */
295: #define MPI_Comm_create_keyval Petsc_MPI_Keyval_create
296: #define MPI_Comm_free_keyval Petsc_MPI_Keyval_free
297: #define MPI_Comm_get_attr Petsc_MPI_Attr_get
298: #define MPI_Comm_set_attr Petsc_MPI_Attr_put
300: MPIUni_PETSC_EXTERN int MPI_Abort(MPI_Comm,int);
301: MPIUni_PETSC_EXTERN int MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
302: MPIUni_PETSC_EXTERN int MPI_Keyval_free(int*);
303: MPIUni_PETSC_EXTERN int MPI_Attr_put(MPI_Comm,int,void *);
304: MPIUni_PETSC_EXTERN int MPI_Attr_delete(MPI_Comm,int);
305: MPIUni_PETSC_EXTERN int MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
306: MPIUni_PETSC_EXTERN int MPI_Comm_free(MPI_Comm*);
307: MPIUni_PETSC_EXTERN int MPI_Comm_dup(MPI_Comm,MPI_Comm *);
308: MPIUni_PETSC_EXTERN int MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
309: MPIUni_PETSC_EXTERN int MPI_Init(int *, char ***);
310: MPIUni_PETSC_EXTERN int MPI_Finalize(void);
311: MPIUni_PETSC_EXTERN int MPI_Initialized(int*);
312: MPIUni_PETSC_EXTERN int MPI_Finalized(int*);
313: MPIUni_PETSC_EXTERN int MPI_Comm_size(MPI_Comm,int*);
314: MPIUni_PETSC_EXTERN int MPI_Comm_rank(MPI_Comm,int*);
315: MPIUni_PETSC_EXTERN double MPI_Wtime(void);
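/*
   For example, with the renaming above a user call such as

       MPI_Comm_rank(MPI_COMM_WORLD,&rank);

   is compiled as a call to Petsc_MPI_Comm_rank(), which is defined in PETSc's
   mpi.c, so it cannot collide with another package's own np=1 stub that also
   provides a symbol named MPI_Comm_rank.
*/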
317: #define MPI_Aint MPIUNI_INTPTR
318: /*
319: Routines we have replaced with macros that do nothing.
320: Some return error codes; others return success.
321: */
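/*
   For instance, MPI_Wait() and MPI_Barrier() below are harmless no-ops that
   evaluate to MPI_SUCCESS, whereas MPI_Send() and MPI_Recv() deliberately
   expand to MPI_Abort(MPI_COMM_WORLD,0), since a real message can never be
   delivered in this setting (see the discussion of send-to-self above).
*/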
323: #define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
324: #define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
325: #define MPI_Type_f2c(type) (MPI_Datatype)(type)
326: #define MPI_Type_c2f(type) (MPI_Fint)(type)
327: #define MPI_Op_c2f(op) (MPI_Fint)(op)
329: #define MPI_Send(buf,count,datatype,dest,tag,comm) \
330: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
331: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
332: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
333: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
334: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
335: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
336: MPI_Abort(MPI_COMM_WORLD,0))
337: #define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
338: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
339: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
340: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
341: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
342: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
343: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
344: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
345: MPI_Abort(MPI_COMM_WORLD,0))
346: #define MPI_Get_count(status, datatype,count) \
347: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
348: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
349: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
350: MPI_Abort(MPI_COMM_WORLD,0))
351: #define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
352: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
353: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
354: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
355: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
356: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
357: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
358: MPI_Abort(MPI_COMM_WORLD,0))
359: #define MPI_Ssend(buf,count, datatype,dest,tag,comm) \
360: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
361: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
362: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
363: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
364: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
365: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
366: MPI_Abort(MPI_COMM_WORLD,0))
367: #define MPI_Rsend(buf,count, datatype,dest,tag,comm) \
368: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
369: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
370: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
371: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
372: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
373: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
374: MPI_Abort(MPI_COMM_WORLD,0))
375: #define MPI_Buffer_attach(buffer,size) \
376: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
377: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
378: MPI_SUCCESS)
379: #define MPI_Buffer_detach(buffer,size)\
380: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
381: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (size),\
382: MPI_SUCCESS)
383: #define MPI_Ibsend(buf,count, datatype,dest,tag,comm,request) \
384: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
385: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
386: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
387: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
388: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
389: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
390: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
391: MPI_Abort(MPI_COMM_WORLD,0))
392: #define MPI_Issend(buf,count, datatype,dest,tag,comm,request) \
393: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
394: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
395: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
396: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
397: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
398: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
399: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
400: MPI_Abort(MPI_COMM_WORLD,0))
401: #define MPI_Irsend(buf,count, datatype,dest,tag,comm,request) \
402: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
403: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
404: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
405: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
406: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
407: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
408: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
409: MPI_Abort(MPI_COMM_WORLD,0))
410: #define MPI_Irecv(buf,count, datatype,source,tag,comm,request) \
411: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
412: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
413: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
414: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
415: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
416: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
417: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
418: MPI_Abort(MPI_COMM_WORLD,0))
419: #define MPI_Isend(buf,count, datatype,dest,tag,comm,request) \
420: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
421: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
422: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
423: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
424: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
425: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
426: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
427: MPI_Abort(MPI_COMM_WORLD,0))
428: #define MPI_Wait(request,status) \
429: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
430: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
431: MPI_SUCCESS)
432: #define MPI_Test(request,flag,status) \
433: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
434: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status),\
435: *(flag) = 0, \
436: MPI_SUCCESS)
437: #define MPI_Request_free(request) \
438: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
439: MPI_SUCCESS)
440: #define MPI_Waitany(a,b,c,d) \
441: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
442: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
443: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
444: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),(*c = 0), \
445: MPI_SUCCESS)
446: #define MPI_Testany(a,b,c,d,e) \
447: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (a),\
448: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (b),\
449: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (c),\
450: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (d),\
451: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (e),\
452: MPI_SUCCESS)
453: #define MPI_Waitall(count,array_of_requests,array_of_statuses) \
454: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
455: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
456: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
457: MPI_SUCCESS)
458: #define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
459: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
460: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
461: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (flag),\
462: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
463: MPI_SUCCESS)
464: #define MPI_Waitsome(incount,array_of_requests,outcount,\
465: array_of_indices,array_of_statuses) \
466: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (incount),\
467: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
468: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (outcount),\
469: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_indices),\
470: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_statuses),\
471: MPI_SUCCESS)
472: #define MPI_Comm_group(comm,group) \
473: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
474: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
475: MPI_SUCCESS)
476: #define MPI_Group_incl(group,n,ranks,newgroup) \
477: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
478: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (n),\
479: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ranks),\
480: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newgroup),\
481: MPI_SUCCESS)
482: #define MPI_Testsome(incount,array_of_requests,outcount,\
483: array_of_indices,array_of_statuses) MPI_SUCCESS
484: #define MPI_Iprobe(source,tag,comm,flag,status) (*(flag)=0, MPI_SUCCESS)
485: #define MPI_Probe(source,tag,comm,status) MPI_SUCCESS
486: #define MPI_Cancel(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
487: #define MPI_Test_cancelled(status,flag) (*(flag)=0,MPI_SUCCESS)
488: #define MPI_Send_init(buf,count, datatype,dest,tag,comm,request) \
489: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
490: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
491: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
492: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
493: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
494: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
495: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
496: MPI_SUCCESS)
497: #define MPI_Bsend_init(buf,count, datatype,dest,tag,comm,request) \
498: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
499: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
500: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
501: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
502: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
503: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
504: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
505: MPI_SUCCESS)
506: #define MPI_Ssend_init(buf,count, datatype,dest,tag,comm,request) \
507: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
508: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
509: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
510: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
511: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
512: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
513: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
514: MPI_SUCCESS)
524: #define MPI_Rsend_init(buf,count, datatype,dest,tag,comm,request) \
525: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
526: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
527: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
528: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (dest),\
529: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
530: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
531: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
532: MPI_SUCCESS)
533: #define MPI_Recv_init(buf,count, datatype,source,tag,comm,request) \
534: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf),\
535: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
536: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
537: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (source),\
538: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (tag),\
539: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
540: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),\
541: MPI_SUCCESS)
542: #define MPI_Start(request) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (request),MPI_SUCCESS)
543: #define MPI_Startall(count,array_of_requests) \
544: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
545: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_requests),\
546: MPI_SUCCESS)
547: #define MPI_Op_create(function,commute,op) \
548: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (function),\
549: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (commute),\
550: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
551: MPI_SUCCESS)
552: #define MPI_Op_free(op) \
553: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (op),\
554: MPI_SUCCESS)
555: /* Need to determine sizeof "sendtype" */
556: #define MPI_Sendrecv(sendbuf,sendcount, sendtype,\
557: dest,sendtag,recvbuf,recvcount,\
558: recvtype,source,recvtag,\
559: comm,status) \
560: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount) * MPI_sizeof(sendtype))
561: #define MPI_Sendrecv_replace(buf,count, datatype,dest,sendtag,\
562: source,recvtag,comm,status) MPI_SUCCESS
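/*
   For example, on the single MPIUNI process a call such as

       MPI_Sendrecv(xin,n,MPI_DOUBLE,0,0,xout,n,MPI_DOUBLE,0,0,comm,MPI_STATUS_IGNORE);

   (with hypothetical buffers xin and xout holding n doubles) reduces to copying
   n*sizeof(double) bytes from xin to xout.
*/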
563: #define MPI_Type_contiguous(count, oldtype,newtype) \
564: (*(newtype) = (count)*(oldtype),MPI_SUCCESS)
565: #define MPI_Type_vector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
566: #define MPI_Type_hvector(count,blocklength,stride,oldtype, newtype) MPI_SUCCESS
567: #define MPI_Type_indexed(count,array_of_blocklengths,\
568: array_of_displacements, oldtype,\
569: newtype) MPI_SUCCESS
570: #define MPI_Type_hindexed(count,array_of_blocklengths,\
571: array_of_displacements, oldtype,\
572: newtype) MPI_SUCCESS
573: #define MPI_Type_struct(count,array_of_blocklengths,\
574: array_of_displacements,\
575: array_of_types, newtype) \
576: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
577: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_blocklengths),\
578: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_displacements),\
579: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_types),\
580: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype),\
581: MPI_SUCCESS)
582: #define MPI_Address(location,address) \
583: (*(address) = (MPIUNI_INTPTR)(char *)(location),MPI_SUCCESS)
584: #define MPI_Type_extent(datatype,extent) *(extent) = datatype
585: #define MPI_Type_size(datatype,size) (*(size) = (datatype) & 0xff, MPI_SUCCESS)
586: #define MPI_Type_lb(datatype,displacement) \
587: MPI_Abort(MPI_COMM_WORLD,0)
588: #define MPI_Type_ub(datatype,displacement) \
589: MPI_Abort(MPI_COMM_WORLD,0)
590: #define MPI_Type_commit(datatype) (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
591: MPI_SUCCESS)
592: #define MPI_Type_free(datatype) MPI_SUCCESS
593: #define MPI_Get_elements(status, datatype,count) \
594: MPI_Abort(MPI_COMM_WORLD,0)
595: #define MPI_Pack(inbuf,incount, datatype,outbuf,\
596: outsize,position, comm) \
597: MPI_Abort(MPI_COMM_WORLD,0)
598: #define MPI_Unpack(inbuf,insize,position,outbuf,\
599: outcount, datatype,comm) \
600: MPI_Abort(MPI_COMM_WORLD,0)
601: #define MPI_Pack_size(incount, datatype,comm,size) \
602: MPI_Abort(MPI_COMM_WORLD,0)
603: #define MPI_Barrier(comm) \
604: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
605: MPI_SUCCESS)
606: #define MPI_Bcast(buffer,count,datatype,root,comm) \
607: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buffer),\
608: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count),\
609: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype),\
610: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
611: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
612: MPI_SUCCESS)
613: #define MPI_Gather(sendbuf,sendcount, sendtype,\
614: recvbuf,recvcount, recvtype,\
615: root,comm) \
616: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
617: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
618: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
619: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
620: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
621: MPI_SUCCESS)
622: #define MPI_Gatherv(sendbuf,sendcount, sendtype,\
623: recvbuf,recvcounts,displs,\
624: recvtype,root,comm) \
625: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
626: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
627: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
628: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
629: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
630: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
631: MPI_SUCCESS)
632: #define MPI_Scatter(sendbuf,sendcount, sendtype,\
633: recvbuf,recvcount, recvtype,\
634: root,comm) \
635: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendbuf),\
636: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcount),\
637: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
638: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvbuf),\
639: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
640: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
641: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
642: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_Abort(MPI_COMM_WORLD,0))
643: #define MPI_Scatterv(sendbuf,sendcounts,displs,\
644: sendtype, recvbuf,recvcount,\
645: recvtype,root,comm) \
646: (MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)),\
647: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
648: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendtype),\
649: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (sendcounts),\
650: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (root),\
651: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
652: MPI_SUCCESS)
653: #define MPI_Allgather(sendbuf,sendcount, sendtype,\
654: recvbuf,recvcount, recvtype,comm) \
655: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
656: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
657: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
658: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
659: MPI_SUCCESS)
660: #define MPI_Allgatherv(sendbuf,sendcount, sendtype,\
661: recvbuf,recvcounts,displs,recvtype,comm) \
662: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcounts),\
663: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (displs),\
664: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
665: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
666: MPIUNI_Memcpy((recvbuf),(sendbuf),(sendcount)*MPI_sizeof(sendtype)), \
667: MPI_SUCCESS)
668: #define MPI_Alltoall(sendbuf,sendcount, sendtype,\
669: recvbuf,recvcount, recvtype,comm) \
670: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvcount),\
671: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (recvtype),\
672: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
673: MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)),\
674: MPI_SUCCESS)
675: #define MPI_Alltoallv(sendbuf,sendcounts,sdispls,\
676: sendtype, recvbuf,recvcounts,\
677: rdispls, recvtype,comm) MPI_Abort(MPI_COMM_WORLD,0)
678: #define MPI_Alltoallw(sendbuf,sendcounts,sdispls,\
679: sendtypes, recvbuf,recvcounts,\
680: rdispls, recvtypes,comm) MPI_Abort(MPI_COMM_WORLD,0)
681: #define MPI_Reduce(sendbuf, recvbuf,count,\
682: datatype,op,root,comm) \
683: (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
684: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
685: #define MPI_Allreduce(sendbuf, recvbuf,count,datatype,op,comm) \
686: (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)), \
687: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
688: #define MPI_Scan(sendbuf, recvbuf,count,datatype,op,comm) \
689: (MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)),\
690: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),MPI_SUCCESS)
691: #define MPI_Exscan(sendbuf, recvbuf,count,datatype,op,comm) MPI_SUCCESS
692: #define MPI_Reduce_scatter(sendbuf, recvbuf,recvcounts,\
693: datatype,op,comm) \
694: MPI_Abort(MPI_COMM_WORLD,0)
695: #define MPI_Group_size(group,size) (*(size)=1,MPI_SUCCESS)
696: #define MPI_Group_rank(group,rank) (*(rank)=0,MPI_SUCCESS)
697: #define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
698: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group1), \
699: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group2), \
700: MPIUNI_Memcpy((ranks2),(ranks1),(n) * sizeof(int)), \
701: MPI_SUCCESS)
702: #define MPI_Group_compare(group1,group2,result) \
703: (*(result)=1,MPI_SUCCESS)
704: #define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
705: #define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
706: #define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
707: #define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
708: #define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
709: #define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
710: #define MPI_Group_free(group) \
711: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (group),\
712: MPI_SUCCESS)
713: #define MPI_Comm_compare(comm1,comm2,result) \
714: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm1),\
715: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm2),\
716: *(result)=MPI_IDENT,\
717: MPI_SUCCESS)
718: #define MPI_Comm_split(comm,color,key,newcomm) \
719: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (color),\
720: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (key),\
721: MPI_Comm_dup(comm,newcomm))
722: #define MPI_Comm_test_inter(comm,flag) (*(flag)=1,MPI_SUCCESS)
723: #define MPI_Comm_remote_size(comm,size) (*(size)=1,MPI_SUCCESS)
724: #define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
725: #define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
726: remote_leader,tag,newintercomm) MPI_SUCCESS
727: #define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
729: #define MPI_Topo_test(comm,status) MPI_SUCCESS
730: #define MPI_Cart_create(comm_old,ndims,dims,periods,\
731: reorder,comm_cart) MPI_SUCCESS
732: #define MPI_Dims_create(nnodes,ndims,dims) MPI_Abort(MPI_COMM_WORLD,0)
733: #define MPI_Graph_create(comm,a,b,c,d,e) MPI_SUCCESS
734: #define MPI_Graphdims_Get(comm,nnodes,nedges) MPI_Abort(MPI_COMM_WORLD,0)
735: #define MPI_Graph_get(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
736: #define MPI_Cartdim_get(comm,ndims) MPI_Abort(MPI_COMM_WORLD,0)
737: #define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
738: MPI_Abort(MPI_COMM_WORLD,0)
739: #define MPI_Cart_rank(comm,coords,rank) MPI_Abort(MPI_COMM_WORLD,0)
740: #define MPI_Cart_coords(comm,rank,maxdims,coords) \
741: MPI_Abort(MPI_COMM_WORLD,0)
742: #define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
743: MPI_Abort(MPI_COMM_WORLD,0)
744: #define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
745: MPI_Abort(MPI_COMM_WORLD,0)
746: #define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
747: MPI_Abort(MPI_COMM_WORLD,0)
748: #define MPI_Cart_sub(comm,remain_dims,newcomm) MPI_Abort(MPI_COMM_WORLD,0)
749: #define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPI_Abort(MPI_COMM_WORLD,0)
750: #define MPI_Graph_map(comm,a,b,c,d) MPI_Abort(MPI_COMM_WORLD,0)
751: #define MPI_Get_processor_name(name,result_len) \
752: (MPIUNI_Memcpy(name,"localhost",10*sizeof(char)),*(result_len) = 9,MPI_SUCCESS)
753: #define MPI_Errhandler_create(function,errhandler) (*(errhandler) = (MPI_Errhandler) 0, MPI_SUCCESS)
754: #define MPI_Errhandler_set(comm,errhandler) \
755: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm),\
756: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (errhandler),\
757: MPI_SUCCESS)
758: #define MPI_Errhandler_get(comm,errhandler) MPI_SUCCESS
759: #define MPI_Errhandler_free(errhandler) MPI_SUCCESS
760: #define MPI_Error_string(errorcode,string,result_len) MPI_SUCCESS
761: #define MPI_Error_class(errorcode,errorclass) MPI_SUCCESS
762: #define MPI_Wtick() 1.0
763: #define MPI_Pcontrol(level) MPI_SUCCESS
765: #define MPI_NULL_COPY_FN 0
766: #define MPI_NULL_DELETE_FN 0
768: /* MPI-IO additions */
770: #define MPI_File_open(comm,filename,amode,info,mpi_fh) \
771: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (comm), \
772: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filename), \
773: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (amode), \
774: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
775: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
776: MPI_Abort(MPI_COMM_WORLD,0))
778: #define MPI_File_close(mpi_fh) \
779: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
780: MPI_Abort(MPI_COMM_WORLD,0))
782: #define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
783: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
784: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (disp), \
785: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (etype), \
786: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (filetype), \
787: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datarep), \
788: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (info), \
789: MPI_Abort(MPI_COMM_WORLD,0))
791: #define MPI_Type_get_extent(datatype,lb,extent) \
792: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
793: *(lb) = 0, *(extent) = datatype,0)
795: #define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
796: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
797: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
798: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
799: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
800: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
801: MPI_Abort(MPI_COMM_WORLD,0))
803: #define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
804: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (mpi_fh), \
805: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (buf), \
806: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (count), \
807: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (datatype), \
808: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (status), \
809: MPI_Abort(MPI_COMM_WORLD,0))
811: /* called from PetscInitialize() - so return success */
812: #define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
813: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (name), \
814: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (read_conv_fn), \
815: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (write_conv_fn), \
816: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (extent_fn), \
817: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (state), \
818: MPI_SUCCESS)
820: #define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
821: (MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (ndims), \
822: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_sizes), \
823: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_subsizes), \
824: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (array_of_starts), \
825: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (order), \
826: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (oldtype), \
827: MPIUNI_TMP = (void*)(MPIUNI_INTPTR) (newtype), \
828: MPI_Abort(MPI_COMM_WORLD,0))
830: #if defined(__cplusplus)
831: }
832: #endif
833: #endif