Actual source code: sbaijov.c
/*
   Routines to compute overlapping regions of a parallel MPI matrix.
   Used for finding submatrices that were shared across processors.
*/
#include <../src/mat/impls/sbaij/mpi/mpisbaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat,PetscInt*,PetscInt,PetscInt*,PetscBT*);

PetscErrorCode MatIncreaseOverlap_MPISBAIJ(Mat C,PetscInt is_max,IS is[],PetscInt ov)
{
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs,M=C->rmap->N,Mbs=M/bs,*nidx,isz,iov;
  IS             *is_new,*is_row;
  Mat            *submats;
  Mat_MPISBAIJ   *c=(Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ   *asub_i;
  PetscBT        table;
  PetscInt       *ai,brow,nz,nis,l,nmax,nstages_local,nstages,max_no,pos;
  const PetscInt *idx;
  PetscBool      flg;

  PetscMalloc1(is_max,&is_new);
  /* Convert the indices into block format */
  ISCompressIndicesGeneral(N,C->rmap->n,bs,is_max,is,is_new);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");

  /* ----- previous non-scalable implementation ----- */
  flg = PETSC_FALSE;
  PetscOptionsHasName(NULL,NULL,"-IncreaseOverlap_old",&flg);
  if (flg) { /* previous non-scalable implementation */
    printf("use previous non-scalable implementation...\n");
    for (i=0; i<ov; ++i) {
      MatIncreaseOverlap_MPISBAIJ_Once(C,is_max,is_new);
    }
  } else { /* implementation using modified BAIJ routines */

    PetscMalloc1(Mbs+1,&nidx);
    PetscBTCreate(Mbs,&table); /* for column search */

    /* Create is_row */
    PetscMalloc1(is_max,&is_row);
    ISCreateStride(PETSC_COMM_SELF,Mbs,0,1,&is_row[0]);

    for (i=1; i<is_max; i++) {
      is_row[i] = is_row[0]; /* reuse is_row[0] */
    }

    /* Allocate memory to hold all the submatrices - Modified from MatCreateSubMatrices_MPIBAIJ() */
    PetscMalloc1(is_max+1,&submats);

    /* Determine the number of stages through which submatrices are done */
    nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
    if (!nmax) nmax = 1;
    nstages_local = is_max/nmax + ((is_max % nmax) ? 1 : 0);
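    /* For example, with is_max = 5 and nmax = 2 this gives nstages_local = 2 + 1 = 3:
       two full stages of 2 submatrices and a final stage of 1 */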
    /* Make sure every processor loops through the nstages */
    MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));

    for (iov=0; iov<ov; ++iov) {
      /* 1) Get submats for column search */
      for (i=0,pos=0; i<nstages; i++) {
        if (pos+nmax <= is_max) max_no = nmax;
        else if (pos == is_max) max_no = 0;
        else max_no = is_max-pos;

        c->ijonly = PETSC_TRUE; /* only matrix data structures are requested */
        /* The resulting submatrices should be BAIJ, not SBAIJ, hence we change this value to trigger that */
        PetscStrcpy(((PetscObject)c->A)->type_name,MATSEQBAIJ);
        MatCreateSubMatrices_MPIBAIJ_local(C,max_no,is_row+pos,is_new+pos,MAT_INITIAL_MATRIX,submats+pos);
        PetscStrcpy(((PetscObject)c->A)->type_name,MATSEQSBAIJ);
        pos += max_no;
      }

      /* 2) Row search */
      MatIncreaseOverlap_MPIBAIJ_Once(C,is_max,is_new);

      /* 3) Column search */
      for (i=0; i<is_max; i++) {
        asub_i = (Mat_SeqSBAIJ*)submats[i]->data;
        ai     = asub_i->i;

        /* put is_new obtained from MatIncreaseOverlap_MPIBAIJ() to table */
        PetscBTMemzero(Mbs,table);

        ISGetIndices(is_new[i],&idx);
        ISGetLocalSize(is_new[i],&nis);
        for (l=0; l<nis; l++) {
          PetscBTSet(table,idx[l]);
          nidx[l] = idx[l];
        }
        isz = nis;

        /* add column entries to table */
        for (brow=0; brow<Mbs; brow++) {
          nz = ai[brow+1] - ai[brow];
          if (nz) {
            if (!PetscBTLookupSet(table,brow)) nidx[isz++] = brow;
          }
        }
        ISRestoreIndices(is_new[i],&idx);
        ISDestroy(&is_new[i]);

        /* create updated is_new */
        ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,PETSC_COPY_VALUES,is_new+i);
      }

      /* Free tmp spaces */
      for (i=0; i<is_max; i++) {
        MatDestroy(&submats[i]);
      }
    }

    PetscBTDestroy(&table);
    PetscFree(submats);
    ISDestroy(&is_row[0]);
    PetscFree(is_row);
    PetscFree(nidx);
  }

  for (i=0; i<is_max; i++) {ISDestroy(&is[i]);}
  ISExpandIndicesGeneral(N,N,bs,is_max,is_new,is);
  for (i=0; i<is_max; i++) {ISDestroy(&is_new[i]);}
  PetscFree(is_new);
  return(0);
}
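
/*
   A minimal usage sketch (illustrative only, not part of the PETSc source): these routines
   are reached through MatIncreaseOverlap() on an assembled MATMPISBAIJ matrix, which is how,
   for example, PCASM enlarges its subdomains. The index sets are given in global (unblocked)
   numbering and are replaced by their enlarged versions:

     Mat A;       assembled MATMPISBAIJ matrix
     IS  is[1];   index set to be enlarged, e.g. from ISCreateGeneral()
     MatIncreaseOverlap(A,1,is,2);   grow is[0] by 2 levels of overlap
     ... use is[0], then ISDestroy(&is[0]);
*/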
typedef enum {MINE,OTHER} WhoseOwner;
/*  data1, odata1 and odata2 are packed in the format (for communication):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max+1+sum(size of is[k]), k=0,...,i-1] = data(is[i])
        ...
    data2 is packed in the format (for creating output is[]):
       data[0]          = is_max, no of is
       data[1]          = size of is[0]
        ...
       data[is_max]     = size of is[is_max-1]
       data[is_max + 1] = data(is[0])
        ...
       data[is_max + 1 + Mbs*i] = data(is[i])
        ...
*/
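/* Example: with is_max = 2, is[0] = {3,7} and is[1] = {5}, data1 is packed as
   {2, 2, 1, 3, 7, 5}; in data2 the entries of is[i] instead begin at the fixed
   offset is_max + 1 + Mbs*i */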
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Once(Mat C,PetscInt is_max,IS is[])
{
  Mat_MPISBAIJ   *c = (Mat_MPISBAIJ*)C->data;
  PetscMPIInt    size,rank,tag1,tag2,*len_s,nrqr,nrqs,*id_r1,*len_r1,flag,len,*iwork;
  const PetscInt *idx_i;
  PetscInt       idx,isz,col,*n,*data1,**data1_start,*data2,*data2_i,*data,*data_i;
  PetscInt       Mbs,i,j,k,*odata1,*odata2;
  PetscInt       proc_id,**odata2_ptr,*ctable=NULL,*btable,len_max,len_est;
  PetscInt       proc_end=0,len_unused,nodata2;
  PetscInt       ois_max; /* max no. of is[] over all processors */
  char           *t_p;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*s_waits2,r_req;
  MPI_Status     *s_status,r_status;
  PetscBT        *table;  /* mark indices of this processor's is[] */
  PetscBT        table_i;
  PetscBT        otable;  /* mark indices of other processors' is[] */
  PetscInt       bs=C->rmap->bs,Bn=c->B->cmap->n,Bnbs=Bn/bs,*Bowners;
  IS             garray_local,garray_gl;

  PetscObjectGetComm((PetscObject)C,&comm);
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  PetscObjectGetNewTag((PetscObject)C,&tag1);
  PetscObjectGetNewTag((PetscObject)C,&tag2);

  /* create tables used in
     step 1: table[i] - mark c->garray of proc [i]
     step 3: table[i] - mark indices of is[i] when whose=MINE
             table[0] - mark indices of is[] when whose=OTHER */
  len = PetscMax(is_max, size);
  PetscMalloc2(len,&table,(Mbs/PETSC_BITS_PER_BYTE+1)*len,&t_p);
  for (i=0; i<len; i++) {
    table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
  }

  MPIU_Allreduce(&is_max,&ois_max,1,MPIU_INT,MPI_MAX,comm);

  /* 1. Send this processor's is[] to other processors */
  /*---------------------------------------------------*/
  /* allocate spaces */
  PetscMalloc1(is_max,&n);
  len = 0;
  for (i=0; i<is_max; i++) {
    ISGetLocalSize(is[i],&n[i]);
    len += n[i];
  }
  if (!len) {
    is_max = 0;
  } else {
    len += 1 + is_max; /* max length of data1 for one processor */
  }

  PetscMalloc1(size*len+1,&data1);
  PetscMalloc1(size,&data1_start);
  for (i=0; i<size; i++) data1_start[i] = data1 + i*len;

  PetscMalloc4(size,&len_s,size,&btable,size,&iwork,size+1,&Bowners);

  /* gather c->garray from all processors */
  ISCreateGeneral(comm,Bnbs,c->garray,PETSC_COPY_VALUES,&garray_local);
  ISAllGather(garray_local,&garray_gl);
  ISDestroy(&garray_local);
  MPI_Allgather(&Bnbs,1,MPIU_INT,Bowners+1,1,MPIU_INT,comm);

  Bowners[0] = 0;
  for (i=0; i<size; i++) Bowners[i+1] += Bowners[i];
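  /* Example: with 3 processors and Bnbs = {3,2,4}, Bowners becomes {0,3,5,9}, so the
     garray entries of processor i occupy garray_gl[Bowners[i]..Bowners[i+1]-1] */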
  if (is_max) {
    /* hash table ctable which maps c->row to proc_id */
    PetscMalloc1(Mbs,&ctable);
    for (proc_id=0,j=0; proc_id<size; proc_id++) {
      for (; j<C->rmap->range[proc_id+1]/bs; j++) ctable[j] = proc_id;
    }

    /* hash tables marking c->garray */
    ISGetIndices(garray_gl,&idx_i);
    for (i=0; i<size; i++) {
      table_i = table[i];
      PetscBTMemzero(Mbs,table_i);
      for (j = Bowners[i]; j<Bowners[i+1]; j++) { /* go through B cols of proc[i] */
        PetscBTSet(table_i,idx_i[j]);
      }
    }
    ISRestoreIndices(garray_gl,&idx_i);
  } /* if (is_max) */
  ISDestroy(&garray_gl);

  /* evaluate communication - mesg to who, length, and buffer space */
  for (i=0; i<size; i++) len_s[i] = 0;

  /* header of data1 */
  for (proc_id=0; proc_id<size; proc_id++) {
    iwork[proc_id]        = 0;
    *data1_start[proc_id] = is_max;
    data1_start[proc_id]++;
    for (j=0; j<is_max; j++) {
      if (proc_id == rank) {
        *data1_start[proc_id] = n[j];
      } else {
        *data1_start[proc_id] = 0;
      }
      data1_start[proc_id]++;
    }
  }

  for (i=0; i<is_max; i++) {
    ISGetIndices(is[i],&idx_i);
    for (j=0; j<n[i]; j++) {
      idx = idx_i[j];
      *data1_start[rank] = idx; data1_start[rank]++; /* for local processing */
      proc_end = ctable[idx];
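      /* idx is always sent to its owner proc_end; a lower-ranked processor receives it only
         if idx appears in its off-diagonal column map (table[proc_id]). Ranks above proc_end
         are skipped: with only the upper triangle stored, column idx can occur only in block
         rows <= idx, and those rows live on ranks <= proc_end */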
      for (proc_id=0; proc_id<=proc_end; proc_id++) { /* for others to process */
        if (proc_id == rank) continue; /* done before this loop */
        if (proc_id < proc_end && !PetscBTLookup(table[proc_id],idx)) continue; /* no need to send idx to [proc_id] */
        *data1_start[proc_id] = idx; data1_start[proc_id]++;
        len_s[proc_id]++;
      }
    }
    /* update header data */
    for (proc_id=0; proc_id<size; proc_id++) {
      if (proc_id == rank) continue;
      *(data1 + proc_id*len + 1 + i) = len_s[proc_id] - iwork[proc_id];
      iwork[proc_id]                 = len_s[proc_id];
    }
    ISRestoreIndices(is[i],&idx_i);
  }

  nrqs = 0; nrqr = 0;
  for (i=0; i<size; i++) {
    data1_start[i] = data1 + i*len;
    if (len_s[i]) {
      nrqs++;
      len_s[i] += 1 + is_max; /* add no. of header msg */
    }
  }

  for (i=0; i<is_max; i++) {
    ISDestroy(&is[i]);
  }
  PetscFree(n);
  PetscFree(ctable);

  /* Determine the number of messages to expect, their lengths, and from-ids */
  PetscGatherNumberOfMessages(comm,NULL,len_s,&nrqr);
  PetscGatherMessageLengths(comm,nrqs,nrqr,len_s,&id_r1,&len_r1);

  /* Now post the sends */
  PetscMalloc2(size,&s_waits1,size,&s_waits2);
  k = 0;
  for (proc_id=0; proc_id<size; proc_id++) { /* send data1 to processor [proc_id] */
    if (len_s[proc_id]) {
      MPI_Isend(data1_start[proc_id],len_s[proc_id],MPIU_INT,proc_id,tag1,comm,s_waits1+k);
      k++;
    }
  }

  /* 2. Receive other's is[] and process. Then send back */
  /*-----------------------------------------------------*/
  len = 0;
  for (i=0; i<nrqr; i++) {
    if (len_r1[i] > len) len = len_r1[i];
  }
  PetscFree(len_r1);
  PetscFree(id_r1);

  for (proc_id=0; proc_id<size; proc_id++) len_s[proc_id] = iwork[proc_id] = 0;

  PetscMalloc1(len+1,&odata1);
  PetscMalloc1(size,&odata2_ptr);
  PetscBTCreate(Mbs,&otable);

  len_max = ois_max*(Mbs+1); /* max space storing all is[] for each receive */
  len_est = 2*len_max;       /* estimated space for storing the is[] of all received messages */
  PetscMalloc1(len_est+1,&odata2);
  nodata2 = 0; /* nodata2+1: number of PetscMalloc(,&odata2_ptr[]) calls */

  odata2_ptr[nodata2] = odata2;

  len_unused = len_est; /* unused space in the array odata2_ptr[nodata2] -- needs to be >= len_max */

  k = 0;
  while (k < nrqr) {
    /* Receive messages */
    MPI_Iprobe(MPI_ANY_SOURCE,tag1,comm,&flag,&r_status);
    if (flag) {
      MPI_Get_count(&r_status,MPIU_INT,&len);
      proc_id = r_status.MPI_SOURCE;
      MPI_Irecv(odata1,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
      MPI_Wait(&r_req,&r_status);

      /* Process messages */
      /* make sure there is enough unused space in the odata2 array */
      if (len_unused < len_max) { /* allocate more space for odata2 */
        PetscMalloc1(len_est+1,&odata2);

        odata2_ptr[++nodata2] = odata2;

        len_unused = len_est;
      }

      MatIncreaseOverlap_MPISBAIJ_Local(C,odata1,OTHER,odata2,&otable);
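      /* odata2 now holds the reply: a header (count plus one size per index set) followed by
         only the newly found indices of each set, packed contiguously; its total length is
         computed below */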
      len = 1 + odata2[0];
      for (i=0; i<odata2[0]; i++) len += odata2[1 + i];

      /* Send messages back */
      MPI_Isend(odata2,len,MPIU_INT,proc_id,tag2,comm,s_waits2+k);
      k++;
      odata2        += len;
      len_unused    -= len;
      len_s[proc_id] = len; /* length of the message sent back to [proc_id] by this proc */
    }
  }
  PetscFree(odata1);
  PetscBTDestroy(&otable);

  /* 3. Do local work on this processor's is[] */
  /*-------------------------------------------*/
  /* make sure there is enough unused space in the odata2(=data) array */
  len_max = is_max*(Mbs+1); /* max space storing all is[] for this processor */
  if (len_unused < len_max) { /* allocate more space for odata2 */
    PetscMalloc1(len_est+1,&odata2);

    odata2_ptr[++nodata2] = odata2;
  }

  data = odata2;
  MatIncreaseOverlap_MPISBAIJ_Local(C,data1_start[rank],MINE,data,table);
  PetscFree(data1_start);

  /* 4. Receive work done on other processors, then merge */
  /*------------------------------------------------------*/
  /* get max number of messages that this processor expects to recv */
  MPIU_Allreduce(len_s,iwork,size,MPI_INT,MPI_MAX,comm);
  PetscMalloc1(iwork[rank]+1,&data2);
  PetscFree4(len_s,btable,iwork,Bowners);

  k = 0;
  while (k < nrqs) {
    /* Receive messages */
    MPI_Iprobe(MPI_ANY_SOURCE,tag2,comm,&flag,&r_status);
    if (flag) {
      MPI_Get_count(&r_status,MPIU_INT,&len);

      proc_id = r_status.MPI_SOURCE;

      MPI_Irecv(data2,len,MPIU_INT,proc_id,r_status.MPI_TAG,comm,&r_req);
      MPI_Wait(&r_req,&r_status);
      if (len > 1+is_max) { /* Add data2 into data */
        data2_i = data2 + 1 + is_max;
        for (i=0; i<is_max; i++) {
          table_i = table[i];
          data_i  = data + 1 + is_max + Mbs*i;
          isz     = data[1+i];
          for (j=0; j<data2[1+i]; j++) {
            col = data2_i[j];
            if (!PetscBTLookupSet(table_i,col)) data_i[isz++] = col;
          }
          data[1+i] = isz;
          if (i < is_max - 1) data2_i += data2[1+i];
        }
      }
      k++;
    }
  }
  PetscFree(data2);
  PetscFree2(table,t_p);

  /* phase 1 sends are complete */
  PetscMalloc1(size,&s_status);
  if (nrqs) {MPI_Waitall(nrqs,s_waits1,s_status);}
  PetscFree(data1);

  /* phase 2 sends are complete */
  if (nrqr) {MPI_Waitall(nrqr,s_waits2,s_status);}
  PetscFree2(s_waits1,s_waits2);
  PetscFree(s_status);

  /* 5. Create new is[] */
  /*--------------------*/
  for (i=0; i<is_max; i++) {
    data_i = data + 1 + is_max + Mbs*i;
    ISCreateGeneral(PETSC_COMM_SELF,data[1+i],data_i,PETSC_COPY_VALUES,is+i);
  }
  for (k=0; k<=nodata2; k++) {
    PetscFree(odata2_ptr[k]);
  }
  PetscFree(odata2_ptr);
  return(0);
}

/*
   MatIncreaseOverlap_MPISBAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

   Inputs:
     C     - MAT_MPISBAIJ;
     data  - holds is[]. See MatIncreaseOverlap_MPISBAIJ_Once() for the format.
     whose - whose is[] is to be processed,
             MINE:  this processor's is[]
             OTHER: another processor's is[]
   Output:
     nidx  - whose = MINE:
                  holds the input and newly found indices in the same format as data
             whose = OTHER:
                  only holds the newly found indices
     table - table[i]: marks the indices of is[i], i=0,...,is_max. Used only in the case whose=MINE.
*/
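/* Example (whose == MINE): with is_max = 2 and Mbs = 10, nidx[0] = 2, nidx[1] and nidx[2]
   hold the new sizes, the indices of the enlarged is[0] start at nidx[3], and those of
   is[1] start at nidx[3+10]. For whose == OTHER the newly found indices are instead packed
   contiguously after the header. */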
/* Would computation be reduced by swapping the loops 'for each is' and 'for each row'? */
static PetscErrorCode MatIncreaseOverlap_MPISBAIJ_Local(Mat C,PetscInt *data,PetscInt whose,PetscInt *nidx,PetscBT *table)
{
  Mat_MPISBAIJ *c = (Mat_MPISBAIJ*)C->data;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(c->A)->data;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(c->B)->data;
  PetscInt     row,mbs,Mbs,*nidx_i,col,col_max,isz,isz0,*ai,*aj,*bi,*bj,*garray,rstart,l;
  PetscInt     a_start,a_end,b_start,b_end,i,j,k,is_max,*idx_i,n;
  PetscBT      table0;  /* mark the indices of input is[] for look up */
  PetscBT      table_i; /* points to the i-th table. When whose=OTHER, a single table is used for all is[] */

  Mbs    = c->Mbs; mbs = a->mbs;
  ai     = a->i; aj = a->j;
  bi     = b->i; bj = b->j;
  garray = c->garray;
  rstart = c->rstartbs;
  is_max = data[0];

  PetscBTCreate(Mbs,&table0);

  nidx[0] = is_max;
  idx_i   = data + is_max + 1; /* ptr to input is[0] array */
  nidx_i  = nidx + is_max + 1; /* ptr to output is[0] array */
  for (i=0; i<is_max; i++) { /* for each is */
    isz = 0;
    n   = data[1+i]; /* size of input is[i] */

    /* initialize and set table_i (marks idx and nidx) and table0 (only marks idx) */
    if (whose == MINE) { /* process this processor's is[] */
      table_i = table[i];
      nidx_i  = nidx + 1 + is_max + Mbs*i;
    } else { /* process another processor's is[] - only use one temp table */
      table_i = table[0];
    }
    PetscBTMemzero(Mbs,table_i);
    PetscBTMemzero(Mbs,table0);
    if (n==0) {
      nidx[1+i] = 0; /* size of new is[i] */
      continue;
    }

    isz0 = 0; col_max = 0;
    for (j=0; j<n; j++) {
      col = idx_i[j];
      if (col >= Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"index col %D >= Mbs %D",col,Mbs);
      if (!PetscBTLookupSet(table_i,col)) {
        PetscBTSet(table0,col);
        if (whose == MINE) nidx_i[isz0] = col;
        if (col_max < col) col_max = col;
        isz0++;
      }
    }

    if (whose == MINE) isz = isz0;
    k = 0; /* no. of indices from input is[i] that have been examined */
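    /* Because only the upper triangle is stored, overlap is gathered in both directions:
       rows already in is[i] contribute their column indices (row search), while rows not in
       is[i] are added when they have a nonzero in a column that is in is[i] (column search) */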
    for (row=0; row<mbs; row++) {
      a_start = ai[row]; a_end = ai[row+1];
      b_start = bi[row]; b_end = bi[row+1];
      if (PetscBTLookup(table0,row+rstart)) { /* row is in input is[i]:
                                                 do row search - collect all cols in this row */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (!PetscBTLookupSet(table_i,col)) nidx_i[isz++] = col;
        }
        k++;
        if (k >= isz0) break; /* for (row=0; row<mbs; row++) */
      } else { /* row is not in input is[i]:
                  do column search - add row to nidx_i if it has a col that is in input is[i] */
        for (l = a_start; l<a_end; l++) { /* Amat */
          col = aj[l] + rstart;
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = a_start; l<a_end; l++) */
          }
        }
        for (l = b_start; l<b_end; l++) { /* Bmat */
          col = garray[bj[l]];
          if (col > col_max) break;
          if (PetscBTLookup(table0,col)) {
            if (!PetscBTLookupSet(table_i,row+rstart)) nidx_i[isz++] = row+rstart;
            break; /* for (l = b_start; l<b_end; l++) */
          }
        }
      }
    }

    if (i < is_max - 1) {
      idx_i  += n;   /* ptr to input is[i+1] array */
      nidx_i += isz; /* ptr to output is[i+1] array */
    }
    nidx[1+i] = isz; /* size of new is[i] */
  } /* for each is */
  PetscBTDestroy(&table0);
  return(0);
}