Actual source code: ex42.c
static char help[] = "Tests MatIncreaseOverlap() and MatGetSubMatrices() for the parallel case.\n\
This example is similar to ex40.c; here the index sets used are random.\n\
Input arguments are:\n\
  -f <input_file> : file to load. For a 5X5 example of the 5-pt. stencil,\n\
                    use the file petsc/src/mat/examples/matbinary.ex\n\
  -nd <size>      : > 0  number of domains per processor\n\
  -ov <overlap>   : >= 0 amount of overlap between domains\n\n";
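/*
   Example invocation (a sketch only; the file name, process count, and option
   values below are illustrative and not part of this source):

       mpiexec -n 2 ./ex42 -f <binary_matrix_file> -nd 4 -ov 2
*/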
#include <petscksp.h>

int main(int argc,char **args)
{
  PetscInt    nd = 2,ov = 1,i,j,lsize,m,n,*idx;
  PetscMPIInt rank;
  PetscBool   flg;
  Mat         A,B,*submatA,*submatB;
  char        file[PETSC_MAX_PATH_LEN];
  PetscViewer fd;
  IS          *is1,*is2;
  PetscRandom r;
  PetscScalar rand;

  PetscInitialize(&argc,&args,(char*)0,help);
#if defined(PETSC_USE_COMPLEX)
  SETERRQ(PETSC_COMM_WORLD,1,"This example does not work with complex numbers");
#else

  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  PetscOptionsGetString(PETSC_NULL,"-f",file,PETSC_MAX_PATH_LEN,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-nd",&nd,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-ov",&ov,PETSC_NULL);

  /* Read the matrix as a parallel MPIAIJ matrix (no RHS is used in this example) */
  PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd);
  MatCreate(PETSC_COMM_WORLD,&A);
  MatSetType(A,MATMPIAIJ);
  MatLoad(A,fd);
  PetscViewerDestroy(&fd);

  /* Read the matrix again as a sequential matrix */
  PetscViewerBinaryOpen(PETSC_COMM_SELF,file,FILE_MODE_READ,&fd);
  MatCreate(PETSC_COMM_SELF,&B);
  MatSetType(B,MATSEQAIJ);
  MatLoad(B,fd);
  PetscViewerDestroy(&fd);

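  /*
     A and B now hold the same matrix: A is distributed over all processes,
     while B is a complete sequential copy on each process. The sequential
     copy serves as the reference against which the parallel results are
     checked below.
  */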
  /* Create the random number generator */
  MatGetSize(A,&m,&n);
  PetscRandomCreate(PETSC_COMM_SELF,&r);
  PetscRandomSetFromOptions(r);
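  /* Note: this relies on the default random interval of [0,1), so that
     rand*m below always maps to a valid size/index in [0,m). */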

  /* Create the IS corresponding to subdomains */
  PetscMalloc(nd*sizeof(IS),&is1);
  PetscMalloc(nd*sizeof(IS),&is2);
  PetscMalloc(m*sizeof(PetscInt),&idx);

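  /* is1[] holds the subdomain index sets that will be grown through the
     parallel matrix A, is2[] the identical sets grown through the sequential
     copy B; idx is scratch space for at most m indices. */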
  /* Create the random index sets */
  for (i=0; i<nd; i++) {
    /* Skip a few values, so that the IS on different processes are different */
    for (j=0; j<rank; j++) {
      PetscRandomGetValue(r,&rand);
    }
    PetscRandomGetValue(r,&rand);
    lsize = (PetscInt)(rand*m);
    for (j=0; j<lsize; j++) {
      PetscRandomGetValue(r,&rand);
      idx[j] = (PetscInt)(rand*m);
    }
    PetscSortInt(lsize,idx);
    ISCreateGeneral(PETSC_COMM_SELF,lsize,idx,PETSC_COPY_VALUES,is1+i);
    ISCreateGeneral(PETSC_COMM_SELF,lsize,idx,PETSC_COPY_VALUES,is2+i);
  }
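
  /*
     At this point each process owns nd index sets, and is1[i] and is2[i]
     contain exactly the same indices. MatIncreaseOverlap() then enlarges
     each set by ov levels of overlap, once through the parallel matrix A
     and once through the sequential copy B, so the enlarged sets (and the
     submatrices extracted from them) should agree.
  */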
  MatIncreaseOverlap(A,nd,is1,ov);
  MatIncreaseOverlap(B,nd,is2,ov);
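
  /* MatIncreaseOverlap() does not necessarily return the enlarged index sets
     in sorted order; sorting puts them into a canonical order so that the
     submatrices extracted from A and B can be compared with MatEqual(). */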
  for (i=0; i<nd; ++i) {
    ISSort(is1[i]);
    ISSort(is2[i]);
  }

  MatGetSubMatrices(A,nd,is1,is1,MAT_INITIAL_MATRIX,&submatA);
  MatGetSubMatrices(B,nd,is2,is2,MAT_INITIAL_MATRIX,&submatB);

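  /* submatA[] and submatB[] are sequential matrices, one per subdomain. If the
     parallel and sequential code paths agree, every comparison below should
     print flg = 1 (PETSC_TRUE). */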
  /* Now see whether the serial and parallel cases give the same answers */
  for (i=0; i<nd; ++i) {
    MatEqual(submatA[i],submatB[i],&flg);
    PetscPrintf(PETSC_COMM_SELF,"proc:[%d], i=%D, flg =%d\n",rank,i,(int)flg);
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    ISDestroy(&is1[i]);
    ISDestroy(&is2[i]);
    MatDestroy(&submatA[i]);
    MatDestroy(&submatB[i]);
  }
  PetscFree(submatA);
  PetscFree(submatB);
  PetscRandomDestroy(&r);
  PetscFree(is1);
  PetscFree(is2);
  MatDestroy(&A);
  MatDestroy(&B);
  PetscFree(idx);

  PetscFinalize();
#endif
  return 0;
}