static const scalar defaultMergeTol = 1E-6;
Pout<< "Create mesh for time = "
Pout<< "Writing dummy mesh to " << dummyMesh.polyMesh::objectPath()
Pout<< "Reading mesh from " << io.objectPath() << endl;
if (Pstream::master())
int slave=Pstream::firstSlave();
slave<=Pstream::lastSlave();
OPstream toSlave(Pstream::blocking, slave);
IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
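
// Editorial note (not in the original source): the OPstream/IPstream pair
// above suggests the master sends its list of boundary-patch entries to each
// slave, so that processors starting without a mesh can construct a dummy
// mesh whose patch names and types match the master's.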
forAll(patchEntries, patchI)
const entry& e = patchEntries[patchI];
if (type == processorPolyPatch::typeName)
if (patchI >= patches.size())
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
    << "Processor " << Pstream::myProcNo()
    << " has only " << patches.size()
    << " patches, master has "
|| name != patches[patchI].name()
"createMesh(const Time&, const fileName&, const bool)"
) << "Non-processor patches not synchronised."
    << "Master patch " << patchI
    << " type:" << type << endl
    << "Processor " << Pstream::myProcNo()
    << " patch " << patchI
    << " has name:" << patches[patchI].name()
    << " type:" << patches[patchI].type()
forAll(patchEntries, patchI)
const entry& e = patchEntries[patchI];
if (type == processorPolyPatch::typeName)
Pout<< "Adding patch:" << nPatches
patchDict.remove("nFaces");
patchDict.add("nFaces", 0);
patchDict.remove("startFace");
patchDict.add("startFace", 0);
patches[patchI] = polyPatch::New
Pout<< "Removing dummy mesh " << io.objectPath()
rmDir(io.objectPath());
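
// Editorial summary of the createMesh() excerpt above: every processor checks
// that its non-processor patches match the master's in number, name and type,
// processors without a mesh on disk add zero-sized copies of those patches
// (nFaces = 0, startFace = 0) to a dummy mesh, and the dummy mesh directory
// is removed again with rmDir() once it has been read.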
scalar getMergeDistance
scalar mergeTol = defaultMergeTol;
Foam::pow(scalar(10.0), -scalar(IOstream::defaultPrecision()));
Info<< "Merge tolerance : " << mergeTol << nl
    << "Write tolerance : " << writeTol << endl;
if (runTime.writeFormat() == IOstream::ASCII && mergeTol < writeTol)
    << "Your current settings specify ASCII writing with "
    << IOstream::defaultPrecision() << " digits precision." << endl
    << "Your merging tolerance (" << mergeTol << ") is finer than this."
    << "Please change your writeFormat to binary"
    << " or increase the writePrecision" << endl
    << "or adjust the merge tolerance (-mergeTol)."
scalar mergeDist = mergeTol * bb.mag();
Info<< "Overall meshes bounding box : " << bb << nl
    << "Relative tolerance : " << mergeTol << nl
    << "Absolute matching distance : " << mergeDist << nl
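
// Editorial note: getMergeDistance() appears to turn the relative -mergeTol
// into an absolute length by scaling it with the magnitude of the overall
// bounding box (mergeDist = mergeTol * bb.mag()). With the default
// mergeTol = 1E-6 and a write precision of 6 digits, writeTol is also 1E-6,
// so the ASCII warning above only triggers if the merge tolerance is made
// finer than the precision that points are written with.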
os << "Number of points: " << mesh.points().size() << nl
void writeDecomposition
Info<< "Writing wanted cell distribution to volScalarField " << name
    << " for postprocessing purposes." << nl << endl;
IOobject::AUTO_WRITE,
zeroGradientFvPatchScalarField::typeName
procCells[cI] = decomp[cI];
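
// Editorial note: writeDecomposition() evidently stores the wanted cell
// distribution as a volScalarField (one value per cell, AUTO_WRITE,
// zeroGradient boundaries) so the planned decomposition can be inspected in
// a post-processor.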
template<class GeoField>
wordList objectNames = objects.toc();
Pstream::scatter(masterNames);
if (haveMesh[Pstream::myProcNo()] && objectNames != masterNames)
    << "differing fields of type " << GeoField::typeName
    << " on processors." << endl
    << "Master has:" << masterNames << endl
    << Pstream::myProcNo() << " has:" << objectNames
fields.setSize(masterNames.size());
if (Pstream::master())
const word& name = masterNames[i];
io.writeOpt() = IOobject::AUTO_WRITE;
fields.set(i, new GeoField(io, mesh));
if (subsetterPtr.valid())
tmp<GeoField> tsubfld = subsetterPtr().interpolate(fields[i]);
for (label procI = 1; procI < Pstream::nProcs(); procI++)
if (!haveMesh[procI])
OPstream toProc(Pstream::blocking, procI);
else if (!haveMesh[Pstream::myProcNo()])
const word& name = masterNames[i];
IPstream fromMaster(Pstream::blocking, Pstream::masterNo());
const word& name = masterNames[i];
io.writeOpt() = IOobject::AUTO_WRITE;
fields.set(i, new GeoField(io, mesh));
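
// Editorial summary of the readFields<GeoField>() excerpt: the local object
// names are compared against the master's list so every processor sees the
// same set of fields; the master reads each field (switching it to
// AUTO_WRITE), interpolates it onto the dummy subset held by subsetterPtr and
// sends that field to any processor that has no mesh, while processors that
// do have a mesh read the field directly from disk. Treating subsetterPtr as
// a zero-cell mesh subset is our reading of the surrounding code.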
if (mag(b[cellI] - a[cellI]) > tolDim)
"(const scalar, const volVectorField&, const volVectorField&)"
) << "Did not map volVectorField correctly:" << nl
    << " transfer b:" << b[cellI]
    << " real cc:" << a[cellI]
if (mag(aBoundary[i] - bBoundary[i]) > tolDim)
"(const scalar, const volVectorField&"
", const volVectorField&)"
) << "Did not map volVectorField correctly:"
    << "patch:" << patchI << " patchFace:" << i
    << " real :" << aBoundary[i] << endl
    << " mapped :" << bBoundary[i] << endl
int main(int argc, char *argv[])
argList::validOptions.insert("mergeTol", "relative merge distance");
if (!Pstream::master() && !isDir(args.path()))
Pout<< "Creating case directory " << args.path() << endl;
word regionName = polyMesh::defaultRegion;
meshSubDir = regionName/polyMesh::meshSubDir;
meshSubDir = polyMesh::meshSubDir;
Info<< "Using mesh subdirectory " << meshSubDir << nl << endl;
if (Pstream::master())
masterInstDir = runTime.findInstance(meshSubDir, "points");
Pstream::scatter(masterInstDir);
const fileName meshPath = runTime.path()/masterInstDir/meshSubDir;
Info<< "Found points in " << meshPath << nl << endl;
boolList haveMesh(Pstream::nProcs(), false);
haveMesh[Pstream::myProcNo()] = isDir(meshPath);
Pstream::gatherList(haveMesh);
Pstream::scatterList(haveMesh);
Info<< "Per processor mesh availability : " << haveMesh << endl;
const bool allHaveMesh = (findIndex(haveMesh, false) == -1);
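
// Editorial summary: main() registers the -mergeTol option, creates the case
// directory on processors that lack one, picks the mesh subdirectory for the
// requested region, lets the master locate the instance containing "points"
// and scatters it, and then builds haveMesh. The gatherList/scatterList pair
// makes the full availability list known on every processor, so allHaveMesh
// is true only when no entry is false.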
haveMesh[Pstream::myProcNo()]
printMeshData(Pout, mesh);
decompositionMethod::New
if (!decomposer().parallelAware())
    << "You have selected decomposition method "
    << decomposer().typeName
    << " which does" << endl
    << "not synchronise the decomposition across"
    << " processor patches." << endl
    << " You might want to select a decomposition method which"
    << " is aware of this. Continuing."
finalDecomp = decomposer().decompose(mesh.cellCentres());
writeDecomposition("decomposition", mesh, finalDecomp);
if (isA<processorPolyPatch>(patches[patchI]))
    << "Cannot find non-processor patch on processor "
    << Pstream::myProcNo() << endl
subsetterPtr().setLargeCellSubset(labelHashSet(0), nonProcI, false);
IOobjectList::iterator iter = objects.find("decomposition");
if (iter != objects.end())
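
// Editorial note: if the selected decompositionMethod is not parallelAware, a
// warning is printed and execution continues; the decomposition of the cell
// centres is then written out via writeDecomposition(). The zero-cell subset
// (labelHashSet(0)) exposed onto a non-processor patch presumably provides
// the dummy mesh used to interpolate fields for processors without a mesh,
// and the lookup of "decomposition" in the IOobjectList presumably keeps that
// helper field from being redistributed like a regular volScalarField.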
volSphereTensorFields
surfSphereTensorFields
const scalar tolDim = getMergeDistance
Pout<< "Wanted distribution:"
    << distributor.countCells(finalDecomp) << nl << endl;
Pout<< "After distribution mesh:" << endl;
printMeshData(Pout, mesh);
compareFields(tolDim, mesh.C(), mapCc);
nFaces[Pstream::myProcNo()] = mesh.nFaces();
Pstream::gatherList(nFaces);
Pstream::scatterList(nFaces);
    << "You can pick up the redecomposed mesh from the polyMesh directory"
    << "If you redecomposed the mesh to fewer processors you can delete"
    << "the processor directories with 0 sized meshes in them." << nl
    << "Below is a sample set of commands to do this."
    << " Take care when issuing these" << nl
    << "commands." << nl << endl;
if (nFaces[procI] == 0)
Info<< " rm -r " << procDir.c_str() << nl;
Info<< " rm -r " << constDir.c_str() << nl
    << " mv " << timeDir.c_str()
    << ' ' << constDir.c_str() << nl;
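
// Editorial note: after redistribution the utility re-checks the mapped cell
// centres with compareFields() and gathers the per-processor face counts; it
// then prints sample clean-up commands: "rm -r processorN" for processors
// whose mesh ended up empty and, for the others, removal of the old constant
// mesh directory followed by a move of the newly written instance into its
// place. A purely hypothetical example of that output (directory names
// invented here for illustration) might look like:
//     rm -r processor3
//     rm -r processor0/constant/polyMesh
//     mv processor0/0.005/polyMesh processor0/constant/polyMesh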