GAMGAgglomerateLduAddressing.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "GAMGAgglomeration.H"
#include "GAMGInterface.H"
#include "processorGAMGInterface.H"
#include "cyclicLduInterface.H"

// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

void Foam::GAMGAgglomeration::agglomerateLduAddressing
(
    const label fineLevelIndex
)
{
    const lduMesh& fineMesh = meshLevel(fineLevelIndex);
    const lduAddressing& fineMeshAddr = fineMesh.lduAddr();

    const labelUList& upperAddr = fineMeshAddr.upperAddr();
    const labelUList& lowerAddr = fineMeshAddr.lowerAddr();

    const label nFineFaces = upperAddr.size();

    // Get restriction map for current level
    const labelField& restrictMap = restrictAddressing(fineLevelIndex);

    if (min(restrictMap) == -1)
    {
        FatalErrorInFunction
            << "min(restrictMap) == -1" << exit(FatalError);
    }

    if (restrictMap.size() != fineMeshAddr.size())
    {
        FatalErrorInFunction
            << "restrict map does not correspond to fine level. " << endl
            << " Sizes: restrictMap: " << restrictMap.size()
            << " nEqns: " << fineMeshAddr.size()
            << abort(FatalError);
    }


    // Get the number of coarse cells
    const label nCoarseCells = nCells_[fineLevelIndex];

    // Storage for coarse cell neighbours and coefficients

    // Guess initial maximum number of neighbours in coarse cell
    label maxNnbrs = 10;

    // Number of faces for each coarse-cell
    labelList cCellnFaces(nCoarseCells, Zero);

    // Setup initial packed storage for coarse-cell faces
    labelList cCellFaces(maxNnbrs*nCoarseCells);
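    // The packed scratch list is addressed as
    // cCellFaces[maxNnbrs*coarseCelli + i] for i < cCellnFaces[coarseCelli];
    // it records, per coarse cell, the coarse faces created so far and is
    // regrown (maxNnbrs doubled) below if a coarse cell acquires more
    // neighbours than currently fit.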

    // Create face-restriction addressing
    faceRestrictAddressing_.set(fineLevelIndex, new labelList(nFineFaces));
    labelList& faceRestrictAddr = faceRestrictAddressing_[fineLevelIndex];

    // Initial neighbour array (not in upper-triangle order)
    labelList initCoarseNeighb(nFineFaces);

    // Counter for coarse faces
    label& nCoarseFaces = nFaces_[fineLevelIndex];
    nCoarseFaces = 0;

    // Loop through all fine faces
    forAll(upperAddr, fineFacei)
    {
        label rmUpperAddr = restrictMap[upperAddr[fineFacei]];
        label rmLowerAddr = restrictMap[lowerAddr[fineFacei]];

        if (rmUpperAddr == rmLowerAddr)
        {
            // For each fine face inside of a coarse cell keep the address
            // of the cell corresponding to the face in the faceRestrictAddr
            // as a negative index
            faceRestrictAddr[fineFacei] = -(rmUpperAddr + 1);
        }
        else
        {
            // this face is a part of a coarse face

            label cOwn = rmUpperAddr;
            label cNei = rmLowerAddr;

            // get coarse owner and neighbour
            if (rmUpperAddr > rmLowerAddr)
            {
                cOwn = rmLowerAddr;
                cNei = rmUpperAddr;
            }

            // check the neighbour to see if this face has already been found
            label* ccFaces = &cCellFaces[maxNnbrs*cOwn];

            bool nbrFound = false;
            label& ccnFaces = cCellnFaces[cOwn];

            for (int i=0; i<ccnFaces; i++)
            {
                if (initCoarseNeighb[ccFaces[i]] == cNei)
                {
                    nbrFound = true;
                    faceRestrictAddr[fineFacei] = ccFaces[i];
                    break;
                }
            }

            if (!nbrFound)
            {
                if (ccnFaces >= maxNnbrs)
                {
                    label oldMaxNnbrs = maxNnbrs;
                    maxNnbrs *= 2;

                    cCellFaces.setSize(maxNnbrs*nCoarseCells);

                    forAllReverse(cCellnFaces, i)
                    {
                        label* oldCcNbrs = &cCellFaces[oldMaxNnbrs*i];
                        label* newCcNbrs = &cCellFaces[maxNnbrs*i];

                        for (int j=0; j<cCellnFaces[i]; j++)
                        {
                            newCcNbrs[j] = oldCcNbrs[j];
                        }
                    }

                    ccFaces = &cCellFaces[maxNnbrs*cOwn];
                }

                ccFaces[ccnFaces] = nCoarseFaces;
                initCoarseNeighb[nCoarseFaces] = cNei;
                faceRestrictAddr[fineFacei] = nCoarseFaces;
                ccnFaces++;

                // new coarse face created
                nCoarseFaces++;
            }
        }
    } // end for all fine faces
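    // At this point faceRestrictAddr[fineFacei] holds either a provisional
    // coarse-face index (>= 0, in creation order) or -(coarseCelli + 1) for
    // a fine face lying inside coarse cell coarseCelli; nCoarseFaces is the
    // total number of coarse faces. The provisional indices are renumbered
    // into the final ordering below.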


    // Renumber into upper-triangular order

    // All coarse owner-neighbour storage
    labelList coarseOwner(nCoarseFaces);
    labelList coarseNeighbour(nCoarseFaces);
    labelList coarseFaceMap(nCoarseFaces);

    label coarseFacei = 0;

    forAll(cCellnFaces, cci)
    {
        label* cFaces = &cCellFaces[maxNnbrs*cci];
        label ccnFaces = cCellnFaces[cci];

        for (int i=0; i<ccnFaces; i++)
        {
            coarseOwner[coarseFacei] = cci;
            coarseNeighbour[coarseFacei] = initCoarseNeighb[cFaces[i]];
            coarseFaceMap[cFaces[i]] = coarseFacei;
            coarseFacei++;
        }
    }

    forAll(faceRestrictAddr, fineFacei)
    {
        if (faceRestrictAddr[fineFacei] >= 0)
        {
            faceRestrictAddr[fineFacei] =
                coarseFaceMap[faceRestrictAddr[fineFacei]];
        }
    }
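    // Coarse faces are now grouped by coarse owner cell, within each owner
    // in the order their neighbours were first encountered above;
    // coarseFaceMap translates the provisional creation-order indices into
    // this final numbering, and faceRestrictAddr has been updated to match.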


    // Create face-flip status
    faceFlipMap_.set(fineLevelIndex, new boolList(nFineFaces, false));
    boolList& faceFlipMap = faceFlipMap_[fineLevelIndex];


    forAll(faceRestrictAddr, fineFacei)
    {
        label coarseFacei = faceRestrictAddr[fineFacei];

        if (coarseFacei >= 0)
        {
            // Maps to coarse face
            label cOwn = coarseOwner[coarseFacei];
            label cNei = coarseNeighbour[coarseFacei];

            label rmUpperAddr = restrictMap[upperAddr[fineFacei]];
            label rmLowerAddr = restrictMap[lowerAddr[fineFacei]];

            if (cOwn == rmUpperAddr && cNei == rmLowerAddr)
            {
                faceFlipMap[fineFacei] = true;
            }
            else if (cOwn == rmLowerAddr && cNei == rmUpperAddr)
            {
                //faceFlipMap[fineFacei] = false;
            }
            else
            {
                FatalErrorInFunction
                    << "problem."
                    << " fineFacei:" << fineFacei
                    << " rmUpperAddr:" << rmUpperAddr
                    << " rmLowerAddr:" << rmLowerAddr
                    << " coarseFacei:" << coarseFacei
                    << " cOwn:" << cOwn
                    << " cNei:" << cNei
                    << exit(FatalError);
            }
        }
    }
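    // A flip of true marks a fine face whose owner->neighbour direction is
    // opposite to that of the coarse face it maps onto (its fine upper cell
    // restricts to the coarse owner), so contributions of oriented face
    // quantities can be sign-corrected when restricted to the coarse level.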



    // Clear the temporary storage for the coarse cell data
    cCellnFaces.setSize(0);
    cCellFaces.setSize(0);
    initCoarseNeighb.setSize(0);
    coarseFaceMap.setSize(0);


    // Create coarse-level interfaces

    // Get reference to fine-level interfaces
    const lduInterfacePtrsList& fineInterfaces = interfaceLevel(fineLevelIndex);

    nPatchFaces_.set
    (
        fineLevelIndex,
        new labelList(fineInterfaces.size(), Zero)
    );
    labelList& nPatchFaces = nPatchFaces_[fineLevelIndex];

    patchFaceRestrictAddressing_.set
    (
        fineLevelIndex,
        new labelListList(fineInterfaces.size())
    );
    labelListList& patchFineToCoarse =
        patchFaceRestrictAddressing_[fineLevelIndex];


    const label nReq = Pstream::nRequests();

    // Initialise transfer of restrict addressing on the interface
    // The finest mesh uses patchAddr from the original lduAddressing;
    // the coarser levels create their own addressing for faceCells.
    forAll(fineInterfaces, inti)
    {
        if (fineInterfaces.set(inti))
        {
            if (fineLevelIndex == 0)
            {
                fineInterfaces[inti].initInternalFieldTransfer
                (
                    Pstream::commsTypes::nonBlocking,
                    restrictMap,
                    fineMeshAddr.patchAddr(inti)
                );
            }
            else
            {
                fineInterfaces[inti].initInternalFieldTransfer
                (
                    Pstream::commsTypes::nonBlocking,
                    restrictMap
                );
            }
        }
    }

    if (Pstream::parRun())
    {
        Pstream::waitRequests(nReq);
    }
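    // The non-blocking transfers started above are now complete, so the
    // neighbouring processors' restrict maps can be retrieved via
    // internalFieldTransfer() when the coarse interfaces are created below.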


    // Add the coarse level
    meshLevels_.set
    (
        fineLevelIndex,
        new lduPrimitiveMesh
        (
            nCoarseCells,
            coarseOwner,
            coarseNeighbour,
            fineMesh.comm(),
            true
        )
    );

    lduInterfacePtrsList coarseInterfaces(fineInterfaces.size());

    forAll(fineInterfaces, inti)
    {
        if (fineInterfaces.set(inti))
        {
            tmp<labelField> restrictMapInternalField;

            // The finest mesh uses patchAddr from the original lduAddressing;
            // the coarser levels create their own addressing for faceCells.
            if (fineLevelIndex == 0)
            {
                restrictMapInternalField =
                    fineInterfaces[inti].interfaceInternalField
                    (
                        restrictMap,
                        fineMeshAddr.patchAddr(inti)
                    );
            }
            else
            {
                restrictMapInternalField =
                    fineInterfaces[inti].interfaceInternalField
                    (
                        restrictMap
                    );
            }

            tmp<labelField> nbrRestrictMapInternalField =
                fineInterfaces[inti].internalFieldTransfer
                (
                    Pstream::commsTypes::nonBlocking,
                    restrictMap
                );

            coarseInterfaces.set
            (
                inti,
                GAMGInterface::New
                (
                    inti,
                    meshLevels_[fineLevelIndex].rawInterfaces(),
                    fineInterfaces[inti],
                    restrictMapInternalField(),
                    nbrRestrictMapInternalField(),
                    fineLevelIndex,
                    fineMesh.comm()
                ).ptr()
            );

            /* Same as above:
            coarseInterfaces.set
            (
                inti,
                GAMGInterface::New
                (
                    inti,
                    meshLevels_[fineLevelIndex].rawInterfaces(),
                    fineInterfaces[inti],
                    fineInterfaces[inti].interfaceInternalField(restrictMap),
                    fineInterfaces[inti].internalFieldTransfer
                    (
                        Pstream::commsTypes::nonBlocking,
                        restrictMap
                    ),
                    fineLevelIndex,
                    fineMesh.comm()
                ).ptr()
            );
            */

            nPatchFaces[inti] = coarseInterfaces[inti].faceCells().size();
            patchFineToCoarse[inti] = refCast<const GAMGInterface>
            (
                coarseInterfaces[inti]
            ).faceRestrictAddressing();
        }
    }

    meshLevels_[fineLevelIndex].addInterfaces
    (
        coarseInterfaces,
        lduPrimitiveMesh::nonBlockingSchedule<processorGAMGInterface>
        (
            coarseInterfaces
        )
    );


    if (debug & 2)
    {
        Pout<< "GAMGAgglomeration :"
            << " agglomerated level " << fineLevelIndex
            << " from nCells:" << fineMeshAddr.size()
            << " nFaces:" << upperAddr.size()
            << " to nCells:" << nCoarseCells
            << " nFaces:" << nCoarseFaces
            << endl;
    }
}
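
// Editorial note (illustration, not part of the original source): the level
// addressing assembled above is what the field restriction routines consume.
// As a minimal sketch, a fine face field could be summed onto the coarse
// level as follows; the free-standing helper and field names are
// hypothetical, only faceRestrictAddr and faceFlipMap come from this file:
//
//     void restrictOrientedFaceField
//     (
//         scalarField& coarseFld,          // sized nFaces_[fineLevelIndex]
//         const scalarField& fineFld,      // sized nFineFaces
//         const labelList& faceRestrictAddr,
//         const boolList& faceFlipMap
//     )
//     {
//         coarseFld = 0.0;
//         forAll(fineFld, facei)
//         {
//             const label cFacei = faceRestrictAddr[facei];
//             if (cFacei >= 0)
//             {
//                 // Negate contributions from fine faces whose orientation
//                 // is opposite to that of the coarse face
//                 coarseFld[cFacei] +=
//                     faceFlipMap[facei] ? -fineFld[facei] : fineFld[facei];
//             }
//             // Entries < 0 encode faces internal to a coarse cell and do
//             // not contribute to any coarse face
//         }
//     }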


void Foam::GAMGAgglomeration::procAgglomerateLduAddressing
(
    const label meshComm,
    const labelList& procAgglomMap,
    const labelList& procIDs,
    const label allMeshComm,

    const label levelIndex
)
{
    const lduMesh& myMesh = meshLevels_[levelIndex-1];


    procAgglomMap_.set(levelIndex, new labelList(procAgglomMap));
    agglomProcIDs_.set(levelIndex, new labelList(procIDs));
    procCommunicator_[levelIndex] = allMeshComm;

    // These need only be set on the master procs, but it is
    // quite convenient to also have them on the slaves
    procCellOffsets_.set(levelIndex, new labelList(0));
    procFaceMap_.set(levelIndex, new labelListList(0));
    procBoundaryMap_.set(levelIndex, new labelListList(0));
    procBoundaryFaceMap_.set(levelIndex, new labelListListList(0));


    // Collect meshes
    PtrList<lduPrimitiveMesh> otherMeshes;
    lduPrimitiveMesh::gather(meshComm, myMesh, procIDs, otherMeshes);
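    // lduPrimitiveMesh::gather collects the level meshes of the other
    // processors in procIDs onto procIDs[0] (the master), which alone
    // assembles the combined mesh below; the non-master processors clear
    // this level at the end of the function.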

    if (Pstream::myProcNo(meshComm) == procIDs[0])
    {
        // Combine all addressing

        labelList procFaceOffsets;
        meshLevels_.set
        (
            levelIndex-1,
            new lduPrimitiveMesh
            (
                allMeshComm,
                procAgglomMap,

                procIDs,
                myMesh,
                otherMeshes,

                procCellOffsets_[levelIndex],
                procFaceOffsets,
                procFaceMap_[levelIndex],
                procBoundaryMap_[levelIndex],
                procBoundaryFaceMap_[levelIndex]
            )
        );
    }


    // Combine restrict addressing
    // ~~~~~~~~~~~~~~~~~~~~~~~~~~~

    procAgglomerateRestrictAddressing
    (
        meshComm,
        procIDs,
        levelIndex
    );

    if (Pstream::myProcNo(meshComm) != procIDs[0])
    {
        clearLevel(levelIndex);
    }
}


void Foam::GAMGAgglomeration::procAgglomerateRestrictAddressing
(
    const label comm,
    const labelList& procIDs,
    const label levelIndex
)
{
    // Collect number of cells
    labelList nFineCells;
    globalIndex::gatherValues
    (
        comm,
        procIDs,
        restrictAddressing_[levelIndex].size(),
        nFineCells,

        UPstream::msgType(),
        Pstream::commsTypes::nonBlocking
    );
    labelList fineOffsets(globalIndex::calcOffsets(nFineCells));

    // Combine and renumber nCoarseCells
    labelList nCoarseCells;
    globalIndex::gatherValues
    (
        comm,
        procIDs,
        nCells_[levelIndex],
        nCoarseCells,

        UPstream::msgType(),
        Pstream::commsTypes::nonBlocking
    );
    labelList coarseOffsets(globalIndex::calcOffsets(nCoarseCells));
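    // Example: if nCoarseCells gathered from two processors is (3, 2),
    // calcOffsets gives coarseOffsets = (0, 3, 5); processor 1's local
    // coarse cells 0..1 therefore become cells 3..4 after the renumbering
    // below, and the combined total is coarseOffsets.last() == 5.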

    // (cell)restrictAddressing
    labelList procRestrictAddressing;
    globalIndex::gather
    (
        fineOffsets,
        comm,
        procIDs,
        restrictAddressing_[levelIndex],
        procRestrictAddressing,

        UPstream::msgType(),
        Pstream::commsTypes::nonBlocking //Pstream::commsTypes::scheduled
    );


    if (Pstream::myProcNo(comm) == procIDs[0])
    {
        nCells_[levelIndex] = coarseOffsets.last(); // ie, totalSize()

        // Renumber consecutively
        for (label proci = 1; proci < procIDs.size(); ++proci)
        {
            SubList<label> procSlot
            (
                procRestrictAddressing,
                fineOffsets[proci+1]-fineOffsets[proci],
                fineOffsets[proci]
            );

            // procSlot += coarseOffsets[proci];
            forAll(procSlot, i)
            {
                procSlot[i] += coarseOffsets[proci];
            }
        }

        restrictAddressing_[levelIndex].transfer(procRestrictAddressing);
    }
}


void Foam::GAMGAgglomeration::combineLevels(const label curLevel)
{
    label prevLevel = curLevel - 1;

    // Set the previous level nCells to the current
    nCells_[prevLevel] = nCells_[curLevel];
    nFaces_[prevLevel] = nFaces_[curLevel];

    // Map the restrictAddressing from the coarser level into the previous
    // finer level

    const labelList& curResAddr = restrictAddressing_[curLevel];
    labelList& prevResAddr = restrictAddressing_[prevLevel];

    const labelList& curFaceResAddr = faceRestrictAddressing_[curLevel];
    labelList& prevFaceResAddr = faceRestrictAddressing_[prevLevel];
    const boolList& curFaceFlipMap = faceFlipMap_[curLevel];
    boolList& prevFaceFlipMap = faceFlipMap_[prevLevel];

    forAll(prevFaceResAddr, i)
    {
        if (prevFaceResAddr[i] >= 0)
        {
            label fineFacei = prevFaceResAddr[i];
            prevFaceResAddr[i] = curFaceResAddr[fineFacei];
            prevFaceFlipMap[i] = curFaceFlipMap[fineFacei];
        }
        else
        {
            label fineFacei = -prevFaceResAddr[i] - 1;
            prevFaceResAddr[i] = -curResAddr[fineFacei] - 1;
            prevFaceFlipMap[i] = curFaceFlipMap[fineFacei];
        }
    }
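    // Positive entries are composed through the coarser level's face
    // restriction (and may themselves turn negative if that face has been
    // absorbed into a coarse cell); negative entries, which encode the
    // coarse cell a face lies inside, are re-keyed to the even coarser cell
    // via curResAddr.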

    // Delete the restrictAddressing for the coarser level
    faceRestrictAddressing_.set(curLevel, nullptr);
    faceFlipMap_.set(curLevel, nullptr);

    forAll(prevResAddr, i)
    {
        prevResAddr[i] = curResAddr[prevResAddr[i]];
    }

    const labelListList& curPatchFaceResAddr =
        patchFaceRestrictAddressing_[curLevel];
    labelListList& prevPatchFaceResAddr =
        patchFaceRestrictAddressing_[prevLevel];

    forAll(prevPatchFaceResAddr, inti)
    {
        const labelList& curResAddr = curPatchFaceResAddr[inti];
        labelList& prevResAddr = prevPatchFaceResAddr[inti];
        forAll(prevResAddr, i)
        {
            label fineFacei = prevResAddr[i];
            prevResAddr[i] = curResAddr[fineFacei];
        }
    }

    // Delete the restrictAddressing for the coarser level
    restrictAddressing_.set(curLevel, nullptr);

    // Patch faces
    nPatchFaces_[prevLevel] = nPatchFaces_[curLevel];



    // Adapt the restrict addressing for the patches
    const lduInterfacePtrsList& curInterLevel =
        meshLevels_[curLevel].rawInterfaces();
    const lduInterfacePtrsList& prevInterLevel =
        meshLevels_[prevLevel].rawInterfaces();

    forAll(prevInterLevel, inti)
    {
        if (prevInterLevel.set(inti))
        {
            GAMGInterface& prevInt = refCast<GAMGInterface>
            (
                const_cast<lduInterface&>
                (
                    prevInterLevel[inti]
                )
            );
            const GAMGInterface& curInt = refCast<const GAMGInterface>
            (
                curInterLevel[inti]
            );
            prevInt.combine(curInt);
        }
    }

    // Delete the matrix addressing and coefficients from the previous level
    // and replace with the corresponding entry from the coarser level
    meshLevels_.set(prevLevel, meshLevels_.set(curLevel, nullptr));
}


void Foam::GAMGAgglomeration::calculateRegionMaster
(
    const label comm,
    const labelList& procAgglomMap,
    labelList& masterProcs,
    List<label>& agglomProcIDs
)
{
    // Determine the master processors
    Map<label> agglomToMaster(procAgglomMap.size());

    forAll(procAgglomMap, proci)
    {
        const label coarsei = procAgglomMap[proci];

        auto iter = agglomToMaster.find(coarsei);
        if (iter.found())
        {
            iter.val() = min(iter.val(), proci);
        }
        else
        {
            agglomToMaster.insert(coarsei, proci);
        }
    }

    masterProcs.setSize(agglomToMaster.size());
    forAllConstIters(agglomToMaster, iter)
    {
        masterProcs[iter.key()] = iter.val();
    }


    // Collect all the processors in my agglomeration
    label myProcID = Pstream::myProcNo(comm);
    label myAgglom = procAgglomMap[myProcID];

    // Get all processors agglomerating to the same coarse
    // processor
    agglomProcIDs = findIndices(procAgglomMap, myAgglom);

    // Make sure the master is the first element.
    const label index =
        agglomProcIDs.find(agglomToMaster[myAgglom]);

    std::swap(agglomProcIDs[0], agglomProcIDs[index]);
}
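
// Editorial note (illustration, not part of the original source): for
// procAgglomMap = (0, 0, 1, 1) the loops in calculateRegionMaster give
// agglomToMaster = {0 -> 0, 1 -> 2} and hence masterProcs = (0, 2); on
// processor 3 (myAgglom == 1), findIndices returns agglomProcIDs = (2, 3),
// with the master processor (2) as the first element.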


// ************************************************************************* //