globalIndexTemplates.C
1/*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | www.openfoam.com
6 \\/ M anipulation |
7-------------------------------------------------------------------------------
8 Copyright (C) 2013-2017 OpenFOAM Foundation
9 Copyright (C) 2019-2022 OpenCFD Ltd.
10-------------------------------------------------------------------------------
11License
12 This file is part of OpenFOAM.
13
14 OpenFOAM is free software: you can redistribute it and/or modify it
15 under the terms of the GNU General Public License as published by
16 the Free Software Foundation, either version 3 of the License, or
17 (at your option) any later version.
18
19 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22 for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26
27\*---------------------------------------------------------------------------*/
28
29#include "globalIndex.H"
30
31// * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //
32
33template<class SubListType>
34 Foam::labelList
35 Foam::globalIndex::calcListOffsets
36(
37 const List<SubListType>& lists,
38 const bool checkOverflow
39)
40{
41 labelList values;
42
43 const label len = lists.size();
44
45 if (len)
46 {
47 values.resize(len+1);
48
49 label start = 0;
50 for (label i = 0; i < len; ++i)
51 {
52 values[i] = start;
53 start += lists[i].size();
54
55 if (checkOverflow && start < values[i])
56 {
57 reportOverflowAndExit(i);
58 }
59 }
60 values[len] = start;
61 }
62
63 return values;
64}
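
// Worked example (sketch, not part of the original source): for sub-lists
// of sizes {3, 0, 5} the loop above yields
//
//     values == {0, 3, 3, 8}
//
// i.e. sub-list i occupies the flat range [values[i], values[i+1]) and
// values[len] is the total number of elements.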
65
66
67template<class ProcIDsContainer, class Type>
69(
70 const label comm,
71 const ProcIDsContainer& procIDs,
72 const Type& localValue,
73 List<Type>& allValues,
74 const int tag,
75 const UPstream::commsTypes preferredCommsType
76)
77{
78 // low-level: no parRun guard
79
80 // Automatically change from nonBlocking to scheduled for
81 // non-contiguous data.
82 const UPstream::commsTypes commsType =
83 (
84 (
85 !is_contiguous<Type>::value
86 && UPstream::commsTypes::nonBlocking == preferredCommsType
87 )
88 ? UPstream::commsTypes::scheduled
89 : preferredCommsType
90 );
91
92 const label startOfRequests = UPstream::nRequests();
93
94 if (UPstream::myProcNo(comm) == procIDs[0])
95 {
96 allValues.resize_nocopy(procIDs.size());
97 allValues[0] = localValue;
98
99 for (label i = 1; i < procIDs.size(); ++i)
100 {
101 if (is_contiguous<Type>::value)
102 {
103 UIPstream::read
104 (
105 commsType,
106 procIDs[i],
107 reinterpret_cast<char*>(&allValues[i]),
108 sizeof(Type),
109 tag,
110 comm
111 );
112 }
113 else
114 {
115 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
116 fromProc >> allValues[i];
117 }
118 }
119 }
120 else
121 {
122 allValues.clear(); // safety: zero-size on non-master
123
124 if (is_contiguous<Type>::value)
125 {
126 UOPstream::write
127 (
128 commsType,
129 procIDs[0],
130 reinterpret_cast<const char*>(&localValue),
131 sizeof(Type),
132 tag,
133 comm
134 );
135 }
136 else
137 {
138 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
139 toMaster << localValue;
140 }
141 }
142
143 if (commsType == UPstream::commsTypes::nonBlocking)
144 {
145 // Wait for all to finish
146 UPstream::waitRequests(startOfRequests);
147 }
148}
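
// Usage sketch (not part of the original source; assumes a parallel run,
// the default tag/commsType, and a hypothetical per-rank value 'localMax'):
//
//     const label comm = UPstream::worldComm;
//     const scalar localMax = 1.23;   // one value per rank
//
//     List<scalar> allMax;
//     globalIndex::gatherValues(comm, UPstream::procID(comm), localMax, allMax);
//
//     // allMax holds one entry per rank on the master and is empty elsewhere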
149
150
151template<class ProcIDsContainer, class Type>
153(
154 const labelUList& off, // needed on master only
155 const label comm,
156 const ProcIDsContainer& procIDs,
157 const UList<Type>& fld,
158 List<Type>& allFld,
159 const int tag,
160 const UPstream::commsTypes preferredCommsType
161)
162{
163 // low-level: no parRun guard
164
165 // Automatically change from nonBlocking to scheduled for
166 // non-contiguous data.
167 const UPstream::commsTypes commsType =
168 (
169 (
170 !is_contiguous<Type>::value
171 && UPstream::commsTypes::nonBlocking == preferredCommsType
172 )
173 ? UPstream::commsTypes::scheduled
174 : preferredCommsType
175 );
176
177 const label startOfRequests = UPstream::nRequests();
178
179 if (Pstream::myProcNo(comm) == procIDs[0])
180 {
181 allFld.resize_nocopy(off.last()); // == totalSize()
182
183 // Assign my local data - respect offset information
184 // so that we can request 0 entries to be copied.
185 // Also handle the case where we have a slice of the full
186 // list.
187
188 SubList<Type>(allFld, off[1]-off[0], off[0]) =
189 SubList<Type>(fld, off[1]-off[0]);
190
191 for (label i = 1; i < procIDs.size(); ++i)
192 {
193 SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
194
195 if (procSlot.empty())
196 {
197 // Nothing to do
198 }
199 else if (is_contiguous<Type>::value)
200 {
201 UIPstream::read
202 (
203 commsType,
204 procIDs[i],
205 procSlot.data_bytes(),
206 procSlot.size_bytes(),
207 tag,
208 comm
209 );
210 }
211 else
212 {
213 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
214 fromProc >> procSlot;
215 }
216 }
217 }
218 else
219 {
220 if (fld.empty())
221 {
222 // Nothing to do
223 }
224 else if (is_contiguous<Type>::value)
225 {
226 UOPstream::write
227 (
228 commsType,
229 procIDs[0],
230 fld.cdata_bytes(),
231 fld.size_bytes(),
232 tag,
233 comm
234 );
235 }
236 else
237 {
238 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
239 toMaster << fld;
240 }
241 }
242
243 if (commsType == UPstream::commsTypes::nonBlocking)
244 {
245 // Wait for all to finish
246 UPstream::waitRequests(startOfRequests);
247 }
248}
249
250
251template<class Type, class Addr>
253(
254 const labelUList& off, // needed on master only
255 const label comm,
256 const UList<int>& procIDs,
257 const IndirectListBase<Type, Addr>& fld,
258 List<Type>& allFld,
259 const int tag,
260 const UPstream::commsTypes preferredCommsType
261)
262{
263 // low-level: no parRun guard
264
265 if (is_contiguous<Type>::value)
266 {
267 // Flatten list (locally) so that we can benefit from using direct
268 // read/write of contiguous data
269
270 gather
271 (
272 off,
273 comm,
274 procIDs,
275 List<Type>(fld),
276 allFld,
277 tag,
278 preferredCommsType
279 );
280 return;
281 }
282
283 // Automatically change from nonBlocking to scheduled for
284 // non-contiguous data.
285 const UPstream::commsTypes commsType =
286 (
287 (
288 !is_contiguous<Type>::value
289 && UPstream::commsTypes::nonBlocking == preferredCommsType
290 )
291 ? UPstream::commsTypes::scheduled
292 : preferredCommsType
293 );
294
295 const label startOfRequests = UPstream::nRequests();
296
297 if (Pstream::myProcNo(comm) == procIDs[0])
298 {
299 allFld.resize_nocopy(off.last()); // == totalSize()
300
301 // Assign my local data - respect offset information
302 // so that we can request 0 entries to be copied
303
304 SubList<Type> localSlot(allFld, off[1]-off[0], off[0]);
305 if (!localSlot.empty())
306 {
307 localSlot = fld;
308 }
309
310 // Already verified commsType != nonBlocking
311 for (label i = 1; i < procIDs.size(); ++i)
312 {
313 SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
314
315 if (procSlot.empty())
316 {
317 // Nothing to do
318 }
319 else
320 {
321 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
322 fromProc >> procSlot;
323 }
324 }
325 }
326 else
327 {
328 if (fld.empty())
329 {
330 // Nothing to do
331 }
332 else
333 {
334 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
335 toMaster << fld;
336 }
337 }
338
339 if (commsType == UPstream::commsTypes::nonBlocking)
340 {
341 // Wait for all to finish
342 UPstream::waitRequests(startOfRequests);
343 }
344}
345
346
347// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
348
349template<class Type>
351(
352 const UList<Type>& sendData,
353 List<Type>& allData,
354 const int tag,
355 const UPstream::commsTypes commsType,
356 const label comm
357) const
358{
359 if (!UPstream::parRun())
360 {
361 // Serial: direct copy
362 allData = sendData;
363 return;
364 }
365
366 {
367 gather
368 (
369 offsets_, // needed on master only
370 comm,
371 UPstream::procID(comm),
372 sendData,
373 allData,
374 tag,
375 commsType
376 );
377 if (!UPstream::master(comm))
378 {
379 allData.clear(); // safety: zero-size on non-master
380 }
381 }
382}
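
// Usage sketch (not part of the original source; assumes a parallel run,
// the default tag/commsType/communicator, and a hypothetical local field):
//
//     List<scalar> localData(5, 1.0);
//
//     globalIndex gi(localData.size());   // gathers the per-rank sizes
//
//     List<scalar> allData;
//     gi.gather(localData, allData);
//
//     // allData is ordered proc0, proc1, ... on the master and is
//     // zero-sized on all other ranks (see above)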
383
384
385template<class Type, class Addr>
387(
388 const IndirectListBase<Type, Addr>& sendData,
389 List<Type>& allData,
390 const int tag,
391 const UPstream::commsTypes commsType,
392 const label comm
393) const
394{
395 if (!UPstream::parRun())
396 {
397 // Serial: direct copy
398 allData = sendData;
399 return;
400 }
401
402 {
403 gather
404 (
405 offsets_, // needed on master only
406 comm,
407 UPstream::procID(comm),
408 sendData,
409 allData,
410 tag,
411 commsType
412 );
413 if (!UPstream::master(comm))
414 {
415 allData.clear(); // safety: zero-size on non-master
416 }
417 }
418}
419
420
421template<class Type, class OutputContainer>
423(
424 const UList<Type>& sendData,
425 const int tag,
426 const UPstream::commsTypes commsType,
427 const label comm
428) const
429{
430 OutputContainer allData;
431 gather(sendData, allData, tag, commsType, comm);
432 return allData;
433}
434
435
436template<class Type, class Addr, class OutputContainer>
438(
439 const IndirectListBase<Type, Addr>& sendData,
440 const int tag,
441 const UPstream::commsTypes commsType,
442 const label comm
443) const
444{
445 OutputContainer allData;
446 gather(sendData, allData, tag, commsType, comm);
447 return allData;
448}
449
450
451template<class Type>
453(
455 const int tag,
456 const UPstream::commsTypes commsType,
457 const label comm
458) const
459{
460 if (UPstream::parRun())
461 {
462 List<Type> allData;
463 gather(fld, allData, tag, commsType, comm);
464
465 if (UPstream::master(comm))
466 {
467 fld.transfer(allData);
468 }
469 else
470 {
471 fld.clear(); // zero-size on non-master
472 }
473 }
474 // Serial: (no-op)
475}
476
477
478template<class Type, class OutputContainer>
480(
481 const UList<Type>& sendData,
482 OutputContainer& allData,
483 const label comm,
484
485 const UPstream::commsTypes commsType,
486 const int tag
487) const
488{
489 if (!UPstream::parRun())
490 {
491 // Serial: direct copy
492 allData = sendData;
493 return;
494 }
495
496 // MPI_Gatherv requires contiguous data, but a byte-wise transfer can
497 // quickly exceed the 'int' limits used for MPI sizes/offsets.
498 // Thus gather label/scalar components when possible to increase the
499 // effective size limit.
500 //
501 // Note: cannot rely on pTraits (cmptType, nComponents) since this method
502 // needs to compile (and work) even with things like strings etc.
503
504 // Single char ad hoc "enum":
505 // - b(yte): gather bytes
506 // - f(loat): gather scalar components
507 // - i(nt): gather label components
508 // - 0: gather with Pstream read/write etc.
509
510 List<int> recvCounts;
511 List<int> recvOffsets;
512
513 char dataMode(0);
514 int nCmpts(0);
515
516 if (is_contiguous<Type>::value)
517 {
518 if (is_contiguous_scalar<Type>::value)
519 {
520 dataMode = 'f';
521 nCmpts = static_cast<int>(sizeof(Type)/sizeof(scalar));
522 }
523 else if (is_contiguous_label<Type>::value)
524 {
525 dataMode = 'i';
526 nCmpts = static_cast<int>(sizeof(Type)/sizeof(label));
527 }
528 else
529 {
530 dataMode = 'b';
531 nCmpts = static_cast<int>(sizeof(Type));
532 }
533
534 // Offsets must fit into int
535 if (UPstream::master(comm))
536 {
537 const globalIndex& globalAddr = *this;
538
539 if (globalAddr.totalSize() > (INT_MAX/nCmpts))
540 {
541 // Offsets do not fit into int - revert to manual.
542 dataMode = 0;
543 }
544 else
545 {
546 // Must be same as Pstream::nProcs(comm), at least on master!
547 const label nproc = globalAddr.nProcs();
548
549 allData.resize_nocopy(globalAddr.totalSize());
550
551 recvCounts.resize(nproc);
552 recvOffsets.resize(nproc+1);
553
554 for (label proci = 0; proci < nproc; ++proci)
555 {
556 recvCounts[proci] = globalAddr.localSize(proci)*nCmpts;
557 recvOffsets[proci] = globalAddr.localStart(proci)*nCmpts;
558 }
559 recvOffsets[nproc] = globalAddr.totalSize()*nCmpts;
560
561 // Assign local data directly
562
563 recvCounts[0] = 0; // ie, ignore for MPI_Gatherv
564 SubList<Type>(allData, globalAddr.range(0)) =
565 SubList<Type>(sendData, globalAddr.range(0));
566 }
567 }
568
569 // Consistent information for everyone
570 UPstream::broadcast(&dataMode, 1, comm);
571 }
572
573 // Dispatch
574 switch (dataMode)
575 {
576 case 'b': // Byte-wise
577 {
578 UPstream::gather
579 (
580 sendData.cdata_bytes(),
581 sendData.size_bytes(),
582 allData.data_bytes(),
583 recvCounts,
584 recvOffsets,
585 comm
586 );
587 break;
588 }
589 case 'f': // Float (scalar) components
590 {
591 typedef scalar cmptType;
592
593 UPstream::gather
594 (
595 reinterpret_cast<const cmptType*>(sendData.cdata()),
596 (sendData.size()*nCmpts),
597 reinterpret_cast<cmptType*>(allData.data()),
598 recvCounts,
599 recvOffsets,
600 comm
601 );
602 break;
603 }
604 case 'i': // Int (label) components
605 {
606 typedef label cmptType;
607
608 UPstream::gather
609 (
610 reinterpret_cast<const cmptType*>(sendData.cdata()),
611 (sendData.size()*nCmpts),
612 reinterpret_cast<cmptType*>(allData.data()),
613 recvCounts,
614 recvOffsets,
615 comm
616 );
617 break;
618 }
619 default: // Regular (manual) gathering
620 {
621 gather
622 (
623 offsets_, // needed on master only
624 comm,
625 UPstream::procID(comm),
626 sendData,
627 allData,
628 tag,
629 commsType
630 );
631 break;
632 }
633 }
634
635 if (!UPstream::master(comm))
636 {
637 allData.clear(); // safety: zero-size on non-master
638 }
639}
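
// Worked example (sketch, not part of the original source): for
// Type == vector the branch above selects dataMode 'f' with nCmpts == 3,
// so N vectors are gathered as 3*N scalars via MPI_Gatherv rather than
// sizeof(vector)*N bytes, which raises the practical 'int' size limit.
//
//     vectorField localPoints(5, vector::one);
//
//     globalIndex gi(localPoints.size());
//     vectorField allPoints;
//     gi.mpiGather(localPoints, allPoints);   // contiguous ('f') path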
640
641
642template<class Type, class OutputContainer>
644(
645 const UList<Type>& sendData,
646 const label comm,
647
648 const UPstream::commsTypes commsType,
649 const int tag
650) const
651{
652 OutputContainer allData;
653 mpiGather(sendData, allData, comm, commsType, tag);
654 return allData;
655}
656
657
658template<class Type>
660(
662 const label comm,
663
664 const UPstream::commsTypes commsType,
665 const int tag
666) const
667{
668 if (UPstream::parRun())
669 {
670 List<Type> allData;
671 mpiGather(fld, allData, comm, commsType, tag);
672
673 if (UPstream::master(comm))
674 {
675 fld.transfer(allData);
676 }
677 else
678 {
679 fld.clear(); // zero-size on non-master
680 }
681 }
682 // Serial: (no-op)
683}
684
685
686template<class Type, class OutputContainer>
688(
689 const UList<Type>& sendData,
690 OutputContainer& allData,
691 const label comm,
692
693 const UPstream::commsTypes commsType,
694 const int tag
695)
696{
697 if (UPstream::parRun())
698 {
699 // Gather sizes - only needed on master
700 globalIndex(sendData.size(), globalIndex::gatherOnly{}, comm)
701 .mpiGather(sendData, allData, comm, commsType, tag);
702 }
703 else
704 {
705 // Serial: direct copy
706 allData = sendData;
707 }
708}
709
710
711template<class Type, class OutputContainer>
713(
714 const UList<Type>& sendData,
715 const label comm,
716
717 const UPstream::commsTypes commsType,
718 const int tag
719)
720{
721 OutputContainer allData;
722 mpiGatherOp(sendData, allData, comm, commsType, tag);
723 return allData;
724}
725
726
727template<class Type>
729(
731 const label comm,
732
733 const UPstream::commsTypes commsType,
734 const int tag
735)
736{
737 if (UPstream::parRun())
738 {
739 List<Type> allData;
740 mpiGatherOp(fld, allData, comm, commsType, tag);
741
742 if (UPstream::master(comm))
743 {
744 fld.transfer(allData);
745 }
746 else
747 {
748 fld.clear(); // zero-size on non-master
749 }
750 }
751 // Serial: (no-op)
752}
753
754
755template<class Type>
757(
758 const UList<Type>& sendData,
759 List<Type>& allData,
760 const int tag,
761 const UPstream::commsTypes commsType,
762 const label comm
763)
764{
765 if (UPstream::parRun())
766 {
767 // Gather sizes - only needed on master
768 globalIndex(sendData.size(), globalIndex::gatherOnly{}, comm)
769 .gather(sendData, allData, tag, commsType, comm);
770 }
771 else
772 {
773 // Serial: direct copy
774 allData = sendData;
775 }
776}
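
// Usage sketch (not part of the original source; default arguments and a
// parallel run assumed): one-shot collection without keeping a globalIndex,
// the sizes being gathered internally via the gatherOnly constructor above.
//
//     labelList localIds = identity(4);
//
//     labelList allIds;
//     globalIndex::gatherOp(localIds, allIds);   // filled on master only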
777
778
779template<class Type, class Addr>
781(
782 const IndirectListBase<Type, Addr>& sendData,
783 List<Type>& allData,
784 const int tag,
785 const UPstream::commsTypes commsType,
786 const label comm
787)
788{
789 if (UPstream::parRun())
790 {
791 // Gather sizes - only needed on master
792 globalIndex(sendData.size(), globalIndex::gatherOnly{}, comm)
793 .gather(sendData, allData, tag, commsType, comm);
794 }
795 else
796 {
797 // Serial: direct copy
798 allData = List<Type>(sendData);
799 }
800}
801
802
803template<class Type, class OutputContainer>
805(
806 const UList<Type>& sendData,
807 const int tag,
808 const UPstream::commsTypes commsType,
809 const label comm
810)
811{
812 OutputContainer allData;
813 gatherOp(sendData, allData, tag, commsType, comm);
814 return allData;
815}
816
817
818template<class Type, class Addr, class OutputContainer>
820(
821 const IndirectListBase<Type, Addr>& sendData,
822 const int tag,
823 const UPstream::commsTypes commsType,
824 const label comm
825)
826{
827 OutputContainer allData;
828 gatherOp(sendData, allData, tag, commsType, comm);
829 return allData;
830}
831
832
833template<class Type>
835(
837 const int tag,
838 const UPstream::commsTypes commsType,
839 const label comm
840)
841{
842 if (UPstream::parRun())
843 {
844 // Gather sizes - only needed on master
845 globalIndex(fld.size(), globalIndex::gatherOnly{}, comm)
846 .gather(fld, tag, commsType, comm);
847 }
848 // Serial: (no-op)
849}
850
851
852template<class ProcIDsContainer, class Type>
854(
855 const labelUList& off, // needed on master only
856 const label comm,
857 const ProcIDsContainer& procIDs,
858 const UList<Type>& allFld,
859 UList<Type>& fld,
860 const int tag,
861 const UPstream::commsTypes preferredCommsType
862)
863{
864 // low-level: no parRun guard
865
866 // Automatically change from nonBlocking to scheduled for
867 // non-contiguous data.
868 const UPstream::commsTypes commsType =
869 (
870 (
871 !is_contiguous<Type>::value
872 && UPstream::commsTypes::nonBlocking == preferredCommsType
873 )
874 ? UPstream::commsTypes::scheduled
875 : preferredCommsType
876 );
877
878 const label startOfRequests = UPstream::nRequests();
879
880 if (Pstream::myProcNo(comm) == procIDs[0])
881 {
882 for (label i = 1; i < procIDs.size(); ++i)
883 {
884 const SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
885
886 if (procSlot.empty())
887 {
888 // Nothing to do
889 }
890 else if (is_contiguous<Type>::value)
891 {
892 UOPstream::write
893 (
894 commsType,
895 procIDs[i],
896 procSlot.cdata_bytes(),
897 procSlot.size_bytes(),
898 tag,
899 comm
900 );
901 }
902 else
903 {
904 OPstream toProc(commsType, procIDs[i], 0, tag, comm);
905 toProc << procSlot;
906 }
907 }
908
909 // Assign my local data - respect offset information
910 // so that we can request 0 entries to be copied.
911 // Also handle the case where we have a slice of the full
912 // list.
913
914 SubList<Type>(fld, off[1]-off[0]) =
915 SubList<Type>(allFld, off[1]-off[0], off[0]);
916 }
917 else
918 {
919 // Note: we are receiving into UList, so sizes MUST match or we
920 // have a problem. Can therefore reasonably assume that a zero-sized
921 // send matches a zero-sized receive, and we can skip that.
922
923 if (fld.empty())
924 {
925 // Nothing to do
926 }
927 else if (is_contiguous<Type>::value)
928 {
929 UIPstream::read
930 (
931 commsType,
932 procIDs[0],
933 fld.data_bytes(),
934 fld.size_bytes(),
935 tag,
936 comm
937 );
938 }
939 else
940 {
941 IPstream fromMaster(commsType, procIDs[0], 0, tag, comm);
942 fromMaster >> fld;
943 }
944 }
945
946 if (commsType == UPstream::commsTypes::nonBlocking)
947 {
948 // Wait for all to finish
949 UPstream::waitRequests(startOfRequests);
950 }
951}
952
953
954template<class Type>
956(
957 const UList<Type>& allData,
958 UList<Type>& localData,
959 const int tag,
960 const UPstream::commsTypes commsType,
961 const label comm
962) const
963{
964 if (UPstream::parRun())
965 {
966 scatter
967 (
968 offsets_, // needed on master only
969 comm,
970 UPstream::procID(comm),
971 allData,
972 localData,
973 tag,
974 commsType
975 );
976 }
977 else
978 {
979 // Serial: direct copy
980 // - fails miserably if incorrectly dimensioned!
981 localData.deepCopy(allData);
982 }
983}
984
985
986template<class Type, class OutputContainer>
988(
989 const UList<Type>& allData,
990 const int tag,
991 const UPstream::commsTypes commsType,
992 const label comm
993) const
994{
995 if (UPstream::parRun())
996 {
997 // The globalIndex might be correct on master only,
998 // so scatter local sizes to ensure consistency
999
1000 const label localLen
1001 (
1002 UPstream::listScatterValues<label>(this->localSizes(), comm)
1003 );
1004
1005 OutputContainer localData(localLen);
1006 this->scatter(allData, localData, tag, commsType, comm);
1007
1008 return localData;
1009 }
1010 else
1011 {
1012 // Serial: direct copy
1013 return OutputContainer(allData);
1014 }
1015}
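
// Usage sketch (not part of the original source; default arguments assumed,
// the field names are hypothetical): scatter is the inverse of gather for
// an index built from the same local sizes.
//
//     List<scalar> localData(4, 1.0);
//     globalIndex gi(localData.size());
//
//     List<scalar> allData;
//     gi.gather(localData, allData);      // master holds everything
//
//     // ... master modifies allData ...
//
//     List<scalar> newLocal(gi.localSize());
//     gi.scatter(allData, newLocal);      // back to the contributing ranks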
1016
1017
1018template<class Type, class CombineOp>
1020(
1021 List<Type>& allFld,
1022 const labelUList& globalIds,
1023 const CombineOp& cop,
1024 const label comm,
1025 const int tag
1026) const
1027{
1028 allFld.resize_nocopy(globalIds.size());
1029
1030 if (globalIds.size())
1031 {
1032 // Sort according to processor
1033 labelList order;
1034 DynamicList<label> validBins(Pstream::nProcs());
1035
1036 CompactListList<label> bins
1037 (
1038 bin(offsets(), globalIds, order, validBins)
1039 );
1040
1041 // Send local indices to individual processors as local index
1042 PstreamBuffers sendBufs(UPstream::commsTypes::nonBlocking, tag, comm);
1043
1044 for (const auto proci : validBins)
1045 {
1046 labelList localIDs(bins[proci]);
1047
1048 for (label& val : localIDs)
1049 {
1050 val = toLocal(proci, val);
1051 }
1052
1053 UOPstream os(proci, sendBufs);
1054 os << localIDs;
1055 }
1056 sendBufs.finishedSends();
1057
1058
1059 PstreamBuffers returnBufs(UPstream::commsTypes::nonBlocking, tag, comm);
1060
1061 for (const int proci : sendBufs.allProcs())
1062 {
1063 if (sendBufs.recvDataCount(proci))
1064 {
1065 UIPstream is(proci, sendBufs);
1066 labelList localIDs(is);
1067
1068 // Collect entries
1069 List<Type> fld(localIDs.size());
1070 cop(fld, localIDs);
1071
1072 UOPstream os(proci, returnBufs);
1073 os << fld;
1074 }
1075 }
1076 returnBufs.finishedSends();
1077
1078 // Slot back
1079 for (const auto proci : validBins)
1080 {
1081 label start = bins.offsets()[proci];
1082 const SubList<label> es
1083 (
1084 order,
1085 bins.offsets()[proci+1]-start, // start
1086 start
1087 );
1088 UIPstream is(proci, returnBufs);
1089 List<Type> fld(is);
1090
1091 UIndirectList<Type>(allFld, es) = fld;
1092 }
1093 }
1094}
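
// Usage sketch (not part of the original source; the field and lambda are
// hypothetical): fetch values addressed by global indices, wherever the
// owning rank is, using a combine-operator that resolves local indices.
//
//     scalarField cellValues(10, 2.0);
//     globalIndex gi(cellValues.size());
//
//     labelList wantedIds({0, gi.totalSize()-1});
//
//     List<scalar> fetched;
//     gi.get
//     (
//         fetched,
//         wantedIds,
//         [&](List<scalar>& vals, const labelList& localIds)
//         {
//             forAll(localIds, i)
//             {
//                 vals[i] = cellValues[localIds[i]];
//             }
//         },
//         UPstream::worldComm,
//         UPstream::msgType()
//     );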
1095
1096
1097// ************************************************************************* //