UPstreamWrappingTemplates.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2012-2015 OpenFOAM Foundation
    Copyright (C) 2019-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "UPstreamWrapping.H"
#include "profilingPstream.H"
#include "PstreamGlobals.H"

// * * * * * * * * * * * * * * * Global Functions  * * * * * * * * * * * * * //

template<class Type>
void Foam::PstreamDetail::broadcast0
(
    Type* values,
    int count,
    MPI_Datatype datatype,
    const label comm
)
{
    if (!UPstream::parRun())
    {
        return;
    }

    profilingPstream::beginTiming();

    // const int retval =
    MPI_Bcast
    (
        values,
        count,
        datatype,
        0,  // (root rank) == UPstream::masterNo()
        PstreamGlobals::MPICommunicators_[comm]
    );

    profilingPstream::addBroadcastTime();
}

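// Example usage (hypothetical sketch; assumes MPI_DOUBLE matches the element
// type and that the default world communicator is intended): broadcast a
// contiguous buffer of doubles from the master rank.
//
//     Foam::List<double> buf(16, 0.0);
//     Foam::PstreamDetail::broadcast0<double>
//     (
//         buf.data(), buf.size(), MPI_DOUBLE, Foam::UPstream::worldComm
//     );
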
template<class Type>
void Foam::PstreamDetail::reduce0
(
    Type* values,
    int count,
    MPI_Datatype datatype,
    MPI_Op optype,
    const label comm
)
{
    if (!UPstream::parRun())
    {
        return;
    }

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        Pout<< "** reducing:";
        if (count == 1)
        {
            Pout<< (*values);
        }
        else
        {
            Pout<< UList<Type>(values, count);
        }
        Pout<< " with comm:" << comm
            << " warnComm:" << UPstream::warnComm << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

    // const int retval =
    MPI_Reduce
    (
        MPI_IN_PLACE,  // recv is also send
        values,
        count,
        datatype,
        optype,
        0,  // (root rank) == UPstream::masterNo()
        PstreamGlobals::MPICommunicators_[comm]
    );

    profilingPstream::addReduceTime();
}

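// Example usage (hypothetical sketch; assumes MPI_DOUBLE/MPI_SUM suit the
// data): reduce per-rank partial sums onto the master rank. Only rank 0
// holds the combined values afterwards.
//
//     double partial[2] = {1.0, 2.0};
//     Foam::PstreamDetail::reduce0<double>
//     (
//         partial, 2, MPI_DOUBLE, MPI_SUM, Foam::UPstream::worldComm
//     );
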
template<class Type>
void Foam::PstreamDetail::allReduce
(
    Type* values,
    int count,
    MPI_Datatype datatype,
    MPI_Op optype,
    const label comm,
    label* requestID
)
{
    if (!UPstream::parRun())
    {
        return;
    }

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iallreduce (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Allreduce (blocking):";
        }
        if (count == 1)
        {
            Pout<< (*values);
        }
        else
        {
            Pout<< UList<Type>(values, count);
        }
        Pout<< " with comm:" << comm
            << " warnComm:" << UPstream::warnComm << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Iallreduce
            (
                MPI_IN_PLACE,  // recv is also send
                values,
                count,
                datatype,
                optype,
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iallreduce failed for "
                << UList<Type>(values, count)
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Allreduce
            (
                MPI_IN_PLACE,  // recv is also send
                values,
                count,
                datatype,
                optype,
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Allreduce failed for "
                << UList<Type>(values, count)
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addReduceTime();
}

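// Example usage (hypothetical sketch): in-place all-reduce of a single value.
// Passing a non-null requestID selects the non-blocking MPI_Iallreduce path
// (MPI-3 or later); the stored request must then be waited on, e.g. via
// UPstream::waitRequest, before the result is read. With requestID == nullptr
// the call blocks and the result is valid on return.
//
//     double value = 1.0;  // local contribution
//     Foam::label requestID = -1;
//     Foam::PstreamDetail::allReduce<double>
//     (
//         &value, 1, MPI_DOUBLE, MPI_SUM, Foam::UPstream::worldComm, &requestID
//     );
//     // ... overlap unrelated work here ...
//     Foam::UPstream::waitRequest(requestID);
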
template<class Type>
void Foam::PstreamDetail::allToAll
(
    const UList<Type>& sendData,
    UList<Type>& recvData,
    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Ialltoall (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Alltoall (blocking):";
        }
        Pout<< " np:" << np
            << " sendData:" << sendData.size()
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if (sendData.size() != np || recvData.size() != np)
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but size of sendData:"
            << sendData.size() << " or recvData:" << recvData.size()
            << " is different!"
            << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        recvData.deepCopy(sendData);
        return;
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Ialltoall
            (
                // NOTE: const_cast is a temporary hack for
                // backward-compatibility with versions of OpenMPI < 1.7.4
                const_cast<Type*>(sendData.cdata()),
                1,  // one element per rank
                datatype,
                recvData.data(),
                1,  // one element per rank
                datatype,
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ialltoall [comm: " << comm << "] failed."
                << " For " << sendData
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif


    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Alltoall
            (
                // NOTE: const_cast is a temporary hack for
                // backward-compatibility with versions of OpenMPI < 1.7.4
                const_cast<Type*>(sendData.cdata()),
                1,  // one element per rank
                datatype,
                recvData.data(),
                1,  // one element per rank
                datatype,
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Alltoall [comm: " << comm << "] failed."
                << " For " << sendData
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addAllToAllTime();
}

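// Example usage (hypothetical sketch): exchange one label per rank, e.g. the
// number of items each rank intends to send later. The MPI datatype must
// match Foam::label, which depends on the label size OpenFOAM was built with
// (MPI_LONG is assumed here for 64-bit labels; MPI_INT for 32-bit).
//
//     const Foam::label np = Foam::UPstream::nProcs();
//     Foam::labelList sendSizes(np, Foam::label(1));
//     Foam::labelList recvSizes(np, Foam::label(0));
//     Foam::PstreamDetail::allToAll<Foam::label>
//     (
//         sendSizes, recvSizes, MPI_LONG, Foam::UPstream::worldComm
//     );
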
template<class Type>
void Foam::PstreamDetail::allToAllv
(
    const Type* sendData,
    const UList<int>& sendCounts,
    const UList<int>& sendOffsets,

    Type* recvData,
    const UList<int>& recvCounts,
    const UList<int>& recvOffsets,

    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Ialltoallv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Alltoallv (blocking):";
        }
        Pout<< " sendCounts:" << sendCounts
            << " sendOffsets:" << sendOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        (sendCounts.size() != np || sendOffsets.size() < np)
     || (recvCounts.size() != np || recvOffsets.size() < np)
    )
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but sendCounts:" << sendCounts.size()
            << ", sendOffsets:" << sendOffsets.size()
            << ", recvCounts:" << recvCounts.size()
            << " or recvOffsets:" << recvOffsets.size()
            << " is different!"
            << Foam::abort(FatalError);
    }

    if (!UPstream::parRun())
    {
        if (recvCounts[0] != sendCounts[0])
        {
            FatalErrorInFunction
                << "Bytes to send " << sendCounts[0]
                << " does not equal bytes to receive " << recvCounts[0]
                << Foam::abort(FatalError);
        }
        std::memmove
        (
            recvData,
            (sendData + sendOffsets[0]),
            recvCounts[0]*sizeof(Type)
        );
        return;
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;

        if
        (
            MPI_Ialltoallv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                datatype,
                recvData,
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                datatype,
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ialltoallv [comm: " << comm << "] failed."
                << " For sendCounts " << sendCounts
                << " recvCounts " << recvCounts
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Alltoallv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                datatype,
                recvData,
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                datatype,
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Alltoallv [comm: " << comm << "] failed."
                << " For sendCounts " << sendCounts
                << " recvCounts " << recvCounts
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addAllToAllTime();
}

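// Example usage (hypothetical sketch): variable-sized exchange of doubles.
// Counts and offsets are per-rank ints, as MPI_Alltoallv expects, and the
// offsets index into the flat send/receive buffers. Here every rank happens
// to exchange exactly one value with every other rank.
//
//     const int np = Foam::UPstream::nProcs();
//     Foam::List<int> sendCounts(np, 1), sendOffsets(np, 0);
//     Foam::List<int> recvCounts(np, 1), recvOffsets(np, 0);
//     for (int i = 1; i < np; ++i)
//     {
//         sendOffsets[i] = sendOffsets[i-1] + sendCounts[i-1];
//         recvOffsets[i] = recvOffsets[i-1] + recvCounts[i-1];
//     }
//     Foam::List<double> sendBuf(np, 0.0), recvBuf(np, 0.0);
//     Foam::PstreamDetail::allToAllv<double>
//     (
//         sendBuf.cdata(), sendCounts, sendOffsets,
//         recvBuf.data(), recvCounts, recvOffsets,
//         MPI_DOUBLE, Foam::UPstream::worldComm
//     );
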
template<class Type>
void Foam::PstreamDetail::gather
(
    const Type* sendData,
    int sendCount,

    Type* recvData,
    int recvCount,

    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Igather (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Gather (blocking):";
        }
        Pout<< " np:" << np
            << " recvCount:" << recvCount
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Igather
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Igather [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Gather
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Gather [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addGatherTime();
}

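// Example usage (hypothetical sketch): gather one double from every rank onto
// the master. The receive buffer only needs to be allocated (nProcs elements)
// on the master; it is ignored on the other ranks.
//
//     const Foam::label np = Foam::UPstream::nProcs();
//     double localVal = 1.0;
//     Foam::List<double> allVals(Foam::UPstream::master() ? np : 0);
//     Foam::PstreamDetail::gather<double>
//     (
//         &localVal, 1, allVals.data(), 1,
//         MPI_DOUBLE, Foam::UPstream::worldComm
//     );
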
template<class Type>
void Foam::PstreamDetail::scatter
(
    const Type* sendData,
    int sendCount,

    Type* recvData,
    int recvCount,

    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iscatter (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Scatter (blocking):";
        }
        Pout<< " np:" << np
            << " recvCount:" << recvCount
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Iscatter
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iscatter [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Scatter
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Scatter [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addScatterTime();
}

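// Example usage (hypothetical sketch): scatter one double to every rank from
// a buffer that only needs valid contents (nProcs elements) on the master.
//
//     const Foam::label np = Foam::UPstream::nProcs();
//     Foam::List<double> allVals(Foam::UPstream::master() ? np : 0, 0.0);
//     double localVal = 0;
//     Foam::PstreamDetail::scatter<double>
//     (
//         allVals.cdata(), 1, &localVal, 1,
//         MPI_DOUBLE, Foam::UPstream::worldComm
//     );
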
template<class Type>
void Foam::PstreamDetail::gatherv
(
    const Type* sendData,
    int sendCount,

    Type* recvData,
    const UList<int>& recvCounts,
    const UList<int>& recvOffsets,

    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    if (!UPstream::parRun())
    {
        // recvCounts[0] may be invalid - use sendCount instead
        std::memmove(recvData, sendData, sendCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Igatherv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Gatherv (blocking):";
        }
        Pout<< " np:" << np
            << " recvCounts:" << recvCounts
            << " recvOffsets:" << recvOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        UPstream::master(comm)
     && (recvCounts.size() != np || recvOffsets.size() < np)
    )
    {
        // Note: allow offsets to be e.g. 1 larger than nProc so we
        // can easily loop over the result

        FatalErrorInFunction
            << "Have " << np << " ranks, but recvCounts:" << recvCounts.size()
            << " or recvOffsets:" << recvOffsets.size()
            << " is too small!"
            << Foam::abort(FatalError);
    }

    profilingPstream::beginTiming();

    // Ensure send/recv consistency on master
    if (UPstream::master(comm) && !recvCounts[0])
    {
        sendCount = 0;
    }

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Igatherv
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Igatherv failed [comm: " << comm << ']'
                << " sendCount " << sendCount
                << " recvCounts " << recvCounts
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Gatherv
            (
                const_cast<Type*>(sendData),
                sendCount,
                datatype,
                recvData,
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Gatherv failed [comm: " << comm << ']'
                << " sendCount " << sendCount
                << " recvCounts " << recvCounts
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addGatherTime();
}

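// Example usage (hypothetical sketch): gather a variable number of doubles
// per rank onto the master. recvCounts/recvOffsets only need valid contents
// on the master; giving the offsets list one extra entry (as permitted above)
// makes the flat result easy to slice per rank.
//
//     const int np = Foam::UPstream::nProcs();
//     Foam::List<double> localData(Foam::UPstream::myProcNo() + 1, 1.0);
//     Foam::List<int> recvCounts(np, 0), recvOffsets(np + 1, 0);
//     // ... fill recvCounts on the master, e.g. from a prior size exchange ...
//     for (int i = 0; i < np; ++i)
//     {
//         recvOffsets[i+1] = recvOffsets[i] + recvCounts[i];
//     }
//     Foam::List<double> allData(recvOffsets.last());
//     Foam::PstreamDetail::gatherv<double>
//     (
//         localData.cdata(), localData.size(),
//         allData.data(), recvCounts, recvOffsets,
//         MPI_DOUBLE, Foam::UPstream::worldComm
//     );
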
template<class Type>
void Foam::PstreamDetail::scatterv
(
    const Type* sendData,
    const UList<int>& sendCounts,
    const UList<int>& sendOffsets,

    Type* recvData,
    int recvCount,

    MPI_Datatype datatype,
    const label comm,
    label* requestID
)
{
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iscatterv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Scatterv (blocking):";
        }
        Pout<< " np:" << np
            << " sendCounts:" << sendCounts
            << " sendOffsets:" << sendOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        UPstream::master(comm)
     && (sendCounts.size() != np || sendOffsets.size() < np)
    )
    {
        // Note: allow offsets to be e.g. 1 larger than nProc so we
        // can easily loop over the result

        FatalErrorInFunction
            << "Have " << np << " ranks, but sendCounts:" << sendCounts.size()
            << " or sendOffsets:" << sendOffsets.size()
            << " is too small!"
            << Foam::abort(FatalError);
    }

    profilingPstream::beginTiming();

    bool handled(false);

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        handled = true;
        MPI_Request request;
        if
        (
            MPI_Iscatterv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm],
               &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iscatterv [comm: " << comm << "] failed."
                << " sendCounts " << sendCounts
                << " sendOffsets " << sendOffsets
                << Foam::abort(FatalError);
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    if (!handled)
    {
        if (requestID != nullptr)
        {
            *requestID = -1;
        }
        if
        (
            MPI_Scatterv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                datatype,
                recvData,
                recvCount,
                datatype,
                0,  // (root rank) == UPstream::masterNo()
                PstreamGlobals::MPICommunicators_[comm]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Scatterv [comm: " << comm << "] failed."
                << " sendCounts " << sendCounts
                << " sendOffsets " << sendOffsets
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addScatterTime();
}

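// Example usage (hypothetical sketch): scatter two doubles to every rank from
// a flat buffer held on the master. sendCounts/sendOffsets only need valid
// contents on the master rank.
//
//     const int np = Foam::UPstream::nProcs();
//     Foam::List<int> sendCounts(np, 2), sendOffsets(np + 1, 0);
//     for (int i = 0; i < np; ++i)
//     {
//         sendOffsets[i+1] = sendOffsets[i] + sendCounts[i];
//     }
//     Foam::List<double> allData(Foam::UPstream::master() ? 2*np : 0, 0.0);
//     Foam::List<double> localData(2, 0.0);
//     Foam::PstreamDetail::scatterv<double>
//     (
//         allData.cdata(), sendCounts, sendOffsets,
//         localData.data(), localData.size(),
//         MPI_DOUBLE, Foam::UPstream::worldComm
//     );
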
// ************************************************************************* //