// Foam::PstreamDetail::broadcast0  (excerpt)
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    // ...
    profilingPstream::beginTiming();
    // MPI_Bcast from the master rank, using:
        PstreamGlobals::MPICommunicators_[comm]
    profilingPstream::addBroadcastTime();
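
The excerpt above keeps only the serial guard, the profiling hooks and the communicator lookup. As a rough, self-contained sketch of the same pattern in plain MPI (outside OpenFOAM; the function name and the chrono-based timing are illustrative, not from the source):

    #include <mpi.h>
    #include <chrono>

    // Sketch only: mimics the broadcast0 pattern with plain MPI.
    // Rank 0 plays the role of the OpenFOAM master.
    template<class Type>
    void broadcastSketch(Type* values, int count, MPI_Datatype datatype, MPI_Comm comm)
    {
        int nProcs = 1;
        MPI_Comm_size(comm, &nProcs);
        if (nProcs < 2)
        {
            return;                                  // stands in for the !UPstream::parRun() guard
        }

        const auto t0 = std::chrono::steady_clock::now();    // beginTiming()

        MPI_Bcast(values, count, datatype, 0, comm);          // root 0 == master

        const auto elapsed = std::chrono::steady_clock::now() - t0;
        (void)elapsed;                               // addBroadcastTime() would accumulate this
    }
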
// Foam::PstreamDetail::reduce0  (excerpt)
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    // ...
    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        Pout<< "** reducing:";
        Pout<< UList<Type>(values, count);
        Pout<< " with comm:" << comm
            << " warnComm:" << UPstream::warnComm << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();
    // MPI_Reduce to the master rank, using:
        PstreamGlobals::MPICommunicators_[comm]
    profilingPstream::addReduceTime();
// Foam::PstreamDetail::allReduce  (excerpt)
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    // ...
    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iallreduce (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Allreduce (blocking):";
        }
        Pout<< UList<Type>(values, count);
        Pout<< " with comm:" << comm
            << " warnComm:" << UPstream::warnComm << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if (MPI_Iallreduce(/* ..., */ PstreamGlobals::MPICommunicators_[comm], &request))
        {
            FatalErrorInFunction
                << "MPI_Iallreduce failed for "
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if (MPI_Allreduce(/* ..., */ PstreamGlobals::MPICommunicators_[comm]))
    {
        FatalErrorInFunction
            << "MPI_Allreduce failed for "
            // ...
    }

    profilingPstream::addReduceTime();
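
The non-blocking branch parks the MPI_Request in PstreamGlobals::outstandingRequests_ and hands the caller its index through requestID, recycling slots listed in freedRequests_ before growing the list. A self-contained sketch of that bookkeeping pattern, with std::vector standing in for OpenFOAM's containers (names are illustrative, not from the source):

    #include <mpi.h>
    #include <vector>

    // Illustrative request bookkeeping, mirroring the fragments above.
    static std::vector<MPI_Request> outstandingRequests;
    static std::vector<int> freedSlots;

    int storeRequest(MPI_Request request)
    {
        if (!freedSlots.empty())
        {
            const int id = freedSlots.back();        // reuse a completed slot
            freedSlots.pop_back();
            outstandingRequests[id] = request;
            return id;
        }
        outstandingRequests.push_back(request);      // otherwise append
        return static_cast<int>(outstandingRequests.size()) - 1;
    }

    void waitRequest(int id)
    {
        MPI_Wait(&outstandingRequests[id], MPI_STATUS_IGNORE);
        freedSlots.push_back(id);                    // slot can be recycled later
    }
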
// Foam::PstreamDetail::allToAll  (excerpt)
    MPI_Datatype datatype,
    // ...
    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Ialltoall (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Alltoall (blocking):";
        }
        Pout<< " sendData:" << sendData.size()
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if (sendData.size() != np || recvData.size() != np)
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but size of sendData:"
            << sendData.size() << " or recvData:" << recvData.size()
            // ...
    }

    if (!UPstream::parRun())
    // ...

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Ialltoall
            (
                const_cast<Type*>(sendData.cdata()),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ialltoall [comm: " << comm << "] failed."
                << " For " << sendData
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Alltoall
        (
            const_cast<Type*>(sendData.cdata()),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Alltoall [comm: " << comm << "] failed."
            << " For " << sendData
            // ...
    }

    profilingPstream::addAllToAllTime();
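
The size check above requires exactly one element per rank in both buffers (sendData.size() == recvData.size() == np). A standalone MPI program showing that fixed-size exchange, independent of OpenFOAM:

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        int np = 1, rank = 0;
        MPI_Comm_size(MPI_COMM_WORLD, &np);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // One element destined for / received from every rank
        std::vector<int> sendData(np), recvData(np);
        for (int i = 0; i < np; ++i)
        {
            sendData[i] = rank*100 + i;
        }

        MPI_Alltoall(sendData.data(), 1, MPI_INT,
                     recvData.data(), 1, MPI_INT, MPI_COMM_WORLD);

        std::printf("rank %d received %d from rank 0\n", rank, recvData[0]);

        MPI_Finalize();
        return 0;
    }
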
// Foam::PstreamDetail::allToAllv  (excerpt)
    const Type* sendData,
    // ...
    MPI_Datatype datatype,
    // ...
    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Ialltoallv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Alltoallv (blocking):";
        }
        Pout<< " sendCounts:" << sendCounts
            << " sendOffsets:" << sendOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        (sendCounts.size() != np || sendOffsets.size() < np)
     || (recvCounts.size() != np || recvOffsets.size() < np)
    )
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but sendCounts:" << sendCounts.size()
            << ", sendOffsets:" << sendOffsets.size()
            << ", recvCounts:" << recvCounts.size()
            << " or recvOffsets:" << recvOffsets.size()
            // ...
    }

    if (!UPstream::parRun())
    {
        if (recvCounts[0] != sendCounts[0])
        {
            FatalErrorInFunction
                << "Bytes to send " << sendCounts[0]
                << " does not equal bytes to receive " << recvCounts[0]
                // ...
        }
        std::memmove
        (
            recvData,
            (sendData + sendOffsets[0]),
            recvCounts[0]*sizeof(Type)
        );
        return;
    }

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Ialltoallv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                // ...
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ialltoallv [comm: " << comm << "] failed."
                << " For sendCounts " << sendCounts
                << " recvCounts " << recvCounts
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Alltoallv
        (
            const_cast<Type*>(sendData),
            const_cast<int*>(sendCounts.cdata()),
            const_cast<int*>(sendOffsets.cdata()),
            // ...
            const_cast<int*>(recvCounts.cdata()),
            const_cast<int*>(recvOffsets.cdata()),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Alltoallv [comm: " << comm << "] failed."
            << " For sendCounts " << sendCounts
            << " recvCounts " << recvCounts
            // ...
    }

    profilingPstream::addAllToAllTime();
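
The variable-size variant requires the count lists to be sized to the number of ranks and the offset lists to be at least that long. The offsets are ordinary MPI displacements, i.e. where each rank's chunk starts in the flat buffer; a small helper sketch (not from the source) builds them as the exclusive prefix sum of the counts:

    #include <vector>

    // Hypothetical helper: displacements for MPI_Alltoallv/MPI_Gatherv-style calls.
    // offsets[i] is the start of rank i's chunk; the total buffer length for a
    // non-empty counts list is offsets.back() + counts.back().
    std::vector<int> makeOffsets(const std::vector<int>& counts)
    {
        std::vector<int> offsets(counts.size(), 0);
        for (std::size_t i = 1; i < counts.size(); ++i)
        {
            offsets[i] = offsets[i-1] + counts[i-1];
        }
        return offsets;
    }
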
// Foam::PstreamDetail::gather  (excerpt)
    const Type* sendData,
    // ...
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Igather (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Gather (blocking):";
        }
        Pout<< " recvCount:" << recvCount
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Igather
            (
                const_cast<Type*>(sendData),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Igather [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Gather
        (
            const_cast<Type*>(sendData),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Gather [comm: " << comm << "] failed."
            << " sendCount " << sendCount
            << " recvCount " << recvCount
            // ...
    }

    profilingPstream::addGatherTime();
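
When a requestID is supplied, the MPI_VERSION >= 3 branch starts an MPI_Igather and defers completion. A minimal standalone sketch of that start/overlap/wait pattern in plain MPI (the function name and the double payload are illustrative):

    #include <mpi.h>

    // Non-blocking gather to rank 0; recvData must hold nProcs*recvCount
    // elements on the root and may be nullptr elsewhere.
    void igatherSketch(const double* sendData, int sendCount,
                       double* recvData, int recvCount, MPI_Comm comm)
    {
        MPI_Request request;
        MPI_Igather(sendData, sendCount, MPI_DOUBLE,
                    recvData, recvCount, MPI_DOUBLE,
                    0,                      // root == master
                    comm, &request);

        // ... computation can overlap with the communication here ...

        MPI_Wait(&request, MPI_STATUS_IGNORE);   // corresponds to waiting on requestID
    }
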
// Foam::PstreamDetail::scatter  (excerpt)
    const Type* sendData,
    // ...
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iscatter (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Scatter (blocking):";
        }
        Pout<< " recvCount:" << recvCount
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Iscatter
            (
                const_cast<Type*>(sendData),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iscatter [comm: " << comm << "] failed."
                << " sendCount " << sendCount
                << " recvCount " << recvCount
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Scatter
        (
            const_cast<Type*>(sendData),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Scatter [comm: " << comm << "] failed."
            << " sendCount " << sendCount
            << " recvCount " << recvCount
            // ...
    }

    profilingPstream::addScatterTime();
// Foam::PstreamDetail::gatherv  (excerpt)
    const Type* sendData,
    // ...
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, sendCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Igatherv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Gatherv (blocking):";
        }
        Pout<< " recvCounts:" << recvCounts
            << " recvOffsets:" << recvOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        UPstream::master(comm)
     && (recvCounts.size() != np || recvOffsets.size() < np)
    )
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but recvCounts:" << recvCounts.size()
            << " or recvOffsets:" << recvOffsets.size()
            // ...
    }

    profilingPstream::beginTiming();

    if (UPstream::master(comm) && !recvCounts[0])
    // ...

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Igatherv
            (
                const_cast<Type*>(sendData),
                // ...
                const_cast<int*>(recvCounts.cdata()),
                const_cast<int*>(recvOffsets.cdata()),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Igatherv failed [comm: " << comm << ']'
                << " sendCount " << sendCount
                << " recvCounts " << recvCounts
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Gatherv
        (
            const_cast<Type*>(sendData),
            // ...
            const_cast<int*>(recvCounts.cdata()),
            const_cast<int*>(recvOffsets.cdata()),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Gatherv failed [comm: " << comm << ']'
            << " sendCount " << sendCount
            << " recvCounts " << recvCounts
            // ...
    }

    profilingPstream::addGatherTime();
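
recvCounts and recvOffsets are only significant on the root, which is why the size check above is restricted to UPstream::master(comm). A standalone MPI_Gatherv example (plain MPI, not OpenFOAM) in which each rank contributes rank+1 values and the root assembles the counts and offsets:

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        int np = 1, rank = 0;
        MPI_Comm_size(MPI_COMM_WORLD, &np);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        const int sendCount = rank + 1;
        std::vector<double> sendData(sendCount, double(rank));

        std::vector<int> recvCounts, recvOffsets;
        std::vector<double> recvData;
        if (rank == 0)
        {
            recvCounts.resize(np);
            recvOffsets.assign(np, 0);
            for (int i = 0; i < np; ++i) recvCounts[i] = i + 1;
            for (int i = 1; i < np; ++i) recvOffsets[i] = recvOffsets[i-1] + recvCounts[i-1];
            recvData.resize(recvOffsets[np-1] + recvCounts[np-1]);
        }

        MPI_Gatherv(sendData.data(), sendCount, MPI_DOUBLE,
                    recvData.data(), recvCounts.data(), recvOffsets.data(),
                    MPI_DOUBLE, 0, MPI_COMM_WORLD);

        MPI_Finalize();
        return 0;
    }
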
// Foam::PstreamDetail::scatterv  (excerpt)
    const Type* sendData,
    // ...
    MPI_Datatype datatype,
    // ...
    if (!UPstream::parRun())
    {
        std::memmove(recvData, sendData, recvCount*sizeof(Type));
        return;
    }

    const label np = UPstream::nProcs(comm);

    if (UPstream::warnComm != -1 && comm != UPstream::warnComm)
    {
        if (requestID != nullptr)
        {
            Pout<< "** MPI_Iscatterv (non-blocking):";
        }
        else
        {
            Pout<< "** MPI_Scatterv (blocking):";
        }
        Pout<< " sendCounts:" << sendCounts
            << " sendOffsets:" << sendOffsets
            << " with comm:" << comm
            << " warnComm:" << UPstream::warnComm
            << endl;
        error::printStack(Pout);
    }

    if
    (
        UPstream::master(comm)
     && (sendCounts.size() != np || sendOffsets.size() < np)
    )
    {
        FatalErrorInFunction
            << "Have " << np << " ranks, but sendCounts:" << sendCounts.size()
            << " or sendOffsets:" << sendOffsets.size()
            // ...
    }

    profilingPstream::beginTiming();

#if defined(MPI_VERSION) && (MPI_VERSION >= 3)
    if (requestID != nullptr)
    {
        MPI_Request request;
        if
        (
            MPI_Iscatterv
            (
                const_cast<Type*>(sendData),
                const_cast<int*>(sendCounts.cdata()),
                const_cast<int*>(sendOffsets.cdata()),
                // ...
                PstreamGlobals::MPICommunicators_[comm],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iscatterv [comm: " << comm << "] failed."
                << " sendCounts " << sendCounts
                << " sendOffsets " << sendOffsets
                // ...
        }

        if (PstreamGlobals::freedRequests_.size())
        {
            *requestID = PstreamGlobals::freedRequests_.remove();
            PstreamGlobals::outstandingRequests_[*requestID] = request;
        }
        else
        {
            *requestID = PstreamGlobals::outstandingRequests_.size();
            PstreamGlobals::outstandingRequests_.append(request);
        }
    }
#endif

    // Blocking fallback
    if (requestID != nullptr)
    // ...
    if
    (
        MPI_Scatterv
        (
            const_cast<Type*>(sendData),
            const_cast<int*>(sendCounts.cdata()),
            const_cast<int*>(sendOffsets.cdata()),
            // ...
            PstreamGlobals::MPICommunicators_[comm]
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Scatterv [comm: " << comm << "] failed."
            << " sendCounts " << sendCounts
            << " sendOffsets " << sendOffsets
            // ...
    }

    profilingPstream::addScatterTime();
// Declarations of the wrapped collective operations (each is a template<class Type> function):
void broadcast0(Type *values, int count, MPI_Datatype datatype, const label comm);
void reduce0(Type *values, int count, MPI_Datatype datatype, MPI_Op optype, const label comm);
void allReduce(Type *values, int count, MPI_Datatype datatype, MPI_Op optype, const label comm, label *requestID=nullptr);
void allToAll(const UList<Type> &sendData, UList<Type> &recvData, MPI_Datatype datatype, const label comm, label *requestID=nullptr);
void allToAllv(const Type *sendData, const UList<int> &sendCounts, const UList<int> &sendOffsets, Type *recvData, const UList<int> &recvCounts, const UList<int> &recvOffsets, MPI_Datatype datatype, const label comm, label *requestID=nullptr);
void gather(const Type *sendData, int sendCount, Type *recvData, int recvCount, MPI_Datatype datatype, const label comm, label *requestID=nullptr);
void scatter(const Type *sendData, int sendCount, Type *recvData, int recvCount, MPI_Datatype datatype, const label comm, label *requestID=nullptr);
void gatherv(const Type *sendData, int sendCount, Type *recvData, const UList<int> &recvCounts, const UList<int> &recvOffsets, MPI_Datatype datatype, const label comm, label *requestID=nullptr);
void scatterv(const Type *sendData, const UList<int> &sendCounts, const UList<int> &sendOffsets, Type *recvData, int recvCount, MPI_Datatype datatype, const label comm, label *requestID=nullptr);