Source listing (excerpts) of the MPI implementation of Foam::UPstream.
#if defined(WM_SP)
    #define MPI_SCALAR MPI_FLOAT
    #define MPI_SOLVESCALAR MPI_FLOAT
#elif defined(WM_SPDP)
    #define MPI_SCALAR MPI_FLOAT
    #define MPI_SOLVESCALAR MPI_DOUBLE
#elif defined(WM_DP)
    #define MPI_SCALAR MPI_DOUBLE
    #define MPI_SOLVESCALAR MPI_DOUBLE
#endif
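As a side note, a minimal standalone sketch (not part of this file) of how such precision-dependent datatype macros keep MPI calls consistent with the scalar typedef; the local scalar typedef and the bare MPI_Allreduce call are illustrative assumptions:

// Sketch: compile-time selection of the MPI datatype matching the scalar typedef.
// WM_SP / WM_DP mirror the build flags used above; everything else is hypothetical.
#include <mpi.h>

#if defined(WM_SP)
typedef float scalar;
#define MPI_SCALAR MPI_FLOAT
#else
typedef double scalar;
#define MPI_SCALAR MPI_DOUBLE
#endif

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    scalar local = 1.0, global = 0.0;

    // The same call works for either precision because MPI_SCALAR tracks 'scalar'.
    MPI_Allreduce(&local, &global, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);

    MPI_Finalize();
    return 0;
}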
if (str.empty() || !Foam::read(str, len) || len <= 0)
    Foam::Pout<< "UPstream::init : buffer-size " << len << '\n';
char* buf = new char[len];
if (MPI_SUCCESS != MPI_Buffer_attach(buf, len))
    Foam::Pout<< "UPstream::init : could not attach buffer\n";
if (MPI_SUCCESS == MPI_Buffer_detach(&buf, &len) && len)
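For context, a self-contained sketch of the MPI buffered-send protocol these fragments set up: attach a user-supplied buffer so buffered sends can complete locally, and detach it (which waits for any pending buffered messages) before freeing. The fixed buffer length and the MPI_Bsend exchange are illustrative; the fragments above size the buffer from an environment variable via getEnv/read instead:

#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, nProcs = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

    // Attach a buffer large enough for one message plus MPI's bookkeeping overhead.
    int len = 1024 * 1024 + MPI_BSEND_OVERHEAD;
    char* buf = new char[len];
    MPI_Buffer_attach(buf, len);

    if (nProcs > 1)
    {
        double payload = 3.14;
        if (rank == 0)
        {
            // Buffered send: returns once the message is copied into the attached buffer.
            MPI_Bsend(&payload, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD);
        }
        else if (rank == 1)
        {
            MPI_Recv(&payload, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }

    // Detach blocks until buffered messages have been delivered; then the memory can be freed.
    void* detached = nullptr;
    MPI_Buffer_detach(&detached, &len);
    delete[] static_cast<char*>(detached);

    MPI_Finalize();
    return 0;
}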
validParOptions.insert("np", "");
validParOptions.insert("p4pg", "PI file");
validParOptions.insert("p4wd", "directory");
validParOptions.insert("p4amslave", "");
validParOptions.insert("p4yourname", "hostname");
validParOptions.insert("machinefile", "machine file");
MPI_Finalized(&flag);
    << "MPI was already finalized - cannot perform MPI_Init\n"
MPI_Initialized(&flag);
    Pout<< "UPstream::initNull : was already initialized\n";
int numprocs = 0, myRank = 0;
int provided_thread_support = 0;
MPI_Finalized(&flag);
    << "MPI was already finalized - cannot perform MPI_Init" << endl
MPI_Initialized(&flag);
    << "MPI was already initialized - cannot perform MPI_Init" << nl
    << "This could indicate an application programming error!"
    Pout<< "UPstream::init : was already initialized\n";
    ? MPI_THREAD_MULTIPLE
    &provided_thread_support
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
    Pout<< "UPstream::init : procs=" << numprocs << " rank:" << myRank << endl;
    << "attempt to run parallel on 1 processor"
setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);
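A hedged sketch of the MPI_Init_thread pattern suggested by the fragments above: request MPI_THREAD_MULTIPLE only when threaded communication is needed, check what the library actually provided, then query communicator size and rank. The needsThread flag is an assumption standing in for the argument of UPstream::init:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    const bool needsThread = false;   // assumption: would come from the caller

    int provided_thread_support = 0;
    MPI_Init_thread
    (
        &argc,
        &argv,
        (needsThread ? MPI_THREAD_MULTIPLE : MPI_THREAD_SINGLE),
        &provided_thread_support
    );

    int numprocs = 0, myRank = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    if (needsThread && provided_thread_support < MPI_THREAD_MULTIPLE)
    {
        // The library granted less than requested; threaded comms would be unsafe.
        std::fprintf(stderr, "rank %d: MPI_THREAD_MULTIPLE not available\n", myRank);
    }

    std::printf("procs=%d rank=%d\n", numprocs, myRank);

    MPI_Finalize();
    return 0;
}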
Pout<< "UPstream::shutdown\n";
MPI_Initialized(&flag);
MPI_Finalized(&flag);
    << "MPI was already finalized (by a connected program?)\n";
Pout<< "UPstream::shutdown : was already finalized\n";
label nOutstanding = 0;
    << "There were still " << nOutstanding << " outstanding MPI_Requests." << nl
    << "Which means your code exited before doing a " << " UPstream::waitRequests()." << nl
    << "This should not happen for a normal code exit."
forAll(myProcNo_, communicator)
    if (myProcNo_[communicator] != -1)
        freePstreamCommunicator(communicator);
    << "Finalizing MPI, but was initialized elsewhere\n";
MPI_Abort(MPI_COMM_WORLD, errNo);
MPI_Abort(MPI_COMM_WORLD, 1);
    const sumOp<scalar>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
    const minOp<scalar>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, communicator);
    const sumOp<vector2D>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
vector2D twoScalars(Value, scalar(Count));
reduce(twoScalars, sumOp<vector2D>(), tag, communicator);
Value = twoScalars.x();
Count = twoScalars.y();
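The sumReduce fragment above avoids a second collective by packing the value and the count into one two-component vector and reducing it once. A minimal sketch of the same idea using a plain MPI_Allreduce on a two-element array (outside the Foam wrappers; names are illustrative):

#include <mpi.h>

// Sum a value and a count across ranks with a single reduction,
// mirroring the vector2D packing used by sumReduce above.
void sumReduceSketch(double& value, long& count, MPI_Comm comm)
{
    double packed[2] = { value, static_cast<double>(count) };
    double summed[2] = { 0.0, 0.0 };

    MPI_Allreduce(packed, summed, 2, MPI_DOUBLE, MPI_SUM, comm);

    value = summed[0];
    count = static_cast<long>(summed[1]);
}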
    const sumOp<scalar>& bop,
    const label communicator,
iallReduce<scalar>(&Value, 1, MPI_SCALAR, MPI_SUM, communicator, requestID);
    const sumOp<scalar>& bop,
    const label communicator,
    const sumOp<solveScalar>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 1, MPI_SOLVESCALAR, MPI_SUM, bop, tag, communicator);
    const minOp<solveScalar>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 1, MPI_SOLVESCALAR, MPI_MIN, bop, tag, communicator);
    Vector2D<solveScalar>& Value,
    const sumOp<Vector2D<solveScalar>>& bop,
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
allReduce(Value, 2, MPI_SOLVESCALAR, MPI_SUM, bop, tag, communicator);
    const label communicator
    Pout<< "** reducing:" << Value << " with comm:" << communicator
Vector2D<solveScalar> twoScalars(Value, solveScalar(Count));
reduce(twoScalars, sumOp<Vector2D<solveScalar>>(), tag, communicator);
Value = twoScalars.x();
Count = twoScalars.y();
    const sumOp<solveScalar>& bop,
    const label communicator,
iallReduce<solveScalar>
    const sumOp<solveScalar>& bop,
    const label communicator,
iallReduce<solveScalar>
    const label communicator
label np = nProcs(communicator);
if (sendData.size() != np || recvData.size() != np)
    << "Size of sendData " << sendData.size()
    << " or size of recvData " << recvData.size()
    << " is not equal to the number of processors in the domain "
recvData.deepCopy(sendData);
    const_cast<label*>(sendData.begin()),
    << "MPI_Alltoall failed for " << sendData
    << " on communicator " << communicator
const char* sendData,
    sendSizes.size() != np
 || sendOffsets.size() != np
 || recvSizes.size() != np
 || recvOffsets.size() != np
    << "Size of sendSize " << sendSizes.size()
    << ", sendOffsets " << sendOffsets.size()
    << ", recvSizes " << recvSizes.size()
    << " or recvOffsets " << recvOffsets.size()
    << " is not equal to the number of processors in the domain "
if (recvSizes[0] != sendSizes[0])
    << "Bytes to send " << sendSizes[0]
    << " does not equal bytes to receive " << recvSizes[0]
std::memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]);
    const_cast<char*>(sendData),
    const_cast<int*>(sendSizes.begin()),
    const_cast<int*>(sendOffsets.begin()),
    const_cast<int*>(recvSizes.begin()),
    const_cast<int*>(recvOffsets.begin()),
    << "MPI_Alltoallv failed for sendSizes " << sendSizes
    << " recvSizes " << recvSizes
const char* sendData,
const label communicator
label np = nProcs(communicator);
 && (recvSizes.size() != np || recvOffsets.size() < np)
    << "Size of recvSizes " << recvSizes.size()
    << " or recvOffsets " << recvOffsets.size()
    << " is not equal to the number of processors in the domain "
std::memmove(recvData, sendData, sendSize);
    const_cast<char*>(sendData),
    const_cast<int*>(recvSizes.begin()),
    const_cast<int*>(recvOffsets.begin()),
    << "MPI_Gatherv failed for sendSize " << sendSize
    << " recvSizes " << recvSizes
    << " communicator " << communicator
const char* sendData,
const UList<int>& sendSizes,
const UList<int>& sendOffsets,
const label communicator
label np = nProcs(communicator);
 && (sendSizes.size() != np || sendOffsets.size() != np)
    << "Size of sendSizes " << sendSizes.size()
    << " or sendOffsets " << sendOffsets.size()
    << " is not equal to the number of processors in the domain "
std::memmove(recvData, sendData, recvSize);
    const_cast<char*>(sendData),
    const_cast<int*>(sendSizes.begin()),
    const_cast<int*>(sendOffsets.begin()),
    << "MPI_Scatterv failed for sendSizes " << sendSizes
    << " sendOffsets " << sendOffsets
    << " communicator " << communicator
void Foam::UPstream::allocatePstreamCommunicator
    const label parentIndex,
MPI_Group newGroup = MPI_GROUP_NULL;
MPI_Comm newComm = MPI_COMM_NULL;
    << "PstreamGlobals out of sync with UPstream data. Problem."
if (parentIndex == -1)
    << "world communicator should always be index "
procIDs_[index].setSize(numProcs);
forAll(procIDs_[index], i)
    procIDs_[index][i] = i;
    procIDs_[index].size(),
    procIDs_[index].begin(),
myProcNo_[index] = -1;
    << " when allocating communicator at " << index
    << " from ranks " << procIDs_[index]
    << " of parent " << parentIndex
    << " cannot find my own rank"
void Foam::UPstream::freePstreamCommunicator(const label communicator)
Pout<< "UPstream::waitRequests : starting wait for "
    << " outstanding requests starting at " << start << endl;
SubList<MPI_Request> waitRequests
    waitRequests.size(),
    waitRequests.begin(),
    << "MPI_Waitall returned with error" << Foam::endl;
resetRequests(start);
Pout<< "UPstream::waitRequests : finished wait." << endl;
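For context, a sketch of the non-blocking pattern that waitRequests() completes: post MPI_Isend/MPI_Irecv, keep the MPI_Request handles in a list, and call MPI_Waitall on the whole list before reusing the buffers, roughly what waiting on outstandingRequests_ from start onwards does. The ring exchange is illustrative:

#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int np = 0, rank = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &np);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    std::vector<MPI_Request> outstandingRequests;

    // Non-blocking ring exchange: send to the next rank, receive from the previous one.
    int sendVal = rank, recvVal = -1;
    const int next = (rank + 1) % np;
    const int prev = (rank + np - 1) % np;

    MPI_Request req;
    MPI_Isend(&sendVal, 1, MPI_INT, next, 0, MPI_COMM_WORLD, &req);
    outstandingRequests.push_back(req);

    MPI_Irecv(&recvVal, 1, MPI_INT, prev, 0, MPI_COMM_WORLD, &req);
    outstandingRequests.push_back(req);

    // Wait for every outstanding request before touching the buffers again.
    MPI_Waitall
    (
        int(outstandingRequests.size()),
        outstandingRequests.data(),
        MPI_STATUSES_IGNORE
    );

    outstandingRequests.clear();   // the analogue of resetRequests(start)

    MPI_Finalize();
    return 0;
}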
Pout<< "UPstream::waitRequest : starting wait for request:" << i
    << " outstanding send requests and you are asking for i=" << i
    << "Maybe you are mixing blocking/non-blocking comms?"
    << "MPI_Wait returned with error" << Foam::endl;
Pout<< "UPstream::waitRequest : finished wait for request:" << i
Pout<< "UPstream::finishedRequest : checking request:" << i
    << " outstanding send requests and you are asking for i=" << i
    << "Maybe you are mixing blocking/non-blocking comms?"
Pout<< "UPstream::finishedRequest : finished request:" << i
Pout<< "UPstream::allocateTag " << s
Pout<< "UPstream::allocateTag " << s
Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
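Finally, a minimal sketch of the tag-pool idea behind allocateTag/freeTag: reuse previously freed message tags when available, otherwise hand out a fresh one, so that concurrent exchanges on the same communicator get distinct tags. Class name, base value and bookkeeping are illustrative, not the UPstream implementation:

#include <vector>

// Illustrative tag pool: recycle freed tags, otherwise grow from a base value.
class TagPool
{
    std::vector<int> freedTags_;   // analogue of UPstream's freedTags_
    int nTags_ = 0;                // highest tag handed out so far

public:
    static constexpr int baseTag = 100;

    int allocateTag()
    {
        if (!freedTags_.empty())
        {
            const int tag = freedTags_.back();
            freedTags_.pop_back();
            return tag;
        }
        return baseTag + nTags_++;
    }

    void freeTag(const int tag)
    {
        freedTags_.push_back(tag);
    }
};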
int debug
Static debugging option.
static int allocateTag(const char *)
static label warnComm
Debugging: warn for use of any communicator differing from warnComm.
static void resetRequests(const label sz)
Truncate number of outstanding requests.
static void addWaitTime()
Add time increment to waitTime.
static void printStack(Ostream &os)
Helper function to print a stack.
A class for handling words, derived from Foam::string.
constexpr auto begin(C &c) -> decltype(c.begin())
Return iterator to the beginning of the container c.
DynamicList< label > freedRequests_
List< T > values(const HashTable< T, Key, Hash > &tbl, const bool doSort=false)
List of values from HashTable, optionally sorted.
bool read(const char *buf, int32_t &val)
Same as readInt32.
static bool & parRun()
Is this a parallel run?
static void waitRequests(const label start=0)
Wait until all requests (from start onwards) have finished.
static void beginTiming()
Update timer prior to measurement.
DynamicList< MPI_Request > outstandingRequests_
Outstanding non-blocking operations.
static void abort()
Call MPI_Abort with no other checks or cleanup.
Ostream & endl(Ostream &os)
Add newline and flush stream.
prefixOSstream Pout
An Ostream wrapper for parallel output to std::cout.
static void allToAll(const labelUList &sendData, labelUList &recvData, const label communicator=0)
Exchange label with all processors (in the communicator).
static void gather(const char *sendData, int sendSize, char *recvData, const UList< int > &recvSizes, const UList< int > &recvOffsets, const label communicator=0)
Receive data from all processors on the master.
iterator begin()
Return an iterator to begin traversing the UList.
string getEnv(const std::string &envName)
Get environment value for given envName.
#define forAll(list, i)
Loop across all elements in list.
static void addScatterTime()
Add time increment to scatterTime.
DynamicList< MPI_Comm > MPICommunicators_
void allReduce(Type &Value, int count, MPI_Datatype MPIType, MPI_Op op, const BinaryOp &bop, const int tag, const label communicator)
DynamicList< MPI_Group > MPIGroups_
void reduce(const List< UPstream::commsStruct > &comms, T &Value, const BinaryOp &bop, const int tag, const label comm)
static const int mpiBufferSize
MPI buffer-size (bytes)
static void addGatherTime()
Add time increment to gatherTime.
DynamicList< T, SizeMin > & append(const T &val)
Append an element to the end of this list.
static void waitRequest(const label i)
Wait until request i has finished.
static void addValidParOptions(HashTable< string > &validParOptions)
static void scatter(const char *sendData, const UList< int > &sendSizes, const UList< int > &sendOffsets, char *recvData, int recvSize, const label communicator=0)
Send data to all processors from the root of the communicator.
errorManip< error > abort(error &err)
static void detachOurBuffers()
Inter-processor communication reduction functions.
Various functions to wrap MPI_Allreduce.
errorManipArg< error, int > exit(error &err, const int errNo=1)
static bool master(const label communicator=0)
Am I the master process.
static void addAllToAllTime()
Add time increment to allToAllTime.
#define FatalErrorInFunction
Report an error message using Foam::FatalError.
static label nRequests()
Get number of outstanding requests.
Vector2D< scalar > vector2D
A 2D vector of scalars obtained from the generic Vector2D.
static bool initNull()
Special purpose initialisation function.
static label worldComm
Default communicator (all processors)
A 1D vector of objects of type <T>, where the size of the vector is known and can be used for subscript bounds checking, etc.
static void shutdown(int errNo=0)
Shutdown (finalize) MPI as required.
static bool finishedRequest(const label i)
Non-blocking comms: has request i finished?
T remove()
Remove and return the last element. Fatal on an empty list.
DynamicList< int > freedTags_
Free'd message tags.
void size(const label n) noexcept
Override size to be inconsistent with allocated storage.
static void attachOurBuffers()
void sumReduce(T &Value, label &Count, const int tag=Pstream::msgType(), const label comm=UPstream::worldComm)
Helper class for allocating/freeing communicators.
UList< label > labelUList
A UList of labels.
static bool init(int &argc, char **&argv, const bool needsThread)
Initialisation function called from main.
int nTags_
Max outstanding message tag operations.
#define WarningInFunction
Report a warning using Foam::Warning.
static void exit(int errNo=1)
Shutdown (finalize) MPI as required and exit program with errNo.
static void freeTag(const char *, const int tag)