33template<
class SubListType>
35Foam::globalIndex::calcListOffsets
37 const List<SubListType>& lists,
38 const bool checkOverflow
43 const label len = lists.size();
50 for (label i = 0; i < len; ++i)
53 start += lists[i].size();
55 if (checkOverflow && start < values[i])
57 reportOverflowAndExit(i);
67template<
class ProcIDsContainer,
class Type>
71 const ProcIDsContainer& procIDs,
72 const Type& localValue,
97 allValues[0] = localValue;
99 for (label i = 1; i < procIDs.size(); ++i)
107 reinterpret_cast<char*
>(&allValues[i]),
115 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
116 fromProc >> allValues[i];
130 reinterpret_cast<const char*
>(&localValue),
138 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
139 toMaster << localValue;
151template<
class ProcIDsContainer,
class Type>
156 const ProcIDsContainer& procIDs,
191 for (label i = 1; i < procIDs.size(); ++i)
195 if (procSlot.
empty())
213 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
214 fromProc >> procSlot;
238 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
251template<
class Type,
class Addr>
305 if (!localSlot.
empty())
311 for (label i = 1; i < procIDs.
size(); ++i)
315 if (procSlot.
empty())
321 IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
322 fromProc >> procSlot;
334 OPstream toMaster(commsType, procIDs[0], 0, tag, comm);
371 UPstream::procID(comm),
385template<
class Type,
class Addr>
407 UPstream::procID(comm),
421template<
class Type,
class OutputContainer>
430 OutputContainer allData;
431 gather(sendData, allData, tag, commsType, comm);
436template<
class Type,
class Addr,
class OutputContainer>
445 OutputContainer allData;
446 gather(sendData, allData, tag, commsType, comm);
463 gather(
fld, allData, tag, commsType, comm);
467 fld.transfer(allData);
478template<
class Type,
class OutputContainer>
482 OutputContainer& allData,
521 nCmpts =
static_cast<int>(
sizeof(Type)/
sizeof(scalar));
526 nCmpts =
static_cast<int>(
sizeof(Type)/
sizeof(label));
531 nCmpts =
static_cast<int>(
sizeof(Type));
539 if (globalAddr.
totalSize() > (INT_MAX/nCmpts))
547 const label nproc = globalAddr.
nProcs();
549 allData.resize_nocopy(globalAddr.
totalSize());
552 recvOffsets.
resize(nproc+1);
554 for (label proci = 0; proci < nproc; ++proci)
556 recvCounts[proci] = globalAddr.
localSize(proci)*nCmpts;
557 recvOffsets[proci] = globalAddr.
localStart(proci)*nCmpts;
559 recvOffsets[nproc] = globalAddr.
totalSize()*nCmpts;
582 allData.data_bytes(),
591 typedef scalar cmptType;
595 reinterpret_cast<const cmptType*
>(sendData.
cdata()),
596 (sendData.
size()*nCmpts),
597 reinterpret_cast<cmptType*
>(allData.data()),
606 typedef label cmptType;
610 reinterpret_cast<const cmptType*
>(sendData.
cdata()),
611 (sendData.
size()*nCmpts),
612 reinterpret_cast<cmptType*
>(allData.data()),
625 UPstream::procID(comm),
642template<
class Type,
class OutputContainer>
652 OutputContainer allData;
653 mpiGather(sendData, allData, comm, commsType, tag);
671 mpiGather(
fld, allData, comm, commsType, tag);
675 fld.transfer(allData);
686template<
class Type,
class OutputContainer>
690 OutputContainer& allData,
701 .
mpiGather(sendData, allData, comm, commsType, tag);
711template<
class Type,
class OutputContainer>
721 OutputContainer allData;
722 mpiGatherOp(sendData, allData, comm, commsType, tag);
740 mpiGatherOp(
fld, allData, comm, commsType, tag);
744 fld.transfer(allData);
769 .
gather(sendData, allData, tag, commsType, comm);
779template<
class Type,
class Addr>
793 .
gather(sendData, allData, tag, commsType, comm);
803template<
class Type,
class OutputContainer>
812 OutputContainer allData;
813 gatherOp(sendData, allData, tag, commsType, comm);
818template<
class Type,
class Addr,
class OutputContainer>
827 OutputContainer allData;
828 gatherOp(sendData, allData, tag, commsType, comm);
852template<
class ProcIDsContainer,
class Type>
857 const ProcIDsContainer& procIDs,
882 for (label i = 1; i < procIDs.size(); ++i)
884 const SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
886 if (procSlot.
empty())
904 OPstream toProc(commsType, procIDs[i], 0, tag, comm);
941 IPstream fromMaster(commsType, procIDs[0], 0, tag, comm);
970 UPstream::procID(comm),
986template<
class Type,
class OutputContainer>
1000 const label localLen
1002 UPstream::listScatterValues<label>(this->localSizes(), comm)
1005 OutputContainer localData(localLen);
1006 this->scatter(allData, localData, tag, commsType, comm);
1013 return OutputContainer(allData);
1018template<
class Type,
class CombineOp>
1023 const CombineOp& cop,
1030 if (globalIds.
size())
1038 bin(offsets(), globalIds, order, validBins)
1044 for (
const auto proci : validBins)
1048 for (label& val : localIDs)
1050 val = toLocal(proci, val);
1061 for (
const int proci : sendBufs.
allProcs())
1079 for (
const auto proci : validBins)
1081 label start = bins.
offsets()[proci];
1085 bins.
offsets()[proci+1]-start,
Info<< nl<< "Wrote faMesh in vtk format: "<< writer.output().name()<< nl;}{ vtk::lineWriter writer(aMesh.points(), aMesh.edges(), fileName(aMesh.mesh().time().globalPath()/"finiteArea-edges"));writer.writeGeometry();writer.beginCellData(4);writer.writeProcIDs();{ Field< scalar > fld(faMeshTools::flattenEdgeField(aMesh.magLe(), true))
A packed storage unstructured matrix of objects of type <T> using an offset table for access.
const labelList & offsets() const noexcept
Return the offset table (= size()+1)
A 1D vector of objects of type <T> that resizes itself as necessary to accept the new objects.
Input inter-processor communications stream.
Base for lists with indirect addressing, templated on the list contents type and the addressing type.
label size() const noexcept
The number of elements in the list.
void resize_nocopy(const label len)
Adjust allocated size of list without necessarily retaining old content.
void resize(const label len)
Adjust allocated size of list.
void clear()
Clear the list, i.e. set size to zero.
Output inter-processor communications stream.
unsigned int get() const
Get value as unsigned, no range-checking.
Buffers for inter-processor communications streams (UOPstream, UIPstream).
UPstream::rangeType allProcs() const noexcept
Range of ranks indices associated with PstreamBuffers.
label nProcs() const noexcept
Number of ranks associated with PstreamBuffers.
label recvDataCount(const label proci) const
void finishedSends(const bool wait=true)
Mark sends as done.
virtual bool read()
Re-read model coefficients if they have changed.
A List obtained as a section of another List.
A List with indirect addressing. Like IndirectList but does not store addressing.
char * data_bytes() noexcept
Return pointer to the underlying array serving as data storage, reinterpreted as byte data.
void deepCopy(const UList< T > &list)
Copy elements of the given UList. Sizes must match!
bool empty() const noexcept
True if the UList is empty (ie, size() is zero)
const T * cdata() const noexcept
Return pointer to the underlying array serving as data storage.
const char * cdata_bytes() const noexcept
Return pointer to the underlying array serving as data storage, reinterpreted as byte data.
void size(const label n)
Older name for setAddressableSize.
std::streamsize size_bytes() const noexcept
Number of contiguous bytes for the List data.
T & last()
Return the last element of the list.
commsTypes
Types of communications.
@ nonBlocking
"nonBlocking"
static void gather(const char *sendData, int sendCount, char *recvData, const UList< int > &recvCounts, const UList< int > &recvOffsets, const label communicator=worldComm)
Receive variable length char data from all ranks.
static label nRequests()
Get number of outstanding requests.
static bool broadcast(char *buf, const std::streamsize bufSize, const label communicator=worldComm, const int rootProcNo=masterNo())
static void waitRequests(const label start=0)
Wait until all requests (from start onwards) have finished.
static bool & parRun() noexcept
Test if this a parallel run.
virtual bool write()
Write the output fields.
Calculates a unique integer (label so might not have enough room - 2G max) for processor + local index.
static void gatherInplaceOp(List< Type > &fld, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const label comm=UPstream::worldComm)
Inplace collect data in processor order on master (in serial: a no-op).
static void mpiGatherInplaceOp(List< Type > &fld, const label comm=UPstream::worldComm, const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const int tag=UPstream::msgType())
Use MPI_Gatherv call to inplace collect contiguous data when possible. (in serial: a no-op).
label localSize() const
My local size.
labelRange range() const
Return start/size range of local processor data.
label localStart() const
My local start.
label nProcs() const noexcept
The number of processors covered by the offsets.
static void gather(const labelUList &offsets, const label comm, const ProcIDsContainer &procIDs, const UList< Type > &fld, List< Type > &allFld, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking)
Collect data in processor order on master (== procIDs[0]).
void gatherInplace(List< Type > &fld, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const label comm=UPstream::worldComm) const
Inplace collect data in processor order on master (in serial: a no-op).
static void scatter(const labelUList &offsets, const label comm, const ProcIDsContainer &procIDs, const UList< Type > &allFld, UList< Type > &fld, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking)
Distribute data in processor order.
void mpiGather(const UList< Type > &sendData, OutputContainer &allData, const label comm=UPstream::worldComm, const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const int tag=UPstream::msgType()) const
Use MPI_Gatherv call for contiguous data when possible (in serial: performs a simple copy).
static void mpiGatherOp(const UList< Type > &sendData, OutputContainer &allData, const label comm=UPstream::worldComm, const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const int tag=UPstream::msgType())
Use MPI_Gatherv call to collect contiguous data when possible (in serial: performs a simple copy).
label totalSize() const
Global sum of localSizes.
static void gatherOp(const UList< Type > &sendData, List< Type > &allData, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const label comm=UPstream::worldComm)
Collect data in processor order on master (in serial: performs a simple copy).
static void gatherValues(const label comm, const ProcIDsContainer &procIDs, const Type &localValue, List< Type > &allValues, const int tag=UPstream::msgType(), const UPstream::commsTypes=UPstream::commsTypes::nonBlocking)
Which processor does global id come from?
void mpiGatherInplace(List< Type > &fld, const label comm=UPstream::worldComm, const UPstream::commsTypes=UPstream::commsTypes::nonBlocking, const int tag=UPstream::msgType()) const
Use MPI_Gatherv call to inplace collect contiguous data when possible. (in serial: a no-op).
int myProcNo() const noexcept
Return processor number.
splitCell * master() const
OBJstream os(runTime.globalPath()/outputName)
List< T > values(const HashTable< T, Key, Hash > &tbl, const bool doSort=false)
List of values from HashTable, optionally sorted.
List< label > labelList
A List of labels.
A template class to specify if a data type is composed solely of Foam::label elements.
A template class to specify if a data type is composed solely of Foam::scalar elements.
A template class to specify that a data type can be considered as being contiguous in memory.