gatherScatterList.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2015-2021 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

Description
    Gather data from all processors onto a single processor according to
    some communication schedule (usually linear-to-master or
    tree-to-master). The gathered data will be a list in which element
    procID holds the data from processor procID. Before calling, every
    processor should insert its own value into
    Values[UPstream::myProcNo(comm)].

    Note: after the gather every processor only knows its own data and
    that of the processors below it. Only the 'master' of the
    communication schedule holds a fully filled List. Use scatter to
    distribute the data. An illustrative usage sketch follows this header.

\*---------------------------------------------------------------------------*/
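
// Example usage (an illustrative sketch, not part of the original sources;
// 'myValue' is a hypothetical placeholder for the per-processor data):
//
//     const label comm = UPstream::worldComm;
//
//     List<scalar> allValues(UPstream::nProcs(comm));
//     allValues[UPstream::myProcNo(comm)] = myValue;
//
//     // After gatherList only the master holds the complete list
//     Pstream::gatherList(allValues, UPstream::msgType(), comm);
//
//     // After scatterList every processor holds the complete list
//     Pstream::scatterList(allValues, UPstream::msgType(), comm);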

#include "IPstream.H"
#include "OPstream.H"
#include "contiguous.H"

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

namespace Foam
{

// * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //

template<class T>
void Pstream::gatherList
(
    const List<UPstream::commsStruct>& comms,
    List<T>& Values,
    const int tag,
    const label comm
)
{
    if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
    {
        if (Values.size() != UPstream::nProcs(comm))
        {
            FatalErrorInFunction
                << "Size of list:" << Values.size()
                << " does not equal the number of processors:"
                << UPstream::nProcs(comm)
                << Foam::abort(FatalError);
        }

        // Get my communication order
        const commsStruct& myComm = comms[UPstream::myProcNo(comm)];

        // Receive from my downstairs neighbours
        forAll(myComm.below(), belowI)
        {
            label belowID = myComm.below()[belowI];
            const labelList& belowLeaves = comms[belowID].allBelow();

            if (is_contiguous<T>::value)
            {
                List<T> receivedValues(belowLeaves.size() + 1);

                UIPstream::read
                (
                    UPstream::commsTypes::scheduled,
                    belowID,
                    receivedValues.data_bytes(),
                    receivedValues.size_bytes(),
                    tag,
                    comm
                );

                Values[belowID] = receivedValues[0];

                forAll(belowLeaves, leafI)
                {
                    Values[belowLeaves[leafI]] = receivedValues[leafI + 1];
                }
            }
            else
            {
                IPstream fromBelow
                (
                    UPstream::commsTypes::scheduled,
                    belowID,
                    0,
                    tag,
                    comm
                );
                fromBelow >> Values[belowID];

                if (debug & 2)
                {
                    Pout<< " received through "
                        << belowID << " data from:" << belowID
                        << " data:" << Values[belowID] << endl;
                }

                // Receive from all other processors below belowID
                forAll(belowLeaves, leafI)
                {
                    label leafID = belowLeaves[leafI];
                    fromBelow >> Values[leafID];

                    if (debug & 2)
                    {
                        Pout<< " received through "
                            << belowID << " data from:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }

        // Send up from Values:
        // - my own value first
        // - all belowLeaves next
        if (myComm.above() != -1)
        {
            const labelList& belowLeaves = myComm.allBelow();

            if (debug & 2)
            {
                Pout<< " sending to " << myComm.above()
                    << " data from me:" << UPstream::myProcNo(comm)
                    << " data:" << Values[UPstream::myProcNo(comm)] << endl;
            }

            if (is_contiguous<T>::value)
            {
                List<T> sendingValues(belowLeaves.size() + 1);
                sendingValues[0] = Values[UPstream::myProcNo(comm)];

                forAll(belowLeaves, leafI)
                {
                    sendingValues[leafI + 1] = Values[belowLeaves[leafI]];
                }

                UOPstream::write
                (
                    UPstream::commsTypes::scheduled,
                    myComm.above(),
                    sendingValues.cdata_bytes(),
                    sendingValues.size_bytes(),
                    tag,
                    comm
                );
            }
            else
            {
                OPstream toAbove
                (
                    UPstream::commsTypes::scheduled,
                    myComm.above(),
                    0,
                    tag,
                    comm
                );
                toAbove << Values[UPstream::myProcNo(comm)];

                forAll(belowLeaves, leafI)
                {
                    label leafID = belowLeaves[leafI];

                    if (debug & 2)
                    {
                        Pout<< " sending to "
                            << myComm.above() << " data from:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                    toAbove << Values[leafID];
                }
            }
        }
    }
}


template<class T>
void Pstream::gatherList(List<T>& Values, const int tag, const label comm)
{
    if (UPstream::nProcs(comm) < UPstream::nProcsSimpleSum)
    {
        gatherList(UPstream::linearCommunication(comm), Values, tag, comm);
    }
    else
    {
        gatherList(UPstream::treeCommunication(comm), Values, tag, comm);
    }
}


template<class T>
void Pstream::scatterList
(
    const List<UPstream::commsStruct>& comms,
    List<T>& Values,
    const int tag,
    const label comm
)
{
    if (UPstream::parRun() && UPstream::nProcs(comm) > 1)
    {
        if (Values.size() != UPstream::nProcs(comm))
        {
            FatalErrorInFunction
                << "Size of list:" << Values.size()
                << " does not equal the number of processors:"
                << UPstream::nProcs(comm)
                << Foam::abort(FatalError);
        }

        // Get my communication order
        const commsStruct& myComm = comms[UPstream::myProcNo(comm)];

        // Receive from up
        if (myComm.above() != -1)
        {
            const labelList& notBelowLeaves = myComm.allNotBelow();

            if (is_contiguous<T>::value)
            {
                List<T> receivedValues(notBelowLeaves.size());

                UIPstream::read
                (
                    UPstream::commsTypes::scheduled,
                    myComm.above(),
                    receivedValues.data_bytes(),
                    receivedValues.size_bytes(),
                    tag,
                    comm
                );

                forAll(notBelowLeaves, leafI)
                {
                    Values[notBelowLeaves[leafI]] = receivedValues[leafI];
                }
            }
            else
            {
                IPstream fromAbove
                (
                    UPstream::commsTypes::scheduled,
                    myComm.above(),
                    0,
                    tag,
                    comm
                );

                forAll(notBelowLeaves, leafI)
                {
                    label leafID = notBelowLeaves[leafI];
                    fromAbove >> Values[leafID];

                    if (debug)
                    {
                        Pout<< " received through "
                            << myComm.above() << " data for:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }

        // Send to my downstairs neighbours
        forAllReverse(myComm.below(), belowI)
        {
            label belowID = myComm.below()[belowI];
            const labelList& notBelowLeaves = comms[belowID].allNotBelow();

            if (is_contiguous<T>::value)
            {
                List<T> sendingValues(notBelowLeaves.size());

                forAll(notBelowLeaves, leafI)
                {
                    sendingValues[leafI] = Values[notBelowLeaves[leafI]];
                }

                UOPstream::write
                (
                    UPstream::commsTypes::scheduled,
                    belowID,
                    sendingValues.cdata_bytes(),
                    sendingValues.size_bytes(),
                    tag,
                    comm
                );
            }
            else
            {
                OPstream toBelow
                (
                    UPstream::commsTypes::scheduled,
                    belowID,
                    0,
                    tag,
                    comm
                );

                // Send data destined for all other processors below belowID
                forAll(notBelowLeaves, leafI)
                {
                    label leafID = notBelowLeaves[leafI];
                    toBelow << Values[leafID];

                    if (debug)
                    {
                        Pout<< " sent through "
                            << belowID << " data for:" << leafID
                            << " data:" << Values[leafID] << endl;
                    }
                }
            }
        }
    }
}


template<class T>
void Pstream::scatterList(List<T>& Values, const int tag, const label comm)
{
    if (UPstream::nProcs(comm) < UPstream::nProcsSimpleSum)
    {
        scatterList(UPstream::linearCommunication(comm), Values, tag, comm);
    }
    else
    {
        scatterList(UPstream::treeCommunication(comm), Values, tag, comm);
    }
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

} // End namespace Foam

// ************************************************************************* //