UPstream.C
1 /*---------------------------------------------------------------------------*\
2  ========= |
3  \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4  \\ / O peration |
5  \\ / A nd | www.openfoam.com
6  \\/ M anipulation |
7 -------------------------------------------------------------------------------
8  Copyright (C) 2011-2017 OpenFOAM Foundation
9  Copyright (C) 2016-2019 OpenCFD Ltd.
10 -------------------------------------------------------------------------------
11 License
12  This file is part of OpenFOAM.
13 
14  OpenFOAM is free software: you can redistribute it and/or modify it
15  under the terms of the GNU General Public License as published by
16  the Free Software Foundation, either version 3 of the License, or
17  (at your option) any later version.
18 
19  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22  for more details.
23 
24  You should have received a copy of the GNU General Public License
25  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26 
27 \*---------------------------------------------------------------------------*/
28 
29 #include "Pstream.H"
30 #include "PstreamReduceOps.H"
31 #include "PstreamGlobals.H"
32 #include "profilingPstream.H"
33 #include "SubList.H"
34 #include "allReduce.H"
35 #include "int.H"
36 #include "collatedFileOperation.H"
37 
38 #include <mpi.h>
39 #include <cstring>
40 #include <cstdlib>
41 #include <csignal>
42 
43 #if defined(WM_SP) || defined(WM_SPDP)
44  #define MPI_SCALAR MPI_FLOAT
45 #elif defined(WM_DP)
46  #define MPI_SCALAR MPI_DOUBLE
47 #endif
48 
49 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
50 
51 // The minimum value and default length for MPI buffers
52 constexpr int minBufLen = 20000000;
53 
54 // Track if we have attached MPI buffers
55 static bool ourBuffers = false;
56 
57 // Track if we initialized MPI
58 static bool ourMpi = false;
59 
60 
61 // * * * * * * * * * * * * * * * Local Functions * * * * * * * * * * * * * * //
62 
63 static void attachOurBuffers()
64 {
65  if (ourBuffers)
66  {
67  return; // Already attached
68  }
69  ourBuffers = true;
70 
71  // Use UPstream::mpiBufferSize (optimisationSwitch),
72  // but allow override with MPI_BUFFER_SIZE env variable (int value)
73 
74 #ifndef SGIMPI
75  int len = 0;
76 
77  const std::string str(Foam::getEnv("MPI_BUFFER_SIZE"));
78  if (str.empty() || !Foam::read(str, len) || len <= 0)
79  {
80  len = Foam::UPstream::mpiBufferSize;
81  }
82 
83  if (len < minBufLen)
84  {
85  len = minBufLen;
86  }
87
88  if (Foam::UPstream::debug)
89  {
90  Foam::Pout<< "UPstream::init : buffer-size " << len << '\n';
91  }
92 
93  char* buf = new char[len];
94 
95  if (MPI_SUCCESS != MPI_Buffer_attach(buf, len))
96  {
97  delete[] buf;
98  Foam::Pout<< "UPstream::init : could not attach buffer\n";
99  }
100 #endif
101 }
102 
103 
104 static void detachOurBuffers()
105 {
106  if (!ourBuffers)
107  {
108  return; // Nothing to detach
109  }
110  ourBuffers = false;
111 
112  // Some MPI notes suggest that the return code is MPI_SUCCESS when
113  // no buffer is attached.
114  // Be extra careful and require a non-zero size as well.
115 
116 #ifndef SGIMPI
117  int len = 0;
118  char* buf = nullptr;
119 
120  if (MPI_SUCCESS == MPI_Buffer_detach(&buf, &len) && len)
121  {
122  delete[] buf;
123  }
124 #endif
125 }
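The attach/detach pair above supplies the workspace that MPI buffered sends (MPI_Bsend) draw from, sized from UPstream::mpiBufferSize or the MPI_BUFFER_SIZE environment variable. For reference, a minimal stand-alone sketch of the same attach/use/detach cycle in plain MPI (illustrative only, not part of this file):

#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0, nProcs = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

    // Attach a user-supplied buffer; MPI_Bsend copies into it and returns
    const int len = 20000000;
    char* buf = new char[len];
    MPI_Buffer_attach(buf, len);

    int value = rank;
    if (nProcs > 1)
    {
        if (rank == 1)
        {
            MPI_Bsend(&value, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
        }
        else if (rank == 0)
        {
            MPI_Recv(&value, 1, MPI_INT, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
    }

    // Detach hands back the attached pointer and size; free it only afterwards
    void* oldBuf = nullptr;
    int oldLen = 0;
    MPI_Buffer_detach(&oldBuf, &oldLen);
    delete[] static_cast<char*>(oldBuf);

    MPI_Finalize();
    return 0;
}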
126 
127 
128 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
129 
130 // NOTE:
131 // valid parallel options vary between implementations, but flag common ones.
132 // if they are not removed by MPI_Init(), the subsequent argument processing
133 // will notice that they are wrong
134 void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
135 {
136  validParOptions.insert("np", "");
137  validParOptions.insert("p4pg", "PI file");
138  validParOptions.insert("p4wd", "directory");
139  validParOptions.insert("p4amslave", "");
140  validParOptions.insert("p4yourname", "hostname");
141  validParOptions.insert("machinefile", "machine file");
142 }
143 
144
145 bool Foam::UPstream::initNull()
146 {
147  int flag = 0;
148 
149  MPI_Finalized(&flag);
150  if (flag)
151  {
152  // Already finalized - this is an error
153  FatalErrorInFunction
154  << "MPI was already finalized - cannot perform MPI_Init\n"
155  << Foam::abort(FatalError);
156
157  return false;
158  }
159 
160  MPI_Initialized(&flag);
161  if (flag)
162  {
163  if (debug)
164  {
165  Pout<< "UPstream::initNull : was already initialized\n";
166  }
167  }
168  else
169  {
170  // Not already initialized
171 
172  MPI_Init_thread
173  (
174  nullptr, // argc
175  nullptr, // argv
176  MPI_THREAD_SINGLE,
177  &flag // provided_thread_support
178  );
179 
180  ourMpi = true;
181  }
182 
183  // Could also attach buffers etc.
184 
185  return true;
186 }
187 
188 
189 bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
190 {
191  int numprocs = 0, myRank = 0;
192  int provided_thread_support = 0;
193  int flag = 0;
194 
195  MPI_Finalized(&flag);
196  if (flag)
197  {
198  // Already finalized - this is an error
199  FatalErrorInFunction
200  << "MPI was already finalized - cannot perform MPI_Init" << endl
201  << Foam::abort(FatalError);
202
203  return false;
204  }
205 
206  MPI_Initialized(&flag);
207  if (flag)
208  {
209  // Already initialized.
210  // Warn if we've called twice, but skip if initialized externally
211 
212  if (ourMpi)
213  {
214  WarningInFunction
215  << "MPI was already initialized - cannot perform MPI_Init" << nl
216  << "This could indicate an application programming error!"
217  << endl;
218 
219  return true;
220  }
221  else if (debug)
222  {
223  Pout<< "UPstream::init : was already initialized\n";
224  }
225  }
226  else
227  {
228  MPI_Init_thread
229  (
230  &argc,
231  &argv,
232  (
233  needsThread
234  ? MPI_THREAD_MULTIPLE
235  : MPI_THREAD_SINGLE
236  ),
237  &provided_thread_support
238  );
239 
240  ourMpi = true;
241  }
242 
243  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
244  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
245 
246  if (debug)
247  {
248  Pout<< "UPstream::init : procs=" << numprocs
249  << " rank:" << myRank << endl;
250  }
251 
252  if (numprocs <= 1)
253  {
254  FatalErrorInFunction
255  << "attempt to run parallel on 1 processor"
256  << Foam::abort(FatalError);
257  }
258 
259  // Initialise parallel structure
260  setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);
261
262  attachOurBuffers();
263
264  return true;
265 }
266 
267 
268 void Foam::UPstream::exit(int errnum)
269 {
270  if (debug)
271  {
272  Pout<< "UPstream::exit\n";
273  }
274 
275  int flag = 0;
276 
277  MPI_Initialized(&flag);
278  if (!flag)
279  {
280  // Not initialized - just exit
281  std::exit(errnum);
282  return;
283  }
284 
285  MPI_Finalized(&flag);
286  if (flag)
287  {
288  // Already finalized elsewhere?
289  if (ourMpi)
290  {
291  WarningInFunction
292  << "MPI was already finalized (by a connected program?)\n";
293  }
294  else if (debug)
295  {
296  Pout<< "UPstream::exit : was already finalized\n";
297  }
298  }
299  else
300  {
301  detachOurBuffers();
302  }
303 
304 
305  const label nOutstanding = PstreamGlobals::outstandingRequests_.size();
306  if (nOutstanding)
307  {
308  PstreamGlobals::outstandingRequests_.clear();
309
310  WarningInFunction
311  << "There were still " << nOutstanding
312  << " outstanding MPI_Requests." << nl
313  << "Which means your code exited before doing a "
314  << " UPstream::waitRequests()." << nl
315  << "This should not happen for a normal code exit."
316  << nl;
317  }
318 
319  // Clean mpi communicators
320  forAll(myProcNo_, communicator)
321  {
322  if (myProcNo_[communicator] != -1)
323  {
324  freePstreamCommunicator(communicator);
325  }
326  }
327 
328  if (!flag)
329  {
330  // MPI not already finalized
331 
332  if (!ourMpi)
333  {
335  << "Finalizing MPI, but was initialized elsewhere\n";
336  }
337 
338  if (errnum == 0)
339  {
340  MPI_Finalize();
341  }
342  else
343  {
344  MPI_Abort(MPI_COMM_WORLD, errnum);
345  }
346  }
347 
348  std::exit(errnum);
349 }
350 
351
352 void Foam::UPstream::abort()
353 {
354  MPI_Abort(MPI_COMM_WORLD, 1);
355 }
356 
357 
358 void Foam::reduce
359 (
360  scalar& Value,
361  const sumOp<scalar>& bop,
362  const int tag,
363  const label communicator
364 )
365 {
366  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
367  {
368  Pout<< "** reducing:" << Value << " with comm:" << communicator
369  << " warnComm:" << UPstream::warnComm
370  << endl;
371  error::printStack(Pout);
372  }
373  allReduce(Value, 1, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
374 }
375 
376 
377 void Foam::reduce
378 (
379  scalar& Value,
380  const minOp<scalar>& bop,
381  const int tag,
382  const label communicator
383 )
384 {
385  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
386  {
387  Pout<< "** reducing:" << Value << " with comm:" << communicator
388  << " warnComm:" << UPstream::warnComm
389  << endl;
390  error::printStack(Pout);
391  }
392  allReduce(Value, 1, MPI_SCALAR, MPI_MIN, bop, tag, communicator);
393 }
394 
395 
396 void Foam::reduce
397 (
398  vector2D& Value,
399  const sumOp<vector2D>& bop,
400  const int tag,
401  const label communicator
402 )
403 {
404  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
405  {
406  Pout<< "** reducing:" << Value << " with comm:" << communicator
407  << " warnComm:" << UPstream::warnComm
408  << endl;
409  error::printStack(Pout);
410  }
411  allReduce(Value, 2, MPI_SCALAR, MPI_SUM, bop, tag, communicator);
412 }
413 
414 
415 void Foam::sumReduce
416 (
417  scalar& Value,
418  label& Count,
419  const int tag,
420  const label communicator
421 )
422 {
423  if (UPstream::warnComm != -1 && communicator != UPstream::warnComm)
424  {
425  Pout<< "** reducing:" << Value << " with comm:" << communicator
426  << " warnComm:" << UPstream::warnComm
427  << endl;
428  error::printStack(Pout);
429  }
430  vector2D twoScalars(Value, scalar(Count));
431  reduce(twoScalars, sumOp<vector2D>(), tag, communicator);
432 
433  Value = twoScalars.x();
434  Count = twoScalars.y();
435 }
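sumReduce above packs the value and its count into one vector2D so that a single reduction returns both; the usual application is a global average. A hedged usage sketch (assumes an initialised parallel run and the usual Pstream/scalarField headers; the function name is illustrative):

Foam::scalar globalAverage(const Foam::scalarField& localValues)
{
    Foam::scalar total = Foam::sum(localValues);   // local partial sum
    Foam::label count = localValues.size();        // local sample count

    // One call yields both the global sum and the global count
    Foam::sumReduce(total, count);

    return (count > 0 ? total/count : 0);
}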
436 
437 
438 void Foam::reduce
439 (
440  scalar& Value,
441  const sumOp<scalar>& bop,
442  const int tag,
443  const label communicator,
444  label& requestID
445 )
446 {
447 #ifdef MPIX_COMM_TYPE_SHARED
448  // Assume mpich2 with non-blocking collectives extensions. Once mpi3
449  // is available this will change.
450  MPI_Request request;
451  scalar v = Value;
452  MPIX_Ireduce
453  (
454  &v,
455  &Value,
456  1,
457  MPI_SCALAR,
458  MPI_SUM,
459  0, //root
460  PstreamGlobals::MPICommunicators_[communicator],
461  &request
462  );
463 
464  requestID = PstreamGlobals::outstandingRequests_.size();
465  PstreamGlobals::outstandingRequests_.append(request);
466 
467  if (UPstream::debug)
468  {
469  Pout<< "UPstream::allocateRequest for non-blocking reduce"
470  << " : request:" << requestID
471  << endl;
472  }
473 #else
474  // Non-blocking not yet implemented in mpi
475  reduce(Value, bop, tag, communicator);
476  requestID = -1;
477 #endif
478 }
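The requestID written by this overload feeds the same bookkeeping as any other non-blocking operation: keep the id and wait on it later. A sketch of the intended call pattern (illustrative; on a plain MPI build the reduce completes immediately and requestID stays -1):

Foam::scalar residual = 0;   // some locally accumulated value
Foam::label requestID = -1;

Foam::reduce
(
    residual,
    Foam::sumOp<Foam::scalar>(),
    Foam::UPstream::msgType(),
    Foam::UPstream::worldComm,
    requestID
);

// ... overlap unrelated local work here ...

if (requestID != -1)
{
    Foam::UPstream::waitRequest(requestID);
}
// residual now holds the reduced (summed) value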
479 
480
481 void Foam::UPstream::allToAll
482 (
483  const labelUList& sendData,
484  labelUList& recvData,
485  const label communicator
486 )
487 {
488  label np = nProcs(communicator);
489 
490  if (sendData.size() != np || recvData.size() != np)
491  {
492  FatalErrorInFunction
493  << "Size of sendData " << sendData.size()
494  << " or size of recvData " << recvData.size()
495  << " is not equal to the number of processors in the domain "
496  << np
497  << Foam::abort(FatalError);
498  }
499 
500  if (!UPstream::parRun())
501  {
502  recvData.deepCopy(sendData);
503  }
504  else
505  {
506  profilingPstream::beginTiming();
507
508  if
509  (
510  MPI_Alltoall
511  (
512  // NOTE: const_cast is a temporary hack for
513  // backward-compatibility with versions of OpenMPI < 1.7.4
514  const_cast<label*>(sendData.begin()),
515  sizeof(label),
516  MPI_BYTE,
517  recvData.begin(),
518  sizeof(label),
519  MPI_BYTE,
520  PstreamGlobals::MPICommunicators_[communicator]
521  )
522  )
523  {
524  FatalErrorInFunction
525  << "MPI_Alltoall failed for " << sendData
526  << " on communicator " << communicator
527  << Foam::abort(FatalError);
528  }
529
530  profilingPstream::addAllToAllTime();
531  }
532 }
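The one-label-per-rank form above is the classic size exchange that precedes a variable-length transfer: each rank tells every other rank how much it intends to send. A hedged sketch of that use (names are illustrative):

const Foam::label nProcs = Foam::UPstream::nProcs();

Foam::labelList sendSizes(nProcs, 0);
// ... fill sendSizes[proci] with the number of items destined for proci ...

Foam::labelList recvSizes(nProcs, 0);
Foam::UPstream::allToAll(sendSizes, recvSizes);

// recvSizes[proci] now holds how many items rank proci will send to me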
533 
534
535 void Foam::UPstream::allToAll
536 (
537  const char* sendData,
538  const UList<int>& sendSizes,
539  const UList<int>& sendOffsets,
540 
541  char* recvData,
542  const UList<int>& recvSizes,
543  const UList<int>& recvOffsets,
544 
545  const label communicator
546 )
547 {
548  label np = nProcs(communicator);
549 
550  if
551  (
552  sendSizes.size() != np
553  || sendOffsets.size() != np
554  || recvSizes.size() != np
555  || recvOffsets.size() != np
556  )
557  {
558  FatalErrorInFunction
559  << "Size of sendSize " << sendSizes.size()
560  << ", sendOffsets " << sendOffsets.size()
561  << ", recvSizes " << recvSizes.size()
562  << " or recvOffsets " << recvOffsets.size()
563  << " is not equal to the number of processors in the domain "
564  << np
565  << Foam::abort(FatalError);
566  }
567 
568  if (!UPstream::parRun())
569  {
570  if (recvSizes[0] != sendSizes[0])
571  {
572  FatalErrorInFunction
573  << "Bytes to send " << sendSizes[0]
574  << " does not equal bytes to receive " << recvSizes[0]
575  << Foam::abort(FatalError);
576  }
577  std::memmove(recvData, &sendData[sendOffsets[0]], recvSizes[0]);
578  }
579  else
580  {
581  profilingPstream::beginTiming();
582
583  if
584  (
585  MPI_Alltoallv
586  (
587  const_cast<char*>(sendData),
588  const_cast<int*>(sendSizes.begin()),
589  const_cast<int*>(sendOffsets.begin()),
590  MPI_BYTE,
591  recvData,
592  const_cast<int*>(recvSizes.begin()),
593  const_cast<int*>(recvOffsets.begin()),
594  MPI_BYTE,
595  PstreamGlobals::MPICommunicators_[communicator]
596  )
597  )
598  {
599  FatalErrorInFunction
600  << "MPI_Alltoallv failed for sendSizes " << sendSizes
601  << " recvSizes " << recvSizes
602  << " communicator " << communicator
603  << Foam::abort(FatalError);
604  }
605
606  profilingPstream::addAllToAllTime();
607  }
608 }
609 
610
611 void Foam::UPstream::gather
612 (
613  const char* sendData,
614  int sendSize,
615 
616  char* recvData,
617  const UList<int>& recvSizes,
618  const UList<int>& recvOffsets,
619  const label communicator
620 )
621 {
622  label np = nProcs(communicator);
623 
624  if
625  (
626  UPstream::master(communicator)
627  && (recvSizes.size() != np || recvOffsets.size() < np)
628  )
629  {
630  // Note: allow recvOffsets to be e.g. 1 larger than np so we
631  // can easily loop over the result
632
633  FatalErrorInFunction
634  << "Size of recvSizes " << recvSizes.size()
635  << " or recvOffsets " << recvOffsets.size()
636  << " is not equal to the number of processors in the domain "
637  << np
638  << Foam::abort(FatalError);
639  }
640 
641  if (!UPstream::parRun())
642  {
643  std::memmove(recvData, sendData, sendSize);
644  }
645  else
646  {
647  profilingPstream::beginTiming();
648
649  if
650  (
651  MPI_Gatherv
652  (
653  const_cast<char*>(sendData),
654  sendSize,
655  MPI_BYTE,
656  recvData,
657  const_cast<int*>(recvSizes.begin()),
658  const_cast<int*>(recvOffsets.begin()),
659  MPI_BYTE,
660  0,
661  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
662  )
663  )
664  {
665  FatalErrorInFunction
666  << "MPI_Gatherv failed for sendSize " << sendSize
667  << " recvSizes " << recvSizes
668  << " communicator " << communicator
669  << Foam::abort(FatalError);
670  }
671
672  profilingPstream::addGatherTime();
673  }
674 }
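gather and scatter above take per-rank byte counts plus offsets into one flat buffer; the offsets are simply an exclusive prefix sum of the sizes, and (as the note above says) one extra trailing entry conveniently carries the total. A sketch of how a caller might build them (recvSizes is assumed to be a UList<int> already gathered on the master):

Foam::List<int> recvOffsets(recvSizes.size() + 1);
recvOffsets[0] = 0;
forAll(recvSizes, proci)
{
    recvOffsets[proci+1] = recvOffsets[proci] + recvSizes[proci];
}

// Total receive size, e.g. for allocating recvData on the master
const int totalBytes = recvOffsets.last();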
675 
676
677 void Foam::UPstream::scatter
678 (
679  const char* sendData,
680  const UList<int>& sendSizes,
681  const UList<int>& sendOffsets,
682 
683  char* recvData,
684  int recvSize,
685  const label communicator
686 )
687 {
688  label np = nProcs(communicator);
689 
690  if
691  (
692  UPstream::master(communicator)
693  && (sendSizes.size() != np || sendOffsets.size() != np)
694  )
695  {
696  FatalErrorInFunction
697  << "Size of sendSizes " << sendSizes.size()
698  << " or sendOffsets " << sendOffsets.size()
699  << " is not equal to the number of processors in the domain "
700  << np
701  << Foam::abort(FatalError);
702  }
703 
704  if (!UPstream::parRun())
705  {
706  std::memmove(recvData, sendData, recvSize);
707  }
708  else
709  {
710  profilingPstream::beginTiming();
711
712  if
713  (
714  MPI_Scatterv
715  (
716  const_cast<char*>(sendData),
717  const_cast<int*>(sendSizes.begin()),
718  const_cast<int*>(sendOffsets.begin()),
719  MPI_BYTE,
720  recvData,
721  recvSize,
722  MPI_BYTE,
723  0,
724  MPI_Comm(PstreamGlobals::MPICommunicators_[communicator])
725  )
726  )
727  {
728  FatalErrorInFunction
729  << "MPI_Scatterv failed for sendSizes " << sendSizes
730  << " sendOffsets " << sendOffsets
731  << " communicator " << communicator
732  << Foam::abort(FatalError);
733  }
734
735  profilingPstream::addScatterTime();
736  }
737 }
738 
739 
740 void Foam::UPstream::allocatePstreamCommunicator
741 (
742  const label parentIndex,
743  const label index
744 )
745 {
746  if (index == PstreamGlobals::MPIGroups_.size())
747  {
748  // Extend storage with dummy values
749  MPI_Group newGroup = MPI_GROUP_NULL;
750  PstreamGlobals::MPIGroups_.append(newGroup);
751  MPI_Comm newComm = MPI_COMM_NULL;
752  PstreamGlobals::MPICommunicators_.append(newComm);
753  }
754  else if (index > PstreamGlobals::MPIGroups_.size())
755  {
756  FatalErrorInFunction
757  << "PstreamGlobals out of sync with UPstream data. Problem."
758  << Foam::exit(FatalError);
759  }
760 
761 
762  if (parentIndex == -1)
763  {
764  // Allocate world communicator
765 
766  if (index != UPstream::worldComm)
767  {
768  FatalErrorInFunction
769  << "world communicator should always be index "
770  << UPstream::worldComm << Foam::exit(FatalError);
771  }
772 
773  PstreamGlobals::MPICommunicators_[index] = MPI_COMM_WORLD;
774  MPI_Comm_group(MPI_COMM_WORLD, &PstreamGlobals::MPIGroups_[index]);
775  MPI_Comm_rank
776  (
777  PstreamGlobals::MPICommunicators_[index],
778  &myProcNo_[index]
779  );
780 
781  // Set the number of processes to the actual number
782  int numProcs;
783  MPI_Comm_size(PstreamGlobals::MPICommunicators_[index], &numProcs);
784 
785  //procIDs_[index] = identity(numProcs);
786  procIDs_[index].setSize(numProcs);
787  forAll(procIDs_[index], i)
788  {
789  procIDs_[index][i] = i;
790  }
791  }
792  else
793  {
794  // Create new group
795  MPI_Group_incl
796  (
797  PstreamGlobals::MPIGroups_[parentIndex],
798  procIDs_[index].size(),
799  procIDs_[index].begin(),
800  &PstreamGlobals::MPIGroups_[index]
801  );
802 
803  // Create new communicator
804  MPI_Comm_create
805  (
806  PstreamGlobals::MPICommunicators_[parentIndex],
807  PstreamGlobals::MPIGroups_[index],
808  &PstreamGlobals::MPICommunicators_[index]
809  );
810 
811  if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
812  {
813  myProcNo_[index] = -1;
814  }
815  else
816  {
817  if
818  (
819  MPI_Comm_rank
820  (
821  PstreamGlobals::MPICommunicators_[index],
822  &myProcNo_[index]
823  )
824  )
825  {
826  FatalErrorInFunction
827  << "Problem :"
828  << " when allocating communicator at " << index
829  << " from ranks " << procIDs_[index]
830  << " of parent " << parentIndex
831  << " cannot find my own rank"
832  << Foam::exit(FatalError);
833  }
834  }
835  }
836 }
837 
838 
839 void Foam::UPstream::freePstreamCommunicator(const label communicator)
840 {
841  if (communicator != UPstream::worldComm)
842  {
843  if (PstreamGlobals::MPICommunicators_[communicator] != MPI_COMM_NULL)
844  {
845  // Free communicator. Sets communicator to MPI_COMM_NULL
846  MPI_Comm_free(&PstreamGlobals::MPICommunicators_[communicator]);
847  }
848  if (PstreamGlobals::MPIGroups_[communicator] != MPI_GROUP_NULL)
849  {
850  // Free group. Sets group to MPI_GROUP_NULL
851  MPI_Group_free(&PstreamGlobals::MPIGroups_[communicator]);
852  }
853  }
854 }
855 
856
857 Foam::label Foam::UPstream::nRequests()
858 {
859  return PstreamGlobals::outstandingRequests_.size();
860 }
861 
862
863 void Foam::UPstream::resetRequests(const label sz)
864 {
865  if (sz < PstreamGlobals::outstandingRequests_.size())
866  {
867  PstreamGlobals::outstandingRequests_.setSize(sz);
868  }
869 }
870 
871
872 void Foam::UPstream::waitRequests(const label start)
873 {
874  if (UPstream::debug)
875  {
876  Pout<< "UPstream::waitRequests : starting wait for "
877  << PstreamGlobals::outstandingRequests_.size()-start
878  << " outstanding requests starting at " << start << endl;
879  }
880
881  if (PstreamGlobals::outstandingRequests_.size())
882  {
883  SubList<MPI_Request> waitRequests
884  (
885  PstreamGlobals::outstandingRequests_,
886  PstreamGlobals::outstandingRequests_.size() - start,
887  start
888  );
889 
890  profilingPstream::beginTiming();
891
892  if
893  (
894  MPI_Waitall
895  (
896  waitRequests.size(),
897  waitRequests.begin(),
898  MPI_STATUSES_IGNORE
899  )
900  )
901  {
902  FatalErrorInFunction
903  << "MPI_Waitall returned with error" << Foam::endl;
904  }
905
906  profilingPstream::addWaitTime();
907
908  resetRequests(start);
909  }
910 
911  if (debug)
912  {
913  Pout<< "UPstream::waitRequests : finished wait." << endl;
914  }
915 }
916 
917
918 void Foam::UPstream::waitRequest(const label i)
919 {
920  if (debug)
921  {
922  Pout<< "UPstream::waitRequest : starting wait for request:" << i
923  << endl;
924  }
925 
926  if (i >= PstreamGlobals::outstandingRequests_.size())
927  {
928  FatalErrorInFunction
929  << "There are " << PstreamGlobals::outstandingRequests_.size()
930  << " outstanding send requests and you are asking for i=" << i
931  << nl
932  << "Maybe you are mixing blocking/non-blocking comms?"
933  << Foam::abort(FatalError);
934  }
935
936  profilingPstream::beginTiming();
937
938  if
939  (
940  MPI_Wait
941  (
942  &PstreamGlobals::outstandingRequests_[i],
943  MPI_STATUS_IGNORE
944  )
945  )
946  {
947  FatalErrorInFunction
948  << "MPI_Wait returned with error" << Foam::endl;
949  }
950
951  profilingPstream::addWaitTime();
952
953  if (debug)
954  {
955  Pout<< "UPstream::waitRequest : finished wait for request:" << i
956  << endl;
957  }
958 }
959 
960
961 bool Foam::UPstream::finishedRequest(const label i)
962 {
963  if (debug)
964  {
965  Pout<< "UPstream::finishedRequest : checking request:" << i
966  << endl;
967  }
968 
969  if (i >= PstreamGlobals::outstandingRequests_.size())
970  {
971  FatalErrorInFunction
972  << "There are " << PstreamGlobals::outstandingRequests_.size()
973  << " outstanding send requests and you are asking for i=" << i
974  << nl
975  << "Maybe you are mixing blocking/non-blocking comms?"
976  << Foam::abort(FatalError);
977  }
978 
979  int flag;
980  MPI_Test
981  (
982  &PstreamGlobals::outstandingRequests_[i],
983  &flag,
984  MPI_STATUS_IGNORE
985  );
986 
987  if (debug)
988  {
989  Pout<< "UPstream::finishedRequest : finished request:" << i
990  << endl;
991  }
992 
993  return flag != 0;
994 }
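nRequests(), waitRequests() and finishedRequest() all index into PstreamGlobals::outstandingRequests_, so the usual pattern is to record the request count before a batch of non-blocking transfers and wait from that mark afterwards. A sketch (illustrative):

// Remember where this batch of non-blocking operations starts
const Foam::label startOfRequests = Foam::UPstream::nRequests();

// ... issue non-blocking sends/receives; each one appends its
//     MPI_Request to PstreamGlobals::outstandingRequests_ ...

// Block until everything issued since the marker has completed
Foam::UPstream::waitRequests(startOfRequests);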
995 
996
997 int Foam::UPstream::allocateTag(const char* s)
998 {
999  int tag;
1000  if (PstreamGlobals::freedTags_.size())
1001  {
1002  tag = PstreamGlobals::freedTags_.remove();
1003  }
1004  else
1005  {
1006  tag = PstreamGlobals::nTags_++;
1007  }
1008 
1009  if (debug)
1010  {
1011  //if (UPstream::lateBlocking > 0)
1012  //{
1013  // string& poutp = Pout.prefix();
1014  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
1015  // Perr.prefix() = Pout.prefix();
1016  //}
1017  Pout<< "UPstream::allocateTag " << s
1018  << " : tag:" << tag
1019  << endl;
1020  }
1021 
1022  return tag;
1023 }
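allocateTag/freeTag hand out message tags from a shared pool, reusing freed tags first, so that overlapping exchanges on the same communicator cannot collide. A sketch of the expected pairing (the string argument is only a debug label; "myExchange" is made up):

const int tag = Foam::UPstream::allocateTag("myExchange");

// ... use 'tag' for the matching send/receive calls of one exchange ...

Foam::UPstream::freeTag("myExchange", tag);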
1024 
1025
1026 int Foam::UPstream::allocateTag(const word& s)
1027 {
1028  int tag;
1029  if (PstreamGlobals::freedTags_.size())
1030  {
1031  tag = PstreamGlobals::freedTags_.remove();
1032  }
1033  else
1034  {
1035  tag = PstreamGlobals::nTags_++;
1036  }
1037 
1038  if (debug)
1039  {
1040  //if (UPstream::lateBlocking > 0)
1041  //{
1042  // string& poutp = Pout.prefix();
1043  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = 'X';
1044  // Perr.prefix() = Pout.prefix();
1045  //}
1046  Pout<< "UPstream::allocateTag " << s
1047  << " : tag:" << tag
1048  << endl;
1049  }
1050 
1051  return tag;
1052 }
1053 
1054 
1055 void Foam::UPstream::freeTag(const char* s, const int tag)
1056 {
1057  if (debug)
1058  {
1059  //if (UPstream::lateBlocking > 0)
1060  //{
1061  // string& poutp = Pout.prefix();
1062  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
1063  // Perr.prefix() = Pout.prefix();
1064  //}
1065  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
1066  }
1067  PstreamGlobals::freedTags_.append(tag);
1068 }
1069 
1070 
1071 void Foam::UPstream::freeTag(const word& s, const int tag)
1072 {
1073  if (debug)
1074  {
1075  //if (UPstream::lateBlocking > 0)
1076  //{
1077  // string& poutp = Pout.prefix();
1078  // poutp[poutp.size()-2*(UPstream::lateBlocking+2)+tag] = ' ';
1079  // Perr.prefix() = Pout.prefix();
1080  //}
1081  Pout<< "UPstream::freeTag " << s << " tag:" << tag << endl;
1082  }
1083  PstreamGlobals::freedTags_.append(tag);
1084 }
1085 
1086 
1087 // ************************************************************************* //
Foam::expressions::patchExpr::debug
int debug
Static debugging option.
Foam::UPstream::allocateTag
static int allocateTag(const char *)
Definition: UPstream.C:997
Foam::UPstream::warnComm
static label warnComm
Debugging: warn for use of any communicator differing from warnComm.
Definition: UPstream.H:288
Foam::UPstream::resetRequests
static void resetRequests(const label sz)
Truncate number of outstanding requests.
Definition: UPstream.C:158
int.H
System signed integer.
collatedFileOperation.H
Foam::profilingPstream::addWaitTime
static void addWaitTime()
Add time increment to waitTime.
Definition: profilingPstream.H:159
Foam::error::printStack
static void printStack(Ostream &os)
Helper function to print a stack.
Definition: dummyPrintStack.C:36
SubList.H
Foam::word
A class for handling words, derived from Foam::string.
Definition: word.H:62
stdFoam::begin
constexpr auto begin(C &c) -> decltype(c.begin())
Return iterator to the beginning of the container c.
Definition: stdFoam.H:91
s
gmvFile<< "tracers "<< particles.size()<< nl;for(const passiveParticle &p :particles){ gmvFile<< p.position().x()<< " ";}gmvFile<< nl;for(const passiveParticle &p :particles){ gmvFile<< p.position().y()<< " ";}gmvFile<< nl;for(const passiveParticle &p :particles){ gmvFile<< p.position().z()<< " ";}gmvFile<< nl;forAll(lagrangianScalarNames, i){ word name=lagrangianScalarNames[i];IOField< scalar > s(IOobject(name, runTime.timeName(), cloud::prefix, mesh, IOobject::MUST_READ, IOobject::NO_WRITE))
Definition: gmvOutputSpray.H:25
Foam::UPstream::exit
static void exit(int errnum=1)
Exit program.
Definition: UPstream.C:59
Foam::read
bool read(const char *buf, int32_t &val)
Same as readInt32.
Definition: int32.H:108
Foam::UPstream::parRun
static bool & parRun()
Is this a parallel run?
Definition: UPstream.H:414
Foam::UPstream::waitRequests
static void waitRequests(const label start=0)
Wait until all requests (from start onwards) have finished.
Definition: UPstream.C:162
Foam::profilingPstream::beginTiming
static void beginTiming()
Update timer prior to measurement.
Definition: profilingPstream.H:123
Foam::PstreamGlobals::outstandingRequests_
DynamicList< MPI_Request > outstandingRequests_
Outstanding non-blocking operations.
Definition: PstreamGlobals.C:32
Foam::UPstream::abort
static void abort()
Abort program.
Definition: UPstream.C:66
Foam::endl
Ostream & endl(Ostream &os)
Add newline and flush stream.
Definition: Ostream.H:337
Foam::Pout
prefixOSstream Pout
An Ostream wrapper for parallel output to std::cout.
Foam::UPstream::allToAll
static void allToAll(const labelUList &sendData, labelUList &recvData, const label communicator=0)
Exchange label with all processors (in the communicator).
Definition: UPstream.C:100
Foam::UPstream::gather
static void gather(const char *sendData, int sendSize, char *recvData, const UList< int > &recvSizes, const UList< int > &recvOffsets, const label communicator=0)
Receive data from all processors on the master.
Definition: UPstream.C:111
Foam::UList::begin
iterator begin()
Return an iterator to begin traversing the UList.
Definition: UListI.H:276
Foam::getEnv
string getEnv(const std::string &envName)
Get environment value for given envName.
Definition: MSwindows.C:371
forAll
#define forAll(list, i)
Loop across all elements in list.
Definition: stdFoam.H:290
Foam::profilingPstream::addScatterTime
static void addScatterTime()
Add time increment to scatterTime.
Definition: profilingPstream.H:147
Foam::PstreamGlobals::MPICommunicators_
DynamicList< MPI_Comm > MPICommunicators_
Definition: PstreamGlobals.C:38
Foam::allReduce
void allReduce(Type &Value, int count, MPI_Datatype MPIType, MPI_Op op, const BinaryOp &bop, const int tag, const label communicator)
Definition: allReduceTemplates.C:36
Foam::PstreamGlobals::MPIGroups_
DynamicList< MPI_Group > MPIGroups_
Definition: PstreamGlobals.C:39
ourMpi
static bool ourMpi
Definition: UPstream.C:58
Foam::reduce
void reduce(const List< UPstream::commsStruct > &comms, T &Value, const BinaryOp &bop, const int tag, const label comm)
Definition: PstreamReduceOps.H:51
Foam::UPstream::mpiBufferSize
static const int mpiBufferSize
MPI buffer-size (bytes)
Definition: UPstream.H:282
Foam::label
intWM_LABEL_SIZE_t label
A label is an int32_t or int64_t as specified by the pre-processor macro WM_LABEL_SIZE.
Definition: label.H:62
Foam::profilingPstream::addGatherTime
static void addGatherTime()
Add time increment to gatherTime.
Definition: profilingPstream.H:141
Foam::DynamicList::append
DynamicList< T, SizeMin > & append(const T &val)
Append an element to the end of this list.
Definition: DynamicListI.H:472
Foam::UPstream::waitRequest
static void waitRequest(const label i)
Wait until request i has finished.
Definition: UPstream.C:166
Foam::UPstream::addValidParOptions
static void addValidParOptions(HashTable< string > &validParOptions)
Definition: UPstream.C:34
Foam::UPstream::scatter
static void scatter(const char *sendData, const UList< int > &sendSizes, const UList< int > &sendOffsets, char *recvData, int recvSize, const label communicator=0)
Send data to all processors from the root of the communicator.
Definition: UPstream.C:126
Foam::FatalError
error FatalError
Pstream.H
Foam::abort
errorManip< error > abort(error &err)
Definition: errorManip.H:137
detachOurBuffers
static void detachOurBuffers()
Definition: UPstream.C:104
PstreamReduceOps.H
Inter-processor communication reduction functions.
allReduce.H
Various functions to wrap MPI_Allreduce.
Foam::exit
errorManipArg< error, int > exit(error &err, const int errNo=1)
Definition: errorManip.H:130
Foam::UPstream::master
static bool master(const label communicator=0)
Am I the master process.
Definition: UPstream.H:438
PstreamGlobals.H
Foam::profilingPstream::addAllToAllTime
static void addAllToAllTime()
Add time increment to allToAllTime.
Definition: profilingPstream.H:165
FatalErrorInFunction
#define FatalErrorInFunction
Report an error message using Foam::FatalError.
Definition: error.H:355
Foam::UPstream::nRequests
static label nRequests()
Get number of outstanding requests.
Definition: UPstream.C:152
Foam::nl
constexpr char nl
Definition: Ostream.H:372
Foam::vector2D
Vector2D< scalar > vector2D
A 2D vector of scalars obtained from the generic Vector2D.
Definition: vector2D.H:51
Foam::UPstream::initNull
static bool initNull()
Special purpose initialisation function.
Definition: UPstream.C:38
Foam::UPstream::worldComm
static label worldComm
Default communicator (all processors)
Definition: UPstream.H:285
Foam::start
label ListType::const_reference const label start
Definition: ListOps.H:408
Foam::UList
A 1D vector of objects of type <T>, where the size of the vector is known and can be used for subscri...
Definition: HashTable.H:103
Foam::UPstream::finishedRequest
static bool finishedRequest(const label i)
Non-blocking comms: has request i finished?
Definition: UPstream.C:170
Foam::DynamicList::remove
T remove()
Remove and return the last element. Fatal on an empty list.
Definition: DynamicListI.H:651
Foam::PstreamGlobals::freedTags_
DynamicList< int > freedTags_
Free'd message tags.
Definition: PstreamGlobals.C:36
Foam::UList::size
void size(const label n) noexcept
Override size to be inconsistent with allocated storage.
Definition: UListI.H:360
ourBuffers
static bool ourBuffers
Definition: UPstream.C:55
attachOurBuffers
static void attachOurBuffers()
Definition: UPstream.C:63
minBufLen
constexpr int minBufLen
Definition: UPstream.C:52
Foam::sumReduce
void sumReduce(T &Value, label &Count, const int tag=Pstream::msgType(), const label comm=UPstream::worldComm)
Definition: PstreamReduceOps.H:133
Foam::UPstream::communicator
Helper class for allocating/freeing communicators.
Definition: UPstream.H:321
Foam::labelUList
UList< label > labelUList
A UList of labels.
Definition: UList.H:79
Foam::UPstream::init
static bool init(int &argc, char **&argv, const bool needsThread)
Initialisation function called from main.
Definition: UPstream.C:48
Foam::PstreamGlobals::nTags_
int nTags_
Max outstanding message tag operations.
Definition: PstreamGlobals.C:34
WarningInFunction
#define WarningInFunction
Report a warning using Foam::Warning.
Definition: messageStream.H:294
profilingPstream.H
Foam::UPstream::freeTag
static void freeTag(const char *, const int tag)
Definition: UPstream.C:1055