FreeFOAM: The Cross-Platform CFD Toolkit
mpiPstreamImpl.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2010 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/
25 
26 #include "mpi.h"
27 
28 #include "mpiPstreamImpl.H"
29 #include <OpenFOAM/Pstream.H>
30 #include <OpenFOAM/OSspecific.H>

#include <cstring>
#include <cstdlib>
#include <csignal>

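// Map OpenFOAM's scalar onto the corresponding MPI datatype, depending on
// whether the library is built for single (SP) or double (DP) precision.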
#if defined(SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

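// Provide a type name and debug switch for mpiPstreamImpl and register it in
// the PstreamImpl run-time selection table.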
namespace Foam
{

    defineTypeNameAndDebug(mpiPstreamImpl, 0);
    addToRunTimeSelectionTable(PstreamImpl, mpiPstreamImpl, dictionary);

}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// valid parallel options vary between implementations, but flag common ones.
// if they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong
void Foam::mpiPstreamImpl::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}


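// Initialise MPI, record this rank in myProcNo, fill procIDs with the
// process numbers, set the parallel-run flag and (except on SGI MPI) attach
// a buffer for buffered sends.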
bool Foam::mpiPstreamImpl::init(int& argc, char**& argv, int& myProcNo, List<int>& procIDs, bool& isParallel)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo);

    if (numprocs <= 1)
    {
        FatalErrorIn("mpiPstreamImpl::init(int& argc, char**& argv)")
            << "bool mpiPstreamImpl::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs.setSize(numprocs);

    forAll(procIDs, procNo)
    {
        procIDs[procNo] = procNo;
    }

    setParRun(isParallel);

#   ifndef SGIMPI
    //FIX <themiwi@users.sf.net>
    // Use default bufferSize and let the user override it
    // using $MPI_BUFFER_SIZE if she wants to.
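    // For example, a larger send buffer can be requested before launching
    // the run (POSIX shell):
    //
    //     export MPI_BUFFER_SIZE=150000000
    //     mpirun -np 4 <application> -parallel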
    int bufferSize = 20000000;

    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int tmpBufferSize = atoi(bufferSizeName.c_str());

        if (tmpBufferSize)
        {
            bufferSize = tmpBufferSize;
        }
    }
    MPI_Buffer_attach(new char[bufferSize], bufferSize);
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that nprocs is known construct communication tables.

    return true;
}


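// Clean up the parallel run: detach and free the send buffer, then either
// finalise MPI (errnum == 0) or abort all processes with the given error
// code.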
void Foam::mpiPstreamImpl::exit(int errnum)
{
#   ifndef SGIMPI
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;
#   endif

    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}


void Foam::mpiPstreamImpl::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}

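// Reduce a scalar across all processors with the supplied binary operator:
// either the master gathers, combines and redistributes the values
// explicitly, or the reduction is delegated to MPI_Allreduce.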
void Foam::mpiPstreamImpl::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (!Pstream::parRun())
    {
        return;
    }

    if (Pstream::nProcs() <= Pstream::nProcsSimpleSum())
    {
        if (Pstream::master())
        {
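            // Master: receive each slave's value and combine it into Value
            // with the binary operator.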
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }


        if (Pstream::master())
        {
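            // Send the reduced value from the master back to every slave.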
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
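        // Hand the reduction to the MPI library.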
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;

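        // An alternative hand-coded binary-tree reduction is retained below
        // in comments.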
        /*
        int myProcNo = Pstream::myProcNo();
        int nProcs = Pstream::nProcs();

        //
        // receive from children
        //
        int level = 1;
        int thisLevelOffset = 2;
        int childLevelOffset = thisLevelOffset/2;
        int childProcId = 0;

        while
        (
            (childLevelOffset < nProcs)
         && (myProcNo % thisLevelOffset) == 0
        )
        {
            childProcId = myProcNo + childLevelOffset;

            scalar value;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }

            level++;
            thisLevelOffset <<= 1;
            childLevelOffset = thisLevelOffset/2;
        }

        //
        // send and receive from parent
        //
        if (!Pstream::master())
        {
            int parentId = myProcNo - (myProcNo % thisLevelOffset);

            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }

            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }


        //
        // distribute to my children
        //
        level--;
        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;

        while (level > 0)
        {
            childProcId = myProcNo + childLevelOffset;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }

            level--;
            thisLevelOffset >>= 1;
            childLevelOffset = thisLevelOffset/2;
        }
        */
    }
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************ vim: set sw=4 sts=4 et: ************************ //