/*
 * ***************************************************************************
 * MALOC = < Minimal Abstraction Layer for Object-oriented C >
 * Copyright (C) 1994--2008 Michael Holst
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * rcsid="$Id: vmpi.c,v 1.21 2008/03/12 05:13:58 fetk Exp $"
 * ***************************************************************************
 */

/*
 * ***************************************************************************
 * File:     vmpi.c
 *
 * Purpose:  Class Vmpi: methods.
 *
 * Notes:    Class Vmpi is a thin object-oriented Clean C layer on top of the
 *           MPI communication library.  Vmpi provides access to the minimal
 *           set of ten MPI primitives required to implement the Bank-Holst
 *           parallel adaptive algorithm, either through the Bank-Holst
 *           Oracle library or directly.
 *
 *           Vmpi is also just about the smallest possible communication
 *           library that still contains enough functionality to solve an
 *           elliptic equation in parallel using a typical parallel
 *           algorithm such as domain decomposition or parallel multigrid.
 *
 *           This minimal functionality is provided by Vmpi in ten core
 *           methods: startup, shutdown, size, rank, send, receive,
 *           broadcast, reduce, barrier, and non-blocking send (sometimes
 *           very useful).  That is it, and it all fits in about 90 lines
 *           of object-oriented Clean C code (not counting comments...).
 *           To fit into such a small amount of code, Vmpi assumes that
 *           users do all of their own data packing into pure character
 *           arrays.  The best way to make use of Vmpi is to pack every
 *           message into a character buffer in XDR format (RPC's answer
 *           to machine-independent binary format).  You can accomplish
 *           this e.g. using the Vio library that sits at the bottom of MC.
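 *
 *           For example, a caller-packed message travels from process 0
 *           to process 1 as follows (a sketch only, given a constructed
 *           Vmpi object vmpi; the XDR packing step via Vio is not shown,
 *           and the 1024-byte buffer size is an arbitrary assumption):
 *
 *               char buf[1024];
 *               if (Vmpi_rank(vmpi) == 0) {
 *                   Vmpi_send(vmpi, 1, buf, sizeof(buf));
 *               } else if (Vmpi_rank(vmpi) == 1) {
 *                   Vmpi_recv(vmpi, 0, buf, sizeof(buf));
 *               }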
 *
 *           The 6 primary MPI routines (according to Ian Foster):
 *           -----------------------------------------------------
 *
 *               MPI_Init      : Initiate an MPI computation
 *               MPI_Finalize  : Terminate a computation
 *               MPI_Comm_size : Determine number of processes
 *               MPI_Comm_rank : Determine my process identifier
 *               MPI_Send      : Send a message (blocking)
 *               MPI_Recv      : Receive a message (blocking)
 *
 *           The 4 additional useful MPI routines (according to me):
 *           -------------------------------------------------------
 *
 *               MPI_Barrier   : Synchronize all processes
 *               MPI_Bcast     : Send data from one process to all processes
 *               MPI_Reduce    : Sum, etc., distributed data (result in root)
 *               MPI_Isend     : Send a message (non-blocking)
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */

#include "vmpi_p.h"

#if defined(HAVE_MPI_H)
#   include <mpi.h>
#endif

VEMBED(rcsid="$Id: vmpi.c,v 1.21 2008/03/12 05:13:58 fetk Exp $")

/*
 * ***************************************************************************
 * Class Vmpi: Inlineable methods
 * ***************************************************************************
 */
#if !defined(VINLINE_MALOC)

#endif /* if !defined(VINLINE_MALOC) */

/*
 * ***************************************************************************
 * Class Vmpi: Non-inlineable methods
 * ***************************************************************************
 */

/*
 * ***************************************************************************
 * Routine:  Vmpi_init
 *
 * Purpose:  The Vmpi initializer.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_init(int *argc, char ***argv)
{
#if defined(HAVE_MPI_H)
    return (MPI_SUCCESS == MPI_Init(argc,argv));
#else
    return 1;
#endif
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_finalize
 *
 * Purpose:  The Vmpi finalizer.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_finalize(void)
{
#if defined(HAVE_MPI_H)
    return (MPI_SUCCESS == MPI_Finalize());
#else
    return 1;
#endif
}
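/*
 * ***************************************************************************
 * Example:  Vmpi_init must precede Vmpi_ctor, and Vmpi_finalize must follow
 *           Vmpi_dtor (the destructor deliberately leaves MPI_Finalize to
 *           the caller).  A sketch of a minimal driver; this main() is
 *           hypothetical and not part of the library:
 *
 *               int main(int argc, char **argv)
 *               {
 *                   Vmpi *vmpi;
 *                   VASSERT( Vmpi_init(&argc, &argv) );
 *                   vmpi = Vmpi_ctor();
 *                   Vnm_print(1, "Hello from process %d of %d\n",
 *                       Vmpi_rank(vmpi), Vmpi_size(vmpi));
 *                   Vmpi_dtor(&vmpi);
 *                   VASSERT( Vmpi_finalize() );
 *                   return 0;
 *               }
 * ***************************************************************************
 */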
/*
 * ***************************************************************************
 * Routine:  Vmpi_ctor
 *
 * Purpose:  The Vmpi constructor.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC Vmpi* Vmpi_ctor(void)
{
#if defined(HAVE_MPI_H)
    int dummy;
#endif
    Vmpi *thee = VNULL;
    VDEBUGIO("Vmpi_ctor: CREATING object..");
    thee = Vmem_malloc( VNULL, 1, sizeof(Vmpi) );
    thee->mpi_rank = 0;
    thee->mpi_size = 0;
#if defined(HAVE_MPI_H)
    VASSERT( MPI_SUCCESS == MPI_Initialized(&dummy) );
    VASSERT( MPI_SUCCESS == MPI_Comm_rank(MPI_COMM_WORLD, &(thee->mpi_rank)) );
    VASSERT( MPI_SUCCESS == MPI_Comm_size(MPI_COMM_WORLD, &(thee->mpi_size)) );
    Vnm_setIoTag(thee->mpi_rank, thee->mpi_size);
    Vnm_print(2,"Vmpi_ctor: process %d of %d is ALIVE!\n",
        thee->mpi_rank, thee->mpi_size);
#endif
    VDEBUGIO("..done.\n");
    return thee;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_dtor
 *
 * Purpose:  The Vmpi destructor.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC void Vmpi_dtor(Vmpi **thee)
{
    VDEBUGIO("Vmpi_dtor: DESTROYING object..");
    Vmem_free( VNULL, 1, sizeof(Vmpi), (void**)thee );
#if defined(HAVE_MPI_H)
#if 0
    /* intentionally disabled; MPI shutdown is left to Vmpi_finalize */
    VASSERT( MPI_SUCCESS == MPI_Finalize() );
#endif
#endif
    VDEBUGIO("..done.\n");
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_rank
 *
 * Purpose:  Return my processor ID.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_rank(Vmpi *thee)
{
    return thee->mpi_rank;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_size
 *
 * Purpose:  Return the number of processors involved.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_size(Vmpi *thee)
{
    return thee->mpi_size;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_barr
 *
 * Purpose:  An MPI barrier.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_barr(Vmpi *thee)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    rc = (MPI_SUCCESS == MPI_Barrier(MPI_COMM_WORLD));
#endif
    return rc;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_send
 *
 * Purpose:  An MPI blocking send.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_send(Vmpi *thee, int des, char *buf, int bufsize)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    int tag=0;
    rc = (MPI_SUCCESS == MPI_Send(buf,bufsize,MPI_CHAR,des,tag,MPI_COMM_WORLD));
#endif
    return rc;
}
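/*
 * ***************************************************************************
 * Example:  Vmpi_send pairs with the blocking Vmpi_recv below.  Both use
 *           the fixed message tag 0, so messages are matched by source and
 *           destination rank alone; passing src < 0 to Vmpi_recv accepts a
 *           message from any sender (MPI_ANY_SOURCE).  A sketch of a
 *           gather-to-root pattern, given a constructed Vmpi object vmpi
 *           (the buffer size is an arbitrary assumption):
 *
 *               char buf[1024];
 *               int i;
 *               if (Vmpi_rank(vmpi) > 0) {
 *                   Vmpi_send(vmpi, 0, buf, sizeof(buf));
 *               } else {
 *                   for (i=1; i<Vmpi_size(vmpi); i++) {
 *                       Vmpi_recv(vmpi, -1, buf, sizeof(buf));
 *                   }
 *               }
 * ***************************************************************************
 */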
/*
 * ***************************************************************************
 * Routine:  Vmpi_recv
 *
 * Purpose:  An MPI blocking receive.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_recv(Vmpi *thee, int src, char *buf, int bufsize)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    int rsrc=0;
    int tag=0;
    MPI_Status stat;
    if (src < 0) {
        rsrc = MPI_ANY_SOURCE;
    } else {
        rsrc = src;
    }
    rc = (MPI_SUCCESS == MPI_Recv(buf,bufsize,MPI_CHAR,rsrc,tag,
              MPI_COMM_WORLD,&stat));
#endif
    return rc;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_bcast
 *
 * Purpose:  An MPI broadcast.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_bcast(Vmpi *thee, char *buf, int bufsize)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    rc = (MPI_SUCCESS == MPI_Bcast(buf,bufsize,MPI_CHAR,0,MPI_COMM_WORLD));
#endif
    return rc;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_reduce
 *
 * Purpose:  An MPI reduce.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_reduce(Vmpi *thee, char *sbuf, char *rbuf, int bufsize)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    rc = (MPI_SUCCESS ==
        MPI_Reduce(sbuf,rbuf,bufsize,MPI_CHAR,MPI_SUM,0,MPI_COMM_WORLD));
#endif
    return rc;
}

/*
 * ***************************************************************************
 * Routine:  Vmpi_isend
 *
 * Purpose:  An MPI non-blocking send.
 *
 * Author:   Michael Holst
 * ***************************************************************************
 */
VPUBLIC int Vmpi_isend(Vmpi *thee, int des, char *buf, int bufsize)
{
    int rc=0;
#if defined(HAVE_MPI_H)
    int tag=0;
    MPI_Request requ;
    rc = (MPI_SUCCESS == MPI_Isend(buf,bufsize,MPI_CHAR,des,tag,MPI_COMM_WORLD,
              &requ));
#endif
    return rc;
}
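
/*
 * ***************************************************************************
 * Example:  Collective usage of Vmpi_bcast and Vmpi_reduce, given a
 *           constructed Vmpi object vmpi (a sketch; the buffer sizes are
 *           arbitrary assumptions).  Both collectives are rooted at
 *           process 0, and Vmpi_reduce sums the buffers bytewise (MPI_CHAR
 *           with MPI_SUM), so the packed values must be small enough not
 *           to overflow a char; note that some MPI implementations reject
 *           MPI_SUM on MPI_CHAR.  Note also that Vmpi_isend above discards
 *           its MPI_Request handle, so the caller cannot wait on completion
 *           and should not reuse the buffer until delivery is otherwise
 *           assured:
 *
 *               char sbuf[64], rbuf[64];
 *               Vmpi_bcast(vmpi, sbuf, sizeof(sbuf));
 *               Vmpi_reduce(vmpi, sbuf, rbuf, sizeof(rbuf));
 * ***************************************************************************
 */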