#include "GmshConfig.h"
#include "GmshMessage.h"

#if defined(HAVE_MPI)
#include <mpi.h>
#endif
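
// MPI message tags: in an MPI run, the master rank broadcasts one of these
// commands to the other ranks, which send vertex-array data back using the
// MPI_GMSH_VARRAY_LEN and MPI_GMSH_VARRAY tags.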
#define MPI_GMSH_COMPUTE_VIEW 1
#define MPI_GMSH_DATA_READY 2
#define MPI_GMSH_VARRAY 3
#define MPI_GMSH_VARRAY_LEN 4
#define MPI_GMSH_SHUTDOWN 5
#define MPI_GMSH_PARSE_STRING 6
#define MPI_GMSH_MERGE_FILE 7
#if defined(HAVE_ONELAB) && defined(HAVE_POST)

// headers assumed to be needed by the code below (the original include list
// is partly elided in this listing)
#include "GmshSocket.h"
#include "OpenFile.h"
#include "PView.h"
#include "VertexArray.h"
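
// Compute (unless compute == false) the vertex arrays of all post-processing
// views and send them to the ONELAB client over the socket connection.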
static void ComputeAndSendVertexArrays(GmshClient *client, bool compute = true)
{
  for(std::size_t i = 0; i < PView::list.size(); i++) {
    // (per-view setup elided in this listing)
    for(int type = 0; type < 4; type++) {
      // (encoding of each vertex array and its transmission to the client
      // elided in this listing)
    }
  }
}
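
#if defined(HAVE_MPI)

// MPI variant, run on the compute ranks: encode the local vertex arrays and
// send them back to the master rank (rank 0) with point-to-point messages.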
static void ComputeAndSendVertexArrays()
{
  int nbArrays = 0; // (counting of the non-empty vertex arrays elided)
  MPI_Send(&nbArrays, 1, MPI_INT, 0, MPI_GMSH_DATA_READY, MPI_COMM_WORLD);
  for(std::size_t i = 0; i < PView::list.size(); i++) {
    for(int type = 0; type < 4; type++) {
      int len = 0;
      char *str = nullptr; // (encoding of the array into str/len elided)
      MPI_Send(&len, 1, MPI_INT, 0, MPI_GMSH_VARRAY_LEN, MPI_COMM_WORLD);
      MPI_Send(str, len, MPI_CHAR, 0, MPI_GMSH_VARRAY, MPI_COMM_WORLD);
    }
  }
}
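
// Master-side counterpart: decode a vertex array received from another rank
// and merge it into the matching view.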
static void AddToVertexArrays(int length, const char *bytes, int swap)
{
  std::string name;
  int num, type, numSteps;
  double min, max, time, xmin, ymin, zmin, xmax, ymax, zmax;
  // decode the header of the encoded array (call head reconstructed)
  VertexArray::decodeHeader(length, bytes, swap, name, num, type, min, max,
                            numSteps, time, xmin, ymin, zmin, xmax, ymax, zmax);

  // (lookup of the target view and of its vertex array elided)

  // treat vector arrays (type 4) as line arrays (type 2)
  if(type == 4) type = 2;

  // (decoding of the payload and merge into the view's array elided)
}
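
#endif

// On the master rank: ask the other ranks to compute their vertex arrays,
// receive and merge them into the local views, then send the merged arrays
// to the client.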
static void GatherAndSendVertexArrays(GmshClient *client, bool swap)
{
#if defined(HAVE_MPI)
  // tell every rank to compute and send its vertex arrays
  int mpi_msg = MPI_GMSH_COMPUTE_VIEW;
  MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
  // fill the master's own vertex arrays
  for(std::size_t i = 0; i < PView::list.size(); i++)
    PView::list[i]->fillVertexArrays();
  // receive and merge the arrays sent by each of the other ranks
  int nbDaemon = Msg::GetCommSize(); // (reconstructed; original line elided)
  for(int i = 0; i < nbDaemon - 1; i++) {
    int nbArrays;
    MPI_Status status;
    MPI_Recv(&nbArrays, 1, MPI_INT, MPI_ANY_SOURCE, MPI_GMSH_DATA_READY,
             MPI_COMM_WORLD, &status);
    for(int j = 0; j < nbArrays; j++) {
      int len;
      MPI_Status status2;
      MPI_Recv(&len, 1, MPI_INT, status.MPI_SOURCE, MPI_GMSH_VARRAY_LEN,
               MPI_COMM_WORLD, &status2);
      char *str = new char[len]; // receive buffer (reconstructed allocation)
      MPI_Recv(str, len, MPI_CHAR, status.MPI_SOURCE, MPI_GMSH_VARRAY,
               MPI_COMM_WORLD, &status2);
      AddToVertexArrays(len, str, swap);
      delete[] str;
    }
  }
  // send the merged arrays to the client without recomputing them
  ComputeAndSendVertexArrays(client, false);
#endif
}
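
// Entry point of the remote Gmsh daemon. The setup below reconstructs lines
// elided in this listing; the client handle and the MPI rank/size are assumed
// to come from the Msg wrappers.
int GmshRemote()
{
  GmshClient *client = Msg::GetGmshClient();
  int rank = Msg::GetCommRank();
  int nbDaemon = Msg::GetCommSize();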
  if(!client && rank == 0) return 0;

  if(client && nbDaemon < 2)
    ComputeAndSendVertexArrays(client);
  else if(client && nbDaemon >= 2 && rank == 0)
    GatherAndSendVertexArrays(client, false);

  while(1) {
    // on the master rank, handle the messages coming from the client
    if(client && rank == 0) {
      // wait (with a timeout) for data from the client
      int ret = client->Select(300, 0);
      if(!ret) {
        client->Info("Timeout: stopping remote Gmsh...");
        break;
      }
      else if(ret < 0) {
        client->Error("Error on select: stopping remote Gmsh...");
        break;
      }

      // (the receive calls below reconstruct elided lines, assuming the
      // GmshSocket ReceiveHeader/ReceiveMessage API)
      int type, length, swap;
      if(!client->ReceiveHeader(&type, &length, &swap)) {
        client->Error("Did not receive message header: stopping remote Gmsh...");
        break;
      }

      char *msg = new char[length + 1];
      if(!client->ReceiveMessage(length, msg)) {
        client->Error("Did not receive message body: stopping remote Gmsh...");
        break;
      }

      if(type == GmshSocket::GMSH_STOP) {
        client->Info("Stopping remote Gmsh...");
        break;
      }
      else if(type == GmshSocket::GMSH_VERTEX_ARRAY) {
        // the message is an option string to parse before updating the views
        ParseString(msg);
#if !defined(HAVE_MPI)
        ComputeAndSendVertexArrays(client);
#else
        // relay the string to the other ranks, then gather their arrays
        int mpi_msg = MPI_GMSH_PARSE_STRING;
        MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
        GatherAndSendVertexArrays(client, swap);
#endif
      }
      else if(type == GmshSocket::GMSH_MERGE_FILE) {
        MergeFile(msg);
#if !defined(HAVE_MPI)
        ComputeAndSendVertexArrays(client);
#else
        // relay the file name to the other ranks, then gather their arrays
        int mpi_msg = MPI_GMSH_MERGE_FILE;
        MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
        GatherAndSendVertexArrays(client, swap);
#endif
      }
      else if(type == GmshSocket::GMSH_PARSE_STRING) {
        ParseString(msg);
#if defined(HAVE_MPI)
        // relay the string to the other ranks (no view update requested)
        int mpi_msg = MPI_GMSH_PARSE_STRING;
        MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
#endif
      }
      else if(type == GmshSocket::GMSH_SPEED_TEST) {
        client->Info("Sending huge array");
        std::string huge(500000000, 'a');
        // (transmission of the huge buffer back to the client elided)
      }
      else {
        client->Error("Ignoring unknown message");
      }

      delete[] msg; // release the message buffer before the next iteration
    }
    else {
      // on the other ranks, wait for a command broadcast by the master rank
#if defined(HAVE_MPI)
      int mpi_msg;
      MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
      if(mpi_msg == MPI_GMSH_COMPUTE_VIEW)
        ComputeAndSendVertexArrays();
      else if(mpi_msg == MPI_GMSH_SHUTDOWN)
        break; // stop this daemon (reconstructed; original action elided)
      else if(mpi_msg == MPI_GMSH_PARSE_STRING) {
        int length;
        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
        char *msg = new char[length + 1]; // (reconstructed allocation)
        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
        msg[length] = '\0';
        ParseString(msg);
        delete[] msg;
      }
      else if(mpi_msg == MPI_GMSH_MERGE_FILE) {
        int length;
        MPI_Bcast(&length, 1, MPI_INT, 0, MPI_COMM_WORLD);
        char *msg = new char[length + 1]; // (reconstructed allocation)
        MPI_Bcast(msg, length, MPI_CHAR, 0, MPI_COMM_WORLD);
        msg[length] = '\0';
        MergeFile(msg);
        delete[] msg;
      }
#endif
    }
  }
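
  // the client session is over: tell the other MPI ranks to shut down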
#if defined(HAVE_MPI)
  int mpi_msg = MPI_GMSH_SHUTDOWN;
  MPI_Bcast(&mpi_msg, 1, MPI_INT, 0, MPI_COMM_WORLD);
#endif
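
  return 0;
}

#else

// Fallback when Gmsh is built without the post-processing or ONELAB modules
// (surrounding scaffolding reconstructed around the elided lines).
int GmshRemote()
{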
  Msg::Error("GmshRemote requires Post and ONELAB modules");