/********************************************************
%
% Written by:
% --
% John L. Weatherwax                2006-05-29
%
% email: wax@alum.mit.edu
%
% Please send comments and especially bug reports to the
% above email address.
%
%-----
*/

#include <stdio.h>   /* system header names were lost in extraction; <stdio.h>/<stdlib.h> assumed */
#include <stdlib.h>

#include "mpi.h"
#include "global_grid.h"
#include "sim_consts.h"
#include "exportParticles.h"

/*
 * exportParticles:
 *
 * Exchanges particle data with each of the eight neighboring processors.
 * For every direction pair the send/receive order is fixed by the parity
 * of the processor's row (or, for the purely horizontal W/E exchanges,
 * its column): even-parity processors send first and then receive, while
 * odd-parity processors receive first and then send.  Ordering the
 * blocking MPI_Send/MPI_Recv calls this way keeps matched pairs of
 * processors from deadlocking.
 */
void exportParticles(void){

  const int TAG_OFFSET=4*9+1;
  MPI_Status status;

  /* Send NW <-> Receive SE */
  /*printf("Send NW <-> Receive SE\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[NW] , 2*procInfo.numToExport[NW],  MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[SE], 2*procInfo.numToReceive[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[SE], 2*procInfo.numToReceive[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[NW] , 2*procInfo.numToExport[NW],  MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD);
  }

  /* Send N <-> Receive S */
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[N],  2*procInfo.numToExport[N],  MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[S], 2*procInfo.numToReceive[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[S], 2*procInfo.numToReceive[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[N],  2*procInfo.numToExport[N],  MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD);
  }

  /* Send NE <-> Receive SW */
  /*printf("Send NE <-> Receive SW\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[NE],  2*procInfo.numToExport[NE],  MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[SW], 2*procInfo.numToReceive[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[SW], 2*procInfo.numToReceive[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[NE],  2*procInfo.numToExport[NE],  MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD);
  }

  /* Send W <-> Receive E */
  if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[W],  2*procInfo.numToExport[W],  MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[E], 2*procInfo.numToReceive[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[E], 2*procInfo.numToReceive[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[W],  2*procInfo.numToExport[W],  MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD);
  }

  /* No sending to the IDENTITY (TAG_OFFSET+4 is intentionally unused) */

  /* Send E <-> Receive W */
  if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[E],  2*procInfo.numToExport[E],  MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[W], 2*procInfo.numToReceive[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[W], 2*procInfo.numToReceive[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[E],  2*procInfo.numToExport[E],  MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD);
  }

  /* Send SW <-> Receive NE */
  /*printf("Send SW <-> Receive NE\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[SW],  2*procInfo.numToExport[SW],  MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[NE], 2*procInfo.numToReceive[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[NE], 2*procInfo.numToReceive[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[SW],  2*procInfo.numToExport[SW],  MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD);
  }

  /* Send S <-> Receive N */
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[S],  2*procInfo.numToExport[S],  MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[N], 2*procInfo.numToReceive[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[N], 2*procInfo.numToReceive[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[S],  2*procInfo.numToExport[S],  MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD);
  }

  /* Send SE <-> Receive NW */
  /*printf("Send SE <-> Receive NW\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToExport[SE],  2*procInfo.numToExport[SE],  MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partDataToReceive[NW], 2*procInfo.numToReceive[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partDataToReceive[NW], 2*procInfo.numToReceive[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToExport[SE],  2*procInfo.numToExport[SE],  MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD);
  }

}
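
/*
 * Illustrative note (not part of the original routine above): each of the
 * eight parity-ordered MPI_Send/MPI_Recv pairs could also be written as a
 * single MPI_Sendrecv call, which pairs the send and the receive internally
 * and therefore needs no row/column-parity ordering to avoid deadlock.  The
 * helper below is a minimal sketch of that idea; the function name and the
 * dirSend/dirRecv/tag parameters are hypothetical and simply assume the same
 * procInfo fields used above.  For example, the first block of
 * exportParticles() would correspond to
 *
 *   exportParticlesSendrecvSketch(NW, SE, 4*9+1+0);
 */
static void exportParticlesSendrecvSketch(int dirSend, int dirRecv, int tag){
  MPI_Status status;
  /* send our outgoing particles toward dirSend while receiving the
     incoming particles from dirRecv in one combined call */
  MPI_Sendrecv(procInfo.partDataToExport[dirSend],  2*procInfo.numToExport[dirSend],  MPI_DOUBLE,
               procInfo.neighbor[dirSend],  tag,
               procInfo.partDataToReceive[dirRecv], 2*procInfo.numToReceive[dirRecv], MPI_DOUBLE,
               procInfo.neighbor[dirRecv],  tag,
               MPI_COMM_WORLD, &status);
}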