/********************************************************
 %
 % Written by:
 % --
 % John L. Weatherwax                2006-05-29
 %
 % email: wax@alum.mit.edu
 %
 % Please send comments and especially bug reports to the
 % above email address.
 %
 %-----
 */

#include <stdio.h>
#include <stdlib.h>

#include "mpi.h"

#include "global_grid.h"
#include "sim_consts.h"
#include "exhangeNumberOfParticlesToSend.h"

/*
  Pass the number of particles each processor will obtain in the various
  MPI_Send sweeps ... this is required so that each processor can create
  the space for all of the particles that it will be receiving.
*/
void exhangeNumberOfParticlesToSend(void){

  const int TAG_OFFSET=0;
  MPI_Status status;

  /* Send NW <-> Receive SE */
  /*printf("Send NW <-> Receive SE\n");*/
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[NW],1,MPI_INT,procInfo.neighbor[NW],TAG_OFFSET+0,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[SE],1,MPI_INT,procInfo.neighbor[SE],TAG_OFFSET+0,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[SE],1,MPI_INT,procInfo.neighbor[SE],TAG_OFFSET+0,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[NW],1,MPI_INT,procInfo.neighbor[NW],TAG_OFFSET+0,MPI_COMM_WORLD);
  }

  /* Send N <-> Receive S */
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[N],1,MPI_INT,procInfo.neighbor[N],TAG_OFFSET+1,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[S],1,MPI_INT,procInfo.neighbor[S],TAG_OFFSET+1,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[S],1,MPI_INT,procInfo.neighbor[S],TAG_OFFSET+1,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[N],1,MPI_INT,procInfo.neighbor[N],TAG_OFFSET+1,MPI_COMM_WORLD);
  }

  /* Send NE <-> Receive SW */
  /*printf("Send NE <-> Receive SW\n");*/
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[NE],1,MPI_INT,procInfo.neighbor[NE],TAG_OFFSET+2,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[SW],1,MPI_INT,procInfo.neighbor[SW],TAG_OFFSET+2,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[SW],1,MPI_INT,procInfo.neighbor[SW],TAG_OFFSET+2,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[NE],1,MPI_INT,procInfo.neighbor[NE],TAG_OFFSET+2,MPI_COMM_WORLD);
  }

  /* Send W <-> Receive E */
  if( procInfo.my_col % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[W],1,MPI_INT,procInfo.neighbor[W],TAG_OFFSET+3,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[E],1,MPI_INT,procInfo.neighbor[E],TAG_OFFSET+3,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[E],1,MPI_INT,procInfo.neighbor[E],TAG_OFFSET+3,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[W],1,MPI_INT,procInfo.neighbor[W],TAG_OFFSET+3,MPI_COMM_WORLD);
  }

  /* No sending to the IDENTITY (ourself); the tag TAG_OFFSET+4 is skipped */

  /* Send E <-> Receive W */
  if( procInfo.my_col % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[E],1,MPI_INT,procInfo.neighbor[E],TAG_OFFSET+5,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[W],1,MPI_INT,procInfo.neighbor[W],TAG_OFFSET+5,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[W],1,MPI_INT,procInfo.neighbor[W],TAG_OFFSET+5,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[E],1,MPI_INT,procInfo.neighbor[E],TAG_OFFSET+5,MPI_COMM_WORLD);
  }

  /* Send SW <-> Receive NE */
  /*printf("Send SW <-> Receive NE\n");*/
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[SW],1,MPI_INT,procInfo.neighbor[SW],TAG_OFFSET+6,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[NE],1,MPI_INT,procInfo.neighbor[NE],TAG_OFFSET+6,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[NE],1,MPI_INT,procInfo.neighbor[NE],TAG_OFFSET+6,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[SW],1,MPI_INT,procInfo.neighbor[SW],TAG_OFFSET+6,MPI_COMM_WORLD);
  }

  /* Send S <-> Receive N */
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[S],1,MPI_INT,procInfo.neighbor[S],TAG_OFFSET+7,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[N],1,MPI_INT,procInfo.neighbor[N],TAG_OFFSET+7,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[N],1,MPI_INT,procInfo.neighbor[N],TAG_OFFSET+7,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[S],1,MPI_INT,procInfo.neighbor[S],TAG_OFFSET+7,MPI_COMM_WORLD);
  }

  /* Send SE <-> Receive NW */
  /*printf("Send SE <-> Receive NW\n");*/
  if( procInfo.my_row % 2 == 0 ){
    /* these processors will SEND and then RECEIVE */
    MPI_Send(&procInfo.numToExport[SE],1,MPI_INT,procInfo.neighbor[SE],TAG_OFFSET+8,MPI_COMM_WORLD);
    MPI_Recv(&procInfo.numToReceive[NW],1,MPI_INT,procInfo.neighbor[NW],TAG_OFFSET+8,MPI_COMM_WORLD,&status);
  }else{
    /* these processors will RECEIVE and then SEND */
    MPI_Recv(&procInfo.numToReceive[NW],1,MPI_INT,procInfo.neighbor[NW],TAG_OFFSET+8,MPI_COMM_WORLD,&status);
    MPI_Send(&procInfo.numToExport[SE],1,MPI_INT,procInfo.neighbor[SE],TAG_OFFSET+8,MPI_COMM_WORLD);
  }

}
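
/* -----------------------------------------------------------------
   Illustrative sketch (not part of the original build):

   The hand-ordered Send/Recv pairs above avoid deadlock by alternating
   which row/column parity sends first.  MPI_Sendrecv performs the same
   paired exchange in a single call and removes the need for that
   ordering.  The disabled function below is a minimal sketch of this
   alternative; it assumes the same procInfo structure and the NW..SE
   direction constants declared in the headers included above.
   ----------------------------------------------------------------- */
#if 0
static void exchangeCountsWithSendrecv(void){
  /* each export direction is paired with the opposite receive
     direction, exactly as in the hand-ordered exchanges above   */
  const int sendDir[8] = { NW, N, NE, W, E, SW, S, SE };
  const int recvDir[8] = { SE, S, SW, E, W, NE, N, NW };
  MPI_Status status;
  int i;
  for( i = 0; i < 8; i++ ){
    /* both partners in a pairing use tag i, so the messages match up */
    MPI_Sendrecv(&procInfo.numToExport[sendDir[i]],1,MPI_INT,procInfo.neighbor[sendDir[i]],i,
                 &procInfo.numToReceive[recvDir[i]],1,MPI_INT,procInfo.neighbor[recvDir[i]],i,
                 MPI_COMM_WORLD,&status);
  }
}
#endif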