/********************************************************
 %
 % Written by:
 % --
 % John L. Weatherwax 2006-05-29
 %
 % email: wax@alum.mit.edu
 %
 % Please send comments and especially bug reports to the
 % above email address.
 %
 %-----
 */

#include <stdio.h>
#include <stdlib.h>

#include "mpi.h"

#include "global_grid.h"
#include "sim_consts.h"
#include "evictParticles.h"

/* Exchange particles that have left this processor's subdomain with the
   eight neighboring processors: evicted particle data is sent to each
   neighbor and the corresponding "refugee" particle data is received from
   the neighbor on the opposite side.  Processors on even rows (or even
   columns, for the W/E exchanges) send first and then receive, while those
   on odd rows/columns receive first and then send, so every blocking
   MPI_Send is matched by an already-posted MPI_Recv and the exchange
   cannot deadlock. */
void evictParticles(void){

  const int TAG_OFFSET=3*9+1; /* base message tag; each direction adds its own offset */
  MPI_Status status;

  /* Send NW <-> Receive SE */
  /*printf("Send NW <-> Receive SE\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[NW],          4*procInfo.numToEvict[NW],          MPI_DOUBLE,
             procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[SE], 4*procInfo.numRefugeeToReceive[SE], MPI_DOUBLE,
             procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[SE], 4*procInfo.numRefugeeToReceive[SE], MPI_DOUBLE,
             procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[NW],          4*procInfo.numToEvict[NW],          MPI_DOUBLE,
             procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD);
  }

  /* Send N <-> Receive S */
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[N],          4*procInfo.numToEvict[N],          MPI_DOUBLE,
             procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[S], 4*procInfo.numRefugeeToReceive[S], MPI_DOUBLE,
             procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[S], 4*procInfo.numRefugeeToReceive[S], MPI_DOUBLE,
             procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[N],          4*procInfo.numToEvict[N],          MPI_DOUBLE,
             procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD);
  }

  /* Send NE <-> Receive SW */
  /*printf("Send NE <-> Receive SW\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[NE],          4*procInfo.numToEvict[NE],          MPI_DOUBLE,
             procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[SW], 4*procInfo.numRefugeeToReceive[SW], MPI_DOUBLE,
             procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[SW], 4*procInfo.numRefugeeToReceive[SW], MPI_DOUBLE,
             procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[NE],          4*procInfo.numToEvict[NE],          MPI_DOUBLE,
             procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD);
  }

  /* Send W <-> Receive E */
  if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[W],          4*procInfo.numToEvict[W],          MPI_DOUBLE,
             procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[E], 4*procInfo.numRefugeeToReceive[E], MPI_DOUBLE,
             procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[E], 4*procInfo.numRefugeeToReceive[E], MPI_DOUBLE,
             procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[W],          4*procInfo.numToEvict[W],          MPI_DOUBLE,
             procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD);
  }

  /* No sending to the IDENTITY (self) direction, so TAG_OFFSET+4 is unused */
  /* Send E <-> Receive W */
  if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[E],          4*procInfo.numToEvict[E],          MPI_DOUBLE,
             procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[W], 4*procInfo.numRefugeeToReceive[W], MPI_DOUBLE,
             procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[W], 4*procInfo.numRefugeeToReceive[W], MPI_DOUBLE,
             procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[E],          4*procInfo.numToEvict[E],          MPI_DOUBLE,
             procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD);
  }

  /* Send SW <-> Receive NE */
  /*printf("Send SW <-> Receive NE\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[SW],          4*procInfo.numToEvict[SW],          MPI_DOUBLE,
             procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[NE], 4*procInfo.numRefugeeToReceive[NE], MPI_DOUBLE,
             procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[NE], 4*procInfo.numRefugeeToReceive[NE], MPI_DOUBLE,
             procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[SW],          4*procInfo.numToEvict[SW],          MPI_DOUBLE,
             procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD);
  }

  /* Send S <-> Receive N */
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[S],          4*procInfo.numToEvict[S],          MPI_DOUBLE,
             procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[N], 4*procInfo.numRefugeeToReceive[N], MPI_DOUBLE,
             procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[N], 4*procInfo.numRefugeeToReceive[N], MPI_DOUBLE,
             procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[S],          4*procInfo.numToEvict[S],          MPI_DOUBLE,
             procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD);
  }

  /* Send SE <-> Receive NW */
  /*printf("Send SE <-> Receive NW\n");*/
  if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */
    MPI_Send(procInfo.partDataToEvict[SE],          4*procInfo.numToEvict[SE],          MPI_DOUBLE,
             procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD);
    MPI_Recv(procInfo.partRefugeeDataToReceive[NW], 4*procInfo.numRefugeeToReceive[NW], MPI_DOUBLE,
             procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status);
  }else{                          /* these processors will RECEIVE and then SEND */
    MPI_Recv(procInfo.partRefugeeDataToReceive[NW], 4*procInfo.numRefugeeToReceive[NW], MPI_DOUBLE,
             procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status);
    MPI_Send(procInfo.partDataToEvict[SE],          4*procInfo.numToEvict[SE],          MPI_DOUBLE,
             procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD);
  }

}
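
/* Illustrative sketch, not part of the original routine: the paired
   even/odd send-then-receive ordering above is one way to avoid deadlock
   between neighboring ranks.  An alternative is MPI_Sendrecv, which lets
   the MPI library pair the send and the receive internally, so no
   row/column parity branching is needed.  The hypothetical helper below
   shows one direction of the exchange written that way; `sendDir` and
   `recvDir` are assumed to be opposite directions (e.g. NW and SE), and
   the same tag is used on both sides, matching the tag scheme above.
   Example usage (assumed, corresponding to the NW <-> SE pair):
     exchangeWithNeighbor_sendrecv(NW, SE, TAG_OFFSET+0);                  */
static void exchangeWithNeighbor_sendrecv(int sendDir, int recvDir, int tag){
  MPI_Status status;
  /* Send our evicted particles toward sendDir while receiving the
     refugees arriving from recvDir in a single combined call. */
  MPI_Sendrecv(procInfo.partDataToEvict[sendDir],          4*procInfo.numToEvict[sendDir],          MPI_DOUBLE,
               procInfo.neighbor[sendDir], tag,
               procInfo.partRefugeeDataToReceive[recvDir], 4*procInfo.numRefugeeToReceive[recvDir], MPI_DOUBLE,
               procInfo.neighbor[recvDir], tag,
               MPI_COMM_WORLD, &status);
}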