/******************************************************** % % Written by: % -- % John L. Weatherwax 2006-08-07 % % email: wax@alum.mit.edu % % Please send comments and especially bug reports to the % above email address. % %----- */ #include #include #include "mpi.h" #include "global_grid.h" #include "sim_consts.h" #include "exportForces.h" /* */ void exportForces(void){ const int TAG_OFFSET=1*9+1; MPI_Status status; /* Send NW <-> Receive SE */ /*printf("Send NW <-> Receive SE\n");*/ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[NW], 2*procInfo.numToReceive[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[SE], 2*procInfo.numToExport[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[SE], 2*procInfo.numToExport[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+0, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[NW], 2*procInfo.numToReceive[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+0, MPI_COMM_WORLD); } /*printf("NW=%d; N=%d; NE=%d; W=%d; I=%d; E=%d; SW=%d; S=%d; SE=%d\n",NW,N,NE,W,I,E,SW,S,SE); */ /* if( procInfo.my_rank==2 ){ printf("NW<->SE: NW = %d; procInfo.forcesOnExportedPart[2][0]=%20.8f; procInfo.forcesOnReceivedPart[2][1]=%20.8f\n", NW,procInfo.forcesOnExportedPart[2][0],procInfo.forcesOnExportedPart[2][1]); }*/ /* Send N <-> Receive S */ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[N], 2*procInfo.numToReceive[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[S], 2*procInfo.numToExport[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[S], 2*procInfo.numToExport[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+1, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[N], 2*procInfo.numToReceive[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+1, MPI_COMM_WORLD); } /* if( procInfo.my_rank==2 ){ printf("N<->S: procInfo.forcesOnExportedPart[1][0]=%20.8f; procInfo.forcesOnReceivedPart[1][1]=%20.8f\n", procInfo.forcesOnExportedPart[1][0],procInfo.forcesOnExportedPart[1][1]); }*/ /* Send NE <-> Receive SW */ /*printf("Send NE <-> Receive SW\n");*/ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[NE], 2*procInfo.numToReceive[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[SW], 2*procInfo.numToExport[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[SW], 2*procInfo.numToExport[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+2, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[NE], 2*procInfo.numToReceive[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+2, MPI_COMM_WORLD); } /* Send W <-> Receive E */ /* if( procInfo.my_rank==3 ){ printf("before send: procInfo.forcesOnReceivedPart[3][0]=%20.8f; procInfo.forcesOnReceivedPart[3][1]=%20.8f\n", procInfo.forcesOnReceivedPart[3][0],procInfo.forcesOnReceivedPart[3][1]); printf("procInfo.numToReceive[W]=%d\n",procInfo.numToReceive[W]); } if( procInfo.my_rank==2 ){ printf("procInfo.numToExport[E]=%d\n",procInfo.numToExport[E]); } */ if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[W], 2*procInfo.numToReceive[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[E], 2*procInfo.numToExport[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[E], 2*procInfo.numToExport[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+3, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[W], 2*procInfo.numToReceive[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+3, MPI_COMM_WORLD); } /* if( procInfo.my_rank==2 ){ printf("procInfo.forcesOnExportedPart[3][0]=%20.8f; procInfo.forcesOnReceivedPart[3][1]=%20.8f\n", procInfo.forcesOnExportedPart[3][0],procInfo.forcesOnExportedPart[3][1]); } */ /* No sending to the IDENTITY */ /* Send E <-> Receive W */ if( procInfo.my_col % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[E], 2*procInfo.numToReceive[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[W], 2*procInfo.numToExport[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[W], 2*procInfo.numToExport[W], MPI_DOUBLE, procInfo.neighbor[W], TAG_OFFSET+5, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[E], 2*procInfo.numToReceive[E], MPI_DOUBLE, procInfo.neighbor[E], TAG_OFFSET+5, MPI_COMM_WORLD); } /* Send SW <-> Receive NE */ /*printf("Send SW <-> Receive NE\n");*/ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[SW], 2*procInfo.numToReceive[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[NE], 2*procInfo.numToExport[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[NE], 2*procInfo.numToExport[NE], MPI_DOUBLE, procInfo.neighbor[NE], TAG_OFFSET+6, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[SW], 2*procInfo.numToReceive[SW], MPI_DOUBLE, procInfo.neighbor[SW], TAG_OFFSET+6, MPI_COMM_WORLD); } /* Send S <-> Receive N */ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[S], 2*procInfo.numToReceive[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[N], 2*procInfo.numToExport[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[N], 2*procInfo.numToExport[N], MPI_DOUBLE, procInfo.neighbor[N], TAG_OFFSET+7, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[S], 2*procInfo.numToReceive[S], MPI_DOUBLE, procInfo.neighbor[S], TAG_OFFSET+7, MPI_COMM_WORLD); } /* Send SE <-> Receive NW */ /*printf("Send SE <-> Receive NW\n");*/ if( procInfo.my_row % 2 == 0 ){ /* these processors will SEND and then RECEIVE */ MPI_Send(procInfo.forcesOnReceivedPart[SE], 2*procInfo.numToReceive[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD); MPI_Recv(procInfo.forcesOnExportedPart[NW], 2*procInfo.numToExport[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status); }else{ /* these processors will RECEIVE and then SEND */ MPI_Recv(procInfo.forcesOnExportedPart[NW], 2*procInfo.numToExport[NW], MPI_DOUBLE, procInfo.neighbor[NW], TAG_OFFSET+8, MPI_COMM_WORLD, &status); MPI_Send(procInfo.forcesOnReceivedPart[SE], 2*procInfo.numToReceive[SE], MPI_DOUBLE, procInfo.neighbor[SE], TAG_OFFSET+8, MPI_COMM_WORLD); } }