/* ---------------------------------------------------------------
 * pi_send.c
 * FILES: pi_send.c, dboard.c, make.pi.c
 * DESCRIPTION: MPI pi calculation example program. C Version.
 * This program calculates pi using a "dartboard" algorithm. See
 * Fox et al. (1988) Solving Problems on Concurrent Processors, vol. 1,
 * page 207. All processes contribute to the calculation, with the
 * master averaging the values for pi.
 *
 * SPMD version: Conditional statements check if the process
 * is the master or a worker.
 *
 * This version uses low-level sends and receives to collect results.
 *
 * AUTHOR: Roslyn Leibensperger
 * REVISED: 09/15/93 for latest API changes. BMB
 *          01/10/94 changed API to MPL. SMJP
 *          05/18/94 replaced blocking with non-blocking send. RYL
 * CONVERTED TO MPI: 11/12/94 by Xainneng Shen
 * --------------------------------------------------------------- */
#include <stdio.h>
#include <stdlib.h>
#include "mpi.h"

double dboard (int darts);

#define DARTS  5000     /* number of throws at dartboard */
#define ROUNDS 10       /* number of times "darts" is iterated */
#define MASTER 0        /* task ID of master task */

MPI_Status status;
MPI_Request request;

int main(int argc, char **argv)
{
   double homepi,       /* value of pi calculated by current task */
          pi,           /* average of pi after "darts" is thrown */
          avepi,        /* average pi value for all iterations */
          pirecv,       /* pi received from worker */
          pisum;        /* sum of workers' pi values */
   int    mytid,        /* task ID - also used as seed number */
          nproc,        /* number of tasks */
          mtype,        /* message type */
          rcode,        /* return code */
          i, n;

   /* Obtain number of tasks and task ID */
   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &mytid);
   MPI_Comm_size(MPI_COMM_WORLD, &nproc);
   printf("MPI task ID = %d\n", mytid);

   /* Set seed for random number generator equal to task ID */
   srandom(mytid);

   avepi = 0;
   for (i = 0; i < ROUNDS; i++)
   {
      /* All tasks calculate pi using dartboard algorithm */
      homepi = dboard(DARTS);

      /* Workers send homepi to master
       * - Message type will be set to the iteration count
       * - A non-blocking send is followed by MPI_Wait;
       *   this is safe programming practice */
      if (mytid != MASTER)
      {
         mtype = i;
         MPI_Isend(&homepi, 1, MPI_DOUBLE, MASTER, mtype,
                   MPI_COMM_WORLD, &request);
         MPI_Wait(&request, &status);
      }

      /* Master receives messages from all workers
       * - Message type will be set to the iteration count
       * - A message can be received from any task, as long as the
       *   message types match
       * - The return code is checked, and a message displayed
       *   if a problem occurred */
      else
      {
         mtype = i;
         pisum = 0;
         for (n = 1; n < nproc; n++)
         {
            rcode = MPI_Recv(&pirecv, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
                             mtype, MPI_COMM_WORLD, &status);
            if (rcode != MPI_SUCCESS)
               printf("Task %d: MPI_Recv error on iteration %d\n", mytid, i);
            /* keep running total of pi */
            pisum = pisum + pirecv;
         }
         /* Master calculates the average value of pi for this iteration */
         pi = (pisum + homepi) / nproc;
         /* Master calculates the average value of pi over all iterations */
         avepi = ((avepi * i) + pi) / (i + 1);
         printf("   After %3d throws, average value of pi = %10.8f\n",
                (DARTS * (i + 1)), avepi);
      }
   }

   MPI_Finalize();
   return 0;
}
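
/* For reference: the dartboard routine declared above is normally
 * supplied by dboard.c. The minimal sketch below is an assumption about
 * one common implementation of the square-and-inscribed-circle scheme
 * described in Fox et al., not necessarily what dboard.c contains. It is
 * guarded by PI_SEND_LOCAL_DBOARD (a hypothetical macro) so it does not
 * produce a duplicate symbol when linked against the real dboard.c. */
#ifdef PI_SEND_LOCAL_DBOARD
double dboard (int darts)
{
   /* random() returns a long in [0, 2^31 - 1] */
   const double rmax = 2147483647.0;
   double x_coord, y_coord, r;
   int    score = 0, n;

   for (n = 1; n <= darts; n++)
   {
      /* throw one dart: pick a random point in the square [-1,1] x [-1,1] */
      r = (double)random() / rmax;
      x_coord = (2.0 * r) - 1.0;
      r = (double)random() / rmax;
      y_coord = (2.0 * r) - 1.0;

      /* score a hit if the dart lands inside the unit circle */
      if ((x_coord * x_coord + y_coord * y_coord) <= 1.0)
         score++;
   }

   /* circle/square area ratio is pi/4, so pi ~= 4 * hits / throws */
   return 4.0 * (double)score / (double)darts;
}
#endif /* PI_SEND_LOCAL_DBOARD */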