/******************************************************************************
 * FILE: mpi_mm.c
 * DESCRIPTION:
 *   MPI Matrix Multiply - C Version
 *   In this code, the master task distributes a matrix multiply
 *   operation to numtasks-1 worker tasks.
 * Original Source: https://computing.llnl.gov/tutorials/mpi/exercise.html#Exercise1
 *
 * How to Compile and Run @euclid:
 *   [FROM any 103ws??] scp * ucycsu02@euclid.cyi.ac.cy:~/epl372
 *   [FROM any 103ws??] ssh ucycsu02@euclid.cyi.ac.cy
 *   [ucycsu02@euclid]$ cd epl372
 *   [ucycsu02@euclid epl372]$ module load goolf/1.6.10
 *   [ucycsu02@euclid epl372]$ mpicc mpi_mm.c -o mpi_mm.out
 *   [ucycsu02@euclid epl372]$ sbatch mpirun.sh
 *   [ucycsu02@euclid epl372]$ squeue
 *
 * mpirun.sh:
 *   #!/bin/bash
 *   #SBATCH --job-name=MPI_MM
 *   #SBATCH --nodes=2
 *   #SBATCH --ntasks-per-node=5
 *   #SBATCH --time=00:05:00
 *   #SBATCH --error=mpi_mm.error.out
 *   #SBATCH --output=mpi_mm.output.out
 *   module load goolf/1.6.10
 *   mpirun -np 8 ./mpi_mm.out
 *
 * **** ALWAYS Compare the results with the Serial Version to VERIFY ****
 ******************************************************************************/
#include "mpi.h"
#include <stdio.h>
#include <stdlib.h>

#define NRA 10                  /* number of rows in matrix A */
#define NCA 10                  /* number of columns in matrix A */
#define NCB 10                  /* number of columns in matrix B */
#define MASTER 0                /* taskid of first task */
#define FROM_MASTER 1           /* setting a message type */
#define FROM_WORKER 2           /* setting a message type */

int main (int argc, char *argv[])
{
int   numtasks,                 /* number of tasks in partition */
      taskid,                   /* a task identifier */
      numworkers,               /* number of worker tasks */
      source,                   /* task id of message source */
      dest,                     /* task id of message destination */
      mtype,                    /* message type */
      rows,                     /* rows of matrix A sent to each worker */
      averow, extra, offset,    /* used to determine rows sent to each worker */
      i, j, k, rc;              /* misc */
int   len;
char  hostname[MPI_MAX_PROCESSOR_NAME];
double a[NRA][NCA],             /* matrix A to be multiplied */
       b[NCA][NCB],             /* matrix B to be multiplied */
       c[NRA][NCB];             /* result matrix C */
MPI_Status status;

MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&taskid);
MPI_Comm_size(MPI_COMM_WORLD,&numtasks);
MPI_Get_processor_name(hostname, &len);
if (numtasks < 2) {
   printf("Need at least two MPI tasks. Quitting...\n");
   rc = 1;
   MPI_Abort(MPI_COMM_WORLD, rc);
   exit(1);
   }
numworkers = numtasks-1;

/**************************** master task ************************************/
if (taskid == MASTER)
   {
   printf("mpi_mm has started with %d tasks.\n",numtasks);
   printf("Initializing arrays...A[%d][%d]*B[%d][%d]=C[%d][%d]\n",NRA,NCA,NCA,NCB,NRA,NCB);
   for (i=0; i<NRA; i++)
      for (j=0; j<NCA; j++)
         a[i][j] = i+j;
   for (i=0; i<NCA; i++)
      for (j=0; j<NCB; j++)
         b[i][j] = i*j;

   /* Send matrix data to the worker tasks */
   averow = NRA/numworkers;
   extra = NRA%numworkers;
   offset = 0;
   mtype = FROM_MASTER;
   for (dest=1; dest<=numworkers; dest++)
      {
      rows = (dest <= extra) ? averow+1 : averow;
      printf("Sending %d rows to task %d offset=%d\n",rows,dest,offset);
      MPI_Send(&offset, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&rows, 1, MPI_INT, dest, mtype, MPI_COMM_WORLD);
      MPI_Send(&a[offset][0], rows*NCA, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
      //MPI_Send(&b, NCA*NCB, MPI_DOUBLE, dest, mtype, MPI_COMM_WORLD);
      offset = offset + rows;
      }
   /* Or you can Broadcast matrix B. SAME LINE AS WORKERS (Destinations) */
   MPI_Bcast(&b, NCA*NCB, MPI_DOUBLE, MASTER, MPI_COMM_WORLD);

   /* Receive results from worker tasks */
   mtype = FROM_WORKER;
   for (source=1; source<=numworkers; source++)
      {
      MPI_Recv(&offset, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&rows, 1, MPI_INT, source, mtype, MPI_COMM_WORLD, &status);
      MPI_Recv(&c[offset][0], rows*NCB, MPI_DOUBLE, source, mtype, MPI_COMM_WORLD, &status);
      printf("Received results from task %d\n",source);
      }

   /* Print results */
   printf("Result Matrix:\n");
   for (i=0; i<NRA; i++)
      {
      for (j=0; j<NCB; j++)
         printf("%6.2f   ", c[i][j]);
      printf("\n");
      }
   printf("Done.\n");
   }

/**************************** worker task ************************************/
if (taskid > MASTER)
   {
   mtype = FROM_MASTER;
   MPI_Recv(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
   MPI_Recv(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD, &status);
   MPI_Recv(&a, rows*NCA, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
   //MPI_Recv(&b, NCA*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD, &status);
   /* Or you can Broadcast matrix B. SAME LINE AS MASTER (Source) */
   MPI_Bcast(&b, NCA*NCB, MPI_DOUBLE, MASTER, MPI_COMM_WORLD);

   printf("Task %d on %s is working on offset %d, rows %d:\n",taskid, hostname, offset, rows);
   for (k=0; k<NCB; k++)
      for (i=0; i<rows; i++)
         {
         c[i][k] = 0.0;
         for (j=0; j<NCA; j++)
            c[i][k] = c[i][k] + a[i][j] * b[j][k];
         }

   /* Send the computed rows back to the master */
   mtype = FROM_WORKER;
   MPI_Send(&offset, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
   MPI_Send(&rows, 1, MPI_INT, MASTER, mtype, MPI_COMM_WORLD);
   MPI_Send(&c, rows*NCB, MPI_DOUBLE, MASTER, mtype, MPI_COMM_WORLD);
   }

MPI_Finalize();
return 0;
}
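
/*
 * The header asks to ALWAYS compare the results with the serial version to
 * verify. Below is a minimal sketch of such a check, guarded by #if 0 because
 * it is not part of the original file: the helper name serial_verify() and the
 * tolerance EPS are assumptions. The master could call it on a, b and the
 * gathered c right after printing the result matrix, e.g.
 *   if (!serial_verify(a, b, c)) printf("VERIFICATION FAILED\n");
 */
#if 0
#include <math.h>
#define EPS 1e-9                        /* assumed tolerance for the comparison */

static int serial_verify(double a[NRA][NCA], double b[NCA][NCB], double c[NRA][NCB])
{
   int i, j, k;
   for (i = 0; i < NRA; i++)
      for (j = 0; j < NCB; j++)
         {
         double ref = 0.0;              /* serial result for element (i,j) */
         for (k = 0; k < NCA; k++)
            ref += a[i][k] * b[k][j];
         if (fabs(ref - c[i][j]) > EPS)
            return 0;                   /* mismatch: parallel result is wrong */
         }
   return 1;                            /* every element matches the serial result */
}
#endif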