- 12th May 2019
- 00:08 am
- Adan Salman
Challenge - C program that does an Odd-Even sort with MPI (i.e. cluster computing)
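Background - Odd-Even (transposition) sort alternates between compare-exchanging (even, odd) and (odd, even) neighbor pairs until the array is sorted. A minimal serial sketch of that idea is below (the helper name and sample values are just an illustration, not part of the challenge); the MPI program that follows replaces single elements with sorted chunks and each compare-exchange with a compare-split between neighboring ranks.

#include <stdio.h>

// serial odd-even transposition sort: n phases of alternating
// (even, odd) and (odd, even) neighbor compare-exchanges
void odd_even_sort(int *a, int n)
{
    for (int phase = 0; phase < n; phase++)
        for (int i = phase % 2; i + 1 < n; i += 2)
            if (a[i] > a[i + 1])
            {
                int t = a[i];
                a[i] = a[i + 1];
                a[i + 1] = t;
            }
}

int main(void)
{
    int a[] = {5, -3, 9, 0, 2, -7};
    odd_even_sort(a, 6);
    for (int i = 0; i < 6; i++)
        printf("%d ", a[i]); // prints: -7 -3 0 2 5 9
    printf("\n");
    return 0;
}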
Program -
#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

#define N 400

void swap(int *x, int *y)
{
    int swp = *x;
    *x = *y;
    *y = swp;
}

// print the array's content, 10 elements per row
void print_array(int *array, int n)
{
    for (int i = 0; i < n; i++)
    {
        if (i % 10 == 0)
            printf("\n");
        printf("%d, ", array[i]);
    }
    printf("\n");
}

// serial selection sort
void sort(int *array, int n)
{
    for (int i = 0; i < n - 1; i++)
    {
        int k = i;
        for (int j = i + 1; j < n; j++)
            if (array[k] > array[j])
                k = j;
        swap(&array[i], &array[k]);
    }
}

int main(int argc, char **argv)
{
    // Initialize the MPI environment
    MPI_Init(&argc, &argv);

    // Get the number of processes
    int world_size;
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    // Get the rank of the process
    int world_rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

    // a pointer to the array to be sorted
    int *array = NULL;

    if (world_rank == 0) // root process
    {
        srand(42); // seed the random number generator
        array = malloc(sizeof(int) * N);

        // populate the array with random numbers in the -100 .. 100 range
        for (int i = 0; i < N; i++)
        {
            array[i] = rand() % 200 - 100;
        }

        printf("\n\nBefore sorting: ");
        print_array(array, N);
    }

    // the number of array elements per process
    int chunk_size = N / world_size;

    // allocate memory for 2 arrays: one to hold a local copy of the
    // array chunk and another for a neighbor's local copy
    int *local_array = malloc(sizeof(int) * chunk_size);
    int *other_array = malloc(sizeof(int) * chunk_size);

    // distribute array chunks between processes
    MPI_Scatter(array, chunk_size, MPI_INT, local_array, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);

    for (int k = 0; k < world_size; k++)
    {
        // sort the local chunk
        sort(local_array, chunk_size);

        // find the neighbor process for the current (k-th) phase
        int other_rank = (k % 2 == world_rank % 2) ? world_rank + 1 : world_rank - 1;

        // skip an invalid neighbor rank
        if (other_rank < 0 || other_rank >= world_size)
            continue;

        // exchange local array data with the neighbor (even process sends first)
        if (world_rank % 2 == 0)
        {
            MPI_Send(local_array, chunk_size, MPI_INT, other_rank, 0, MPI_COMM_WORLD);
            MPI_Recv(other_array, chunk_size, MPI_INT, other_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        }
        else
        {
            MPI_Recv(other_array, chunk_size, MPI_INT, other_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
            MPI_Send(local_array, chunk_size, MPI_INT, other_rank, 0, MPI_COMM_WORLD);
        }

        // order the local and neighbor arrays based on the relative order of their processes' ranks
        int *smaller_array = world_rank < other_rank ? local_array : other_array;
        int *larger_array = world_rank < other_rank ? other_array : local_array;

        // keep the smaller elements in smaller_array and the larger elements in
        // larger_array: the larger array is iterated from the beginning and the
        // smaller array from the end
        for (int i = 0; i < chunk_size; i++)
        {
            int j = chunk_size - 1 - i;
            if (smaller_array[j] <= larger_array[i])
                break;
            swap(&smaller_array[j], &larger_array[i]);
        }
    }

    // the last compare-split pass can leave the local chunk out of order,
    // so sort it once more before gathering
    sort(local_array, chunk_size);

    // send the sorted local_arrays back to the root process
    MPI_Gather(local_array, chunk_size, MPI_INT, array, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);

    // deallocate memory
    free(other_array);
    free(local_array);

    if (world_rank == 0)
    {
        printf("\n\nAfter sorting: ");
        print_array(array, N);
        free(array); // deallocate memory
    }

    // Finalize the MPI environment
    MPI_Finalize();

    return 0;
}
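Usage - a possible way to build and run the program, assuming an MPI toolchain such as Open MPI or MPICH is installed; the source file name and process count below are my own choices, and the process count should divide N (400) evenly because chunk_size = N / world_size:

# hypothetical file name; any name works
mpicc -O2 -o odd_even_mpi odd_even_mpi.c
mpirun -np 4 ./odd_even_mpi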