// csci5451/assignments/02/qs_mpi.c
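// Build/run sketch (assuming an MPI toolchain is available; argv[1] is the
// number of integers to sort, argv[2] is the output file):
//   mpicc -O2 -o qs_mpi qs_mpi.c
//   mpirun -np 4 ./qs_mpi 1000000 out.txt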
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

// https://stackoverflow.com/a/75458495
#define check_mpi_error(n) __check_mpi_error(__FILE__, __LINE__, n)
void __check_mpi_error(const char *file, const int line, const int n) {
char errbuffer[MPI_MAX_ERROR_STRING];
int errlen;
if (n != MPI_SUCCESS) {
MPI_Error_string(n, errbuffer, &errlen);
printf("MPI-error: %s\n", errbuffer);
printf("Location: %s:%i\n", file, line);
MPI_Abort(MPI_COMM_WORLD, n);
}
}
double monotonic_seconds() {
struct timespec ts;
clock_gettime(CLOCK_MONOTONIC, &ts);
return ts.tv_sec + ts.tv_nsec * 1e-9;
}
#define ORDER_FORWARDS 1
#define ORDER_BACKWARDS 2
#define CTL_SIZE 4
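// Each ctl record is CTL_SIZE ints describing one block transfer:
//   [0] element count, [1] global start index at the sender,
//   [2] local start index at the receiver, [3] local start index at the sender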
#define ROOT_RANK 0
#define GENERIC_MAX(x, y) ((x) > (y) ? (x) : (y))
#define GENERIC_MIN(x, y) ((x) < (y) ? (x) : (y))
#define ENSURE_int(i) _Generic((i), int : (i))
#define ENSURE_float(f) _Generic((f), float : (f))
#define MAX(type, x, y) (type) GENERIC_MAX(ENSURE_##type(x), ENSURE_##type(y))
#define MIN(type, x, y) (type) GENERIC_MIN(ENSURE_##type(x), ENSURE_##type(y))
void init_ctl(int *ctl, int len);
void local_quicksort(int *arr, int lo, int hi);
char *string_of_list(int *arr, int len);
void recursive_quicksort(int *integers, int total_elems, int segment_capac,
int segment_len, int *integers_out, MPI_Comm comm);
int main(int argc, char **argv) {
int rank, p;

MPI_Init(&argc, &argv);

// Expect the element count and the output filename on the command line
if (argc < 3) {
fprintf(stderr, "usage: %s <number-of-elements> <output-file>\n", argv[0]);
MPI_Abort(MPI_COMM_WORLD, 1);
}

int n = atoi(argv[1]);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &p);

// Generate integers (n is assumed to be evenly divisible by p)
int n_over_p = n / p;
int integers[n_over_p];
// Minor implementation detail: srand(0) is specially handled by glibc to
// behave as if it was called with srand(1). To get around this, I'm seeding
// with rank + 1
//
// See more: https://stackoverflow.com/a/27386563
srand(rank + 1);
for (int i = 0; i < n_over_p; ++i) {
integers[i] = rand();
}

double start = monotonic_seconds();

int new_integers[n_over_p];
recursive_quicksort(integers, n, n_over_p, n_over_p, new_integers,
MPI_COMM_WORLD);

double end = monotonic_seconds();
printf("Sort Time: %0.04fs\n", end - start);

// The first node is responsible for collecting all the data and then
// printing it out to the file
FILE *fp;
if (rank == ROOT_RANK)
fp = fopen(argv[2], "w");
for (int i = 0; i < p; i += 1) {
if (rank == ROOT_RANK) {
if (i != ROOT_RANK) {
MPI_Recv(new_integers, n_over_p, MPI_INT, i, 129, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
for (int j = 0; j < n_over_p; ++j) {
fprintf(fp, "%d\n", new_integers[j]);
}
} else if (rank == i) {
MPI_Send(new_integers, n_over_p, MPI_INT, ROOT_RANK, 129, MPI_COMM_WORLD);
}
}

if (rank == ROOT_RANK)
fclose(fp);
MPI_Finalize();
return 0;
}
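// Serial in-place quicksort (Lomuto partition with the last element as the
// pivot); used for the single-process base case and for sorting the sampled
// pivot candidates.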
// hi not inclusive
void local_quicksort(int *arr, int lo, int hi) {
int temp;
if (lo >= hi || lo < 0)
return;
int pivot = arr[hi - 1];
int pivot_idx = lo - 1;
for (int j = lo; j < hi; ++j) {
if (arr[j] < pivot) {
pivot_idx += 1;
temp = arr[j];
arr[j] = arr[pivot_idx];
arr[pivot_idx] = temp;
}
}
pivot_idx += 1;
temp = arr[hi - 1];
arr[hi - 1] = arr[pivot_idx];
arr[pivot_idx] = temp;
// Recursive call
local_quicksort(arr, lo, pivot_idx);
local_quicksort(arr, pivot_idx + 1, hi);
}
// char *string_of_list(int *arr, int len) {
// char *buffer = calloc(sizeof(char), 1000);
// int offset = 0; // Keep track of the current position in the buffer
// for (int i = 0; i < len; i++) {
// offset += sprintf(buffer + offset, "%d", arr[i]);
// if (i < len - 1) {
// // Add a separator (e.g., comma or space) if it's not the last element
// offset += sprintf(buffer + offset, " ");
// }
// }
// return buffer;
// }
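// Parallel quicksort over the processes in `comm`:
//   1. pick a pivot (the median of one random sample per process, chosen on
//      the root and broadcast),
//   2. locally partition into S (< pivot) and L (>= pivot),
//   3. compute each block's global destination with prefix scans (S packed at
//      the front of the array, L packed at the back) and exchange the blocks
//      point-to-point, driven by ctl records distributed with MPI_Alltoallv,
//   4. split the communicator at the S/L boundary and recurse on each half.
// total_elems is the number of elements across the whole communicator,
// segment_capac the allocated size of the local buffers, and segment_len the
// number of valid local elements; on return this rank's segment_len sorted
// elements are in integers_out.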
void recursive_quicksort(int *integers, int total_elems, int segment_capac,
int segment_len, int *integers_out, MPI_Comm comm) {
int err, rank, p;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &rank);
if (p <= 1) {
// Recursion base case: just sort it serially
local_quicksort(integers, 0, total_elems);
for (int i = 0; i < total_elems; ++i) {
integers_out[i] = integers[i];
}
return;
}

// Select a pivot.
// This pivot is broadcasted to all nodes
int pivot;
{
// First, select a random element
int rand_el = integers[rand() % segment_len];

// Gather it
int rand_els[p];
MPI_Gather(&rand_el, 1, MPI_INT, rand_els, 1, MPI_INT, ROOT_RANK, comm);

// Get the median
if (rank == ROOT_RANK) {
// Get the middle element after sorting
local_quicksort(rand_els, 0, p);
pivot = rand_els[p / 2];
}

MPI_Bcast(&pivot, 1, MPI_INT, ROOT_RANK, comm);
}

// Sort the local segment so that all elements smaller than the pivot form a
// contiguous block at the front; the boundary scan below (and the contiguous
// S/L send logic later) relies on this layout.
local_quicksort(integers, 0, segment_len);

// Determine where the boundary between S (lower) and L (higher) lies
int boundary = segment_len; // if every local element is < pivot, S is everything
for (int i = 0; i < segment_len; ++i) {
if (integers[i] >= pivot) {
boundary = i;
break;
}
}

int S_lo = 0, S_hi = boundary;
int L_lo = boundary, L_hi = segment_len;
int S_size = S_hi - S_lo, L_size = L_hi - L_lo;

// Perform global arrangement
int S_global_end = -1, L_reverse_end = -1, S_global_max_end = -1;
MPI_Scan(&S_size, &S_global_end, 1, MPI_INT, MPI_SUM, comm);
MPI_Scan(&L_size, &L_reverse_end, 1, MPI_INT, MPI_SUM, comm);

int index;
MPI_Scan(&segment_len, &index, 1, MPI_INT, MPI_SUM, comm);
MPI_Allreduce(&S_global_end, &S_global_max_end, 1, MPI_INT, MPI_MAX, comm);

int S_global_start = S_global_end - S_size,
L_reverse_start = L_reverse_end - L_size,
L_global_start = total_elems - L_reverse_end,
L_global_end = total_elems - L_reverse_start;
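// The inclusive scans give each process the end offset of its S block within
// the globally packed S region (which begins at index 0) and of its L block
// within the L region (which is packed at the back, hence the
// total_elems - ... conversion from "reverse" offsets). S_global_max_end is
// the total size of S, i.e. the global index where the L region begins.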
// Determine which processes the destinations of S and L start in,
// respectively
// (Defaults cover the degenerate case where the pivot puts everything on one
// side, so the scan below may not match any process.)
int S_starting_process = 0, L_starting_process = 0;
int p_of_split = p - 1, split_point = 0;
int indexes[p];
{
MPI_Allgather(&index, 1, MPI_INT, indexes, 1, MPI_INT, comm);
for (int i = 0; i < p; ++i) {
int lo = i == 0 ? 0 : indexes[i - 1];
int hi = indexes[i];
if (S_global_start >= lo && S_global_start < hi)
S_starting_process = i;
if (L_global_start >= lo && L_global_start < hi)
L_starting_process = i;
if (S_global_max_end >= lo && S_global_max_end < hi) {
p_of_split = i;
split_point = S_global_max_end - lo;
}
}
}

int S_offset = S_global_start % segment_len,
L_offset = L_global_start % segment_len;

int S_ctl[p * CTL_SIZE];
int L_ctl[p * CTL_SIZE];
int S_send_ctl[p * CTL_SIZE];
int L_send_ctl[p * CTL_SIZE];
int ctl_send_counts[p];
int ctl_send_displs[p];
int send_counts[p];
int send_displs[p];
int recv_counts[p];
int recv_displs[p];
init_ctl(S_ctl, p);
init_ctl(L_ctl, p);
init_ctl(S_send_ctl, p);
init_ctl(L_send_ctl, p);

int SPACE = segment_capac;
for (int i = 0; i < p; ++i) {
send_counts[i] = SPACE;
send_displs[i] = i * SPACE;
ctl_send_counts[i] = CTL_SIZE;
ctl_send_displs[i] = i * CTL_SIZE;
recv_counts[i] = CTL_SIZE;
recv_displs[i] = i * CTL_SIZE;
}

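// Build the ctl records describing where this rank's S block goes. The
// destination range may straddle several ranks, so the block is cut at
// S_global_end and at every multiple of segment_len, yielding one
// (count, from_global_start, to_local_start, from_local_start) record per
// destination rank; MPI_Alltoallv then tells every rank what it will receive.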
// Send S to the correct target
if (S_size) {
for (int i = S_lo, dest_pos = S_global_start,
processor = S_starting_process;
i < S_hi;) {
int next_break =
MIN(int, S_global_end,
MIN(int, dest_pos + (S_hi - S_lo),
(dest_pos / segment_len) * segment_len + segment_len));
int count = next_break - dest_pos;
int from_local_start = i, from_local_end = i + count;
int from_global_start = rank * segment_len + from_local_start,
from_global_end = from_global_start + count;

int to_global_start = dest_pos, to_global_end = dest_pos + count;
int to_local_start = to_global_start - processor * segment_len,
to_local_end = to_global_end - processor * segment_len;
S_send_ctl[processor * CTL_SIZE] = count;
S_send_ctl[processor * CTL_SIZE + 1] = from_global_start;
S_send_ctl[processor * CTL_SIZE + 2] = to_local_start;
S_send_ctl[processor * CTL_SIZE + 3] = from_local_start;
i += count;
dest_pos += count;
processor += 1;
}
}

MPI_Alltoallv(S_send_ctl, ctl_send_counts, ctl_send_displs, MPI_INT, S_ctl,
recv_counts, recv_displs, MPI_INT, comm);

// Send L to the correct target
if (L_size) {
for (int i = L_lo, dest_pos = L_global_start,
processor = L_starting_process;
i < L_hi;) {
int next_break =
MIN(int, L_global_end,
MIN(int, dest_pos + (L_hi - L_lo),
(dest_pos / segment_len) * segment_len + segment_len));
int count = next_break - dest_pos;
int from_local_start = i, from_local_end = i + count;
int from_global_start = rank * segment_len + from_local_start,
from_global_end = from_global_start + count;
int to_global_start = dest_pos, to_global_end = dest_pos + count;
int to_local_start = to_global_start - processor * segment_len,
to_local_end = to_global_end - processor * segment_len;
L_send_ctl[processor * CTL_SIZE] = count;
L_send_ctl[processor * CTL_SIZE + 1] = from_global_start;
L_send_ctl[processor * CTL_SIZE + 2] = to_local_start;
L_send_ctl[processor * CTL_SIZE + 3] = from_local_start;
i += count;
dest_pos += count;
processor += 1;
}
}

MPI_Alltoallv(L_send_ctl, ctl_send_counts, ctl_send_displs, MPI_INT, L_ctl,
recv_counts, recv_displs, MPI_INT, comm);

// After sending S and L information
for (int i = 0; i < p; ++i) {
recv_counts[i] = segment_len;
recv_displs[i] = i * segment_len;
}

// Now exchange the actual S and L data between all processes, guided by the
// ctl tables, without materializing O(n) data on any single process
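// integers_recv_2 is scratch space for data arriving via MPI_Recv, while
// integers_recv_3 accumulates the rearranged segment; it starts as a copy of
// the local data so that the self-copy case and any untouched positions still
// hold defined values.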
int integers_recv_2[segment_capac];
int integers_recv_3[segment_capac];
for (int i = 0; i < segment_len; ++i) {
integers_recv_2[i] = -1;
integers_recv_3[i] = integers[i];
}

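// Deliver the S blocks: one round per receiving rank. The receiver walks
// S_ctl (indexed by sender) and posts a receive for each block headed its
// way, handling its own contribution with a plain copy; every other rank
// checks S_send_ctl (indexed by destination) and sends its block for this
// round, if any.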
for (int host_p = 0; host_p < p; ++host_p) {
if (rank == host_p) {
// Your {S,L}_ctl is a mapping from source_processor -> ctl
// Everyone already knows who needs to send to who now
for (int sender_p = 0; sender_p < p; ++sender_p) {
int S_count = S_ctl[sender_p * CTL_SIZE];
if (S_count > 0) {
int to_local_start = S_ctl[sender_p * CTL_SIZE + 2];
int from_local_start = S_ctl[sender_p * CTL_SIZE + 3];
if (sender_p == host_p) {
for (int k = 0; k < S_count; ++k) {
integers_recv_3[to_local_start + k] =
integers[from_local_start + k];
}
continue;
}
err = MPI_Recv(&integers_recv_2[to_local_start], S_count, MPI_INT,
sender_p, 124, comm, MPI_STATUS_IGNORE);
check_mpi_error(err);
for (int k = 0; k < S_count; ++k) {
integers_recv_3[to_local_start + k] =
integers_recv_2[to_local_start + k];
}
}
}
} else {
// Your {S,L}_send_ctl contains a mapping from dest_processor -> ctl
for (int dest_p = 0; dest_p < p; ++dest_p) {
int S_count = S_send_ctl[dest_p * CTL_SIZE];
if (S_count > 0 && dest_p == host_p) {
int from_local_start = S_send_ctl[dest_p * CTL_SIZE + 3];
MPI_Send(&integers[from_local_start], S_count, MPI_INT, dest_p, 124,
comm);
}
}
}
}

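// Deliver the L blocks in exactly the same way, using tag 125 instead of 124.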
for (int host_p = 0; host_p < p; ++host_p) {
if (rank == host_p) {
// Your {S,L}_ctl is a mapping from source_processor -> ctl
// Everyone already knows who needs to send to who now
for (int sender_p = 0; sender_p < p; ++sender_p) {
int L_count = L_ctl[sender_p * CTL_SIZE];
if (L_count > 0) {
int to_local_start = L_ctl[sender_p * CTL_SIZE + 2];
int from_local_start = L_ctl[sender_p * CTL_SIZE + 3];
if (sender_p == host_p) {
for (int k = 0; k < L_count; ++k) {
integers_recv_3[to_local_start + k] =
integers[from_local_start + k];
}
continue;
}
err = MPI_Recv(&integers_recv_2[to_local_start], L_count, MPI_INT,
sender_p, 125, comm, MPI_STATUS_IGNORE);
check_mpi_error(err);
for (int k = 0; k < L_count; ++k) {
integers_recv_3[to_local_start + k] =
integers_recv_2[to_local_start + k];
}
}
}
} else {
// Your {S,L}_send_ctl contains a mapping from dest_processor -> ctl
for (int dest_p = 0; dest_p < p; ++dest_p) {
int L_count = L_send_ctl[dest_p * CTL_SIZE];
if (L_count > 0 && dest_p == host_p) {
int from_local_start = L_send_ctl[dest_p * CTL_SIZE + 3];
MPI_Send(&integers[from_local_start], L_count, MPI_INT, dest_p, 125,
comm);
}
}
}
}

// ###################################################################################
// SUBDIVIDING
// Now determine which processes are responsible for the S and L halves in the
// recursive step. The process whose segment straddles the S/L boundary breaks
// the tie: its S part is handed down to the previous process and its L part
// stays put.
int child_len = segment_len;
int difference = segment_len - split_point;
int transfer[split_point + 1]; // +1 keeps the VLA valid when there is no split

int has_split = 0;
if (p_of_split == 0 || p_of_split == p - 1) {
// Super unfortunate, bad pivot
} else if (split_point == 0) {
// Super lucky, it's split evenly!
} else {
has_split = 1;
// Let's just say that if there's any split, the block itself counts as L
// and then add the rest to the previous block
if (rank == p_of_split - 1) {
child_len += split_point;
err = MPI_Recv(transfer, split_point, MPI_INT, p_of_split, 126, comm,
MPI_STATUS_IGNORE);
check_mpi_error(err);
} else if (rank == p_of_split) {
child_len = difference;
// Hand down the S part of the rearranged segment, not the original data
err = MPI_Send(integers_recv_3, split_point, MPI_INT, p_of_split - 1, 126,
comm);
check_mpi_error(err);
}
}

// Which group is this child going into?
int color;
if (rank < p_of_split)
color = 100;
else
color = 200;
MPI_Comm child_comm;
MPI_Comm_split(comm, color, rank, &child_comm);
// Figure out the largest local length and the total number of elements in
// this half of the communicator
int max_child_buf_len, total_child_elems;
err = MPI_Allreduce(&child_len, &max_child_buf_len, 1, MPI_INT, MPI_MAX,
child_comm);
check_mpi_error(err);
err = MPI_Allreduce(&child_len, &total_child_elems, 1, MPI_INT, MPI_SUM,
child_comm);
check_mpi_error(err);
// Copy into a new buf
int new_buf[max_child_buf_len];
int whichCase = 999;
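// Assemble this rank's input for the child-level sort, padded with -1 up to
// max_child_buf_len. whichCase records which of the three layouts was used so
// the sorted result can be copied back out (and borrowed elements returned)
// after the recursive call.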
for (int i = 0; i < max_child_buf_len; ++i) {
if (has_split && rank == p_of_split - 1) {
whichCase = 1001;
if (i < segment_len)
new_buf[i] = integers_recv_3[i];
else if (i < segment_len + split_point)
new_buf[i] = transfer[i - segment_len];
else
new_buf[i] = -1;
} else if (has_split && rank == p_of_split) {
whichCase = 1002;
if (i < difference)
new_buf[i] = integers_recv_3[i + split_point];
else
new_buf[i] = -1;
} else {
whichCase = 1003;
if (i < child_len)
new_buf[i] = integers_recv_3[i];
else
new_buf[i] = -1;
}
}
int integers_out_buf[total_child_elems];
recursive_quicksort(new_buf, total_child_elems, max_child_buf_len, child_len,
integers_out_buf, child_comm);
// Ok now copy the new items back
switch (whichCase) {
case 1001:
// In this case, this process sits right before the split, so it sorted
// split_point extra elements. Undo that by sending those elements back to
// the process on the other side of the split.
for (int i = 0; i < child_len; ++i) {
if (i < segment_len)
integers_out[i] = integers_out_buf[i];
else
transfer[i - segment_len] = integers_out_buf[i];
}
MPI_Send(transfer, split_point, MPI_INT, p_of_split, 127, comm);
break;
case 1002:
// The original array got shortened, so copy the transferred ones back in
// first, then copy the result from the child quicksorting after it
MPI_Recv(transfer, split_point, MPI_INT, p_of_split - 1, 127, comm,
MPI_STATUS_IGNORE);
for (int i = 0; i < split_point; ++i) {
integers_out[i] = transfer[i];
}
for (int i = 0; i < child_len; ++i) {
integers_out[i + split_point] = integers_out_buf[i];
}
break;
case 1003:
// This is just the regular case: this rank keeps exactly child_len
// (== segment_len) sorted elements
for (int i = 0; i < child_len; ++i) {
integers_out[i] = integers_out_buf[i];
}
break;
}
MPI_Comm_free(&child_comm);
}
void init_ctl(int *ctl, int len) {
for (int i = 0; i < len; ++i) {
ctl[i * CTL_SIZE] = 0;
for (int j = 1; j < CTL_SIZE; ++j) {
ctl[i * CTL_SIZE + j] = -1;
}
}
}