diff --git a/assignments/03/bench.sh b/assignments/03/bench.sh
index 18f2039..97b66f6 100755
--- a/assignments/03/bench.sh
+++ b/assignments/03/bench.sh
@@ -1,8 +1,8 @@
-for dataset in $(echo "1000.txt" "10000.txt" "1000000.txt" "1000000.txt"); do
+for dataset in $(echo "1000.txt" "10000.txt" "100000.txt" "1000000.txt"); do
   for processors in $(echo 1 2 4 8 16 | tr ' ' '\n'); do
     # file="dataset/both_$dataset"
     file="/export/scratch/CSCI5451_F23/assignment-3/dataset/$dataset"
     echo $processors $file;
-    mpirun -n $processors ./lpa $file >> out.txt
+    mpirun -n $processors ./lpa $file graphout.txt >> stdout.txt
   done
done
\ No newline at end of file
diff --git a/assignments/03/lpa.cpp b/assignments/03/lpa.cpp
index 0f3686e..f84a85a 100644
--- a/assignments/03/lpa.cpp
+++ b/assignments/03/lpa.cpp
@@ -362,28 +362,28 @@ int main(int argc, char **argv) {
   // disk.
 #pragma region
   if (rank == 0) {
-    std::vector<int> all_assignments(total_num_nodes);
+    FILE *fp = fopen(argv[2], "w");
     std::map<int, int> label_count;
-    int ctr = 0;
     for (int process_idx = 0; process_idx < p; ++process_idx) {
       pair this_node_range = node_ranges[process_idx];
       int count = this_node_range.snd - this_node_range.fst;
       if (process_idx == 0) {
         for (int j = 0; j < count; ++j) {
-          all_assignments[this_node_range.fst + j] =
-              node_label_assignment_vec[j];
-          label_count[all_assignments[this_node_range.fst + j]]++;
+          // all_assignments[this_node_range.fst + j] =
+          //     node_label_assignment_vec[j];
+          fprintf(fp, "%d\n", node_label_assignment_vec[j]);
+          label_count[node_label_assignment_vec[j]]++;
         }
       } else {
-        MPI_Recv(&all_assignments[this_node_range.fst], count, MPI_INT,
-                 process_idx, TAG_SEND_FINAL_RESULT, MPI_COMM_WORLD, NULL);
+        int recvbuf[count];
+        MPI_Recv(&recvbuf, count, MPI_INT, process_idx, TAG_SEND_FINAL_RESULT,
+                 MPI_COMM_WORLD, NULL);
         for (int j = 0; j < count; ++j) {
-          label_count[all_assignments[this_node_range.fst + j]]++;
+          fprintf(fp, "%d\n", recvbuf[j]);
+          label_count[recvbuf[j]]++;
         }
       }
     }
-
-    std::cout << "Done! " << label_count.size() << std::endl;
   } else {
     MPI_Send(node_label_assignment_vec, num_my_nodes, MPI_INT, 0,
             TAG_SEND_FINAL_RESULT, MPI_COMM_WORLD);
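
Note on the new receive path: `int recvbuf[count]` is a variable-length array, a compiler extension rather than standard C++, and passing `NULL` as the status argument is non-portable (the MPI standard spells this `MPI_STATUS_IGNORE`). A minimal sketch of the rank-0 receive loop under those two changes, reusing `count`, `process_idx`, `TAG_SEND_FINAL_RESULT`, `fp`, and `label_count` from the hunk above:

    // Sketch, not the committed code: a heap-allocated buffer is standard
    // C++ and works for any count; MPI_STATUS_IGNORE is the portable way
    // to discard the receive status.
    std::vector<int> recvbuf(count);
    MPI_Recv(recvbuf.data(), count, MPI_INT, process_idx,
             TAG_SEND_FINAL_RESULT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    for (int j = 0; j < count; ++j) {
      fprintf(fp, "%d\n", recvbuf[j]);
      label_count[recvbuf[j]]++;
    }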
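
Relatedly, the `FILE *fp = fopen(argv[2], "w")` introduced here is neither checked nor closed within the hunk shown. A hedged sketch of the guard, assuming no cleanup happens later in the function:

    // Sketch: fail fast if the output file cannot be opened, and close it
    // once all labels are written (assumption: no fclose exists elsewhere).
    FILE *fp = fopen(argv[2], "w");
    if (fp == NULL) {
      fprintf(stderr, "failed to open %s for writing\n", argv[2]);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    // ... write labels as in the diff above ...
    fclose(fp);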