replace some c++ with c
commit d7d251bac5 (parent d0d0f8140a)
2 changed files with 69 additions and 51 deletions
Makefile
@@ -7,6 +7,9 @@ CFLAGS += -O3 -g
 clean:
 	rm -f lpa
 
+lpac: lpa.cpp
+	mpicc $(CFLAGS) $(LDFLAGS) -o $@ $<
+
 lpa: lpa.cpp
 	mpic++ $(CFLAGS) $(LDFLAGS) -o $@ $<
 
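With the new target, the C-compiled binary can be built and run roughly as follows (a sketch: the input file name and process count are placeholders, not from the commit). Note that mpicc dispatches on the file extension, so compiling lpa.cpp through the C driver may still need -lstdc++ at link time:

    make lpac
    mpirun -np 4 ./lpac input.txt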
lpa.cpp
@@ -36,9 +36,14 @@ void pair_vector_init(struct pair_vector *);
 void pair_vector_clear(struct pair_vector *);
 void pair_vector_push(struct pair_vector *v, int fst, int snd);
 
+pair compute_node_range(int p, int total_num_nodes, int each_num_nodes,
+                        int process);
+
 int main(int argc, char **argv) {
-  MPI::Init(argc, argv);
-  int rank = MPI::COMM_WORLD.Get_rank(), p = MPI::COMM_WORLD.Get_size();
+  MPI_Init(&argc, &argv);
+  int rank, p;
+  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+  MPI_Comm_size(MPI_COMM_WORLD, &p);
 
   MPI_Datatype IntPairType;
   init_pair_type(&IntPairType);
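The C++ bindings on the removed lines (MPI::Init, MPI::COMM_WORLD.Get_rank()) were deprecated in MPI-2.2 and removed in MPI-3.0, which is presumably the motivation here. A minimal self-contained sketch of the C-API skeleton this hunk moves to:

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv) {
      MPI_Init(&argc, &argv);  // the C API takes pointers, unlike MPI::Init(argc, argv)
      int rank, p;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // this process's id
      MPI_Comm_size(MPI_COMM_WORLD, &p);     // total number of processes
      printf("rank %d of %d\n", rank, p);
      MPI_Finalize();
      return 0;
    }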
@@ -53,15 +58,15 @@ int main(int argc, char **argv) {
   pair params;
 
   if (rank == 0) {
-
-    printf("Hello\n");
     fp = fopen(argv[1], "r");
-    if ((read = getline(&line, &len, fp)) != -1)
+
+    // Read the first line
+    if (getline(&line, &len, fp) != -1)
       sscanf(line, "%d %d", &params.fst, &params.snd);
   }
 
   // Send the params
-  MPI_Bcast(&params, 1, IntPairType, 0, MPI::COMM_WORLD);
+  MPI_Bcast(&params, 1, IntPairType, 0, MPI_COMM_WORLD);
   int total_num_nodes = params.fst;
   int total_num_edges = params.snd;
   int each_num_nodes = total_num_nodes / p;
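init_pair_type is defined outside this diff; assuming pair is a plain struct of two ints with no padding, it is presumably equivalent to something like the following sketch (MPI_Type_contiguous is one valid way to describe such a struct; the real implementation may differ):

    // Hypothetical reconstruction -- the actual function is not shown in this commit.
    void init_pair_type(MPI_Datatype *t) {
      MPI_Type_contiguous(2, MPI_INT, t);  // two consecutive ints
      MPI_Type_commit(t);                  // required before use in communication
    }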
@@ -71,12 +76,12 @@ int main(int argc, char **argv) {
       rank == p - 1 ? total_num_nodes - rank * each_num_nodes : each_num_nodes;
   int my_nodes[num_my_nodes];
 
-  std::function<std::pair<int, int>(int)> node_range =
-      [p, total_num_nodes, each_num_nodes](int process) {
-        int start = process * each_num_nodes;
-        int end = process == p - 1 ? total_num_nodes : start + each_num_nodes;
-        return std::make_pair(start, end);
-      };
+  // std::function<std::pair<int, int>(int)> node_range =
+  //     [p, total_num_nodes, each_num_nodes](int process) {
+  //       int start = process * each_num_nodes;
+  //       int end = process == p - 1 ? total_num_nodes : start +
+  //       each_num_nodes; return std::make_pair(start, end);
+  //     };
 
   // Read the edges
   int num_my_edges;
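The lambda is kept as a comment for reference; its replacement compute_node_range (declared above, defined at the bottom of the file) computes the same half-open block. A worked example, with numbers chosen purely for illustration: total_num_nodes = 10 and p = 3 give each_num_nodes = 10 / 3 = 3 and ranges [0, 3), [3, 6), [6, 10), the last rank absorbing the remainder:

    // Illustration only: 10 nodes over 3 ranks.
    for (int r = 0; r < 3; ++r) {
      pair range = compute_node_range(3, 10, 3, r);
      printf("rank %d owns [%d, %d)\n", r, range.fst, range.snd);
    }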
@@ -90,30 +95,33 @@ int main(int argc, char **argv) {
 
   // For the current process, what's the last node we're expecting to see?
   int current_process = 0;
-  std::pair<int, int> current_node_range = node_range(current_process);
+  pair current_node_range =
+      compute_node_range(p, total_num_nodes, each_num_nodes, current_process);
   int edge_counter = 0;
 
   for (int i = 0; i < total_num_edges; ++i) {
-    getline(&line, &len, fp);
+    if (getline(&line, &len, fp) == -1)
+      break;
 
     int fst, snd;
     sscanf(line, "%d %d", &fst, &snd);
 
-    if (fst >= current_node_range.second) {
+    if (fst >= current_node_range.snd) {
       if (current_process == 0) {
         num_my_edges = edge_counter;
         my_edges = (pair *)calloc(num_my_edges, sizeof(pair));
         memcpy(my_edges, all_edges.ptr, edge_counter * sizeof(pair));
       } else {
         MPI_Send(&edge_counter, 1, MPI_INT, current_process,
-                 TAG_SEND_NUM_EDGES, MPI::COMM_WORLD);
+                 TAG_SEND_NUM_EDGES, MPI_COMM_WORLD);
         MPI_Send(all_edges.ptr, edge_counter, IntPairType, current_process,
-                 TAG_SEND_EDGES, MPI::COMM_WORLD);
+                 TAG_SEND_EDGES, MPI_COMM_WORLD);
       }
 
       // We're starting on the next process
       current_process += 1;
-      current_node_range = node_range(current_process);
+      current_node_range = compute_node_range(
+          p, total_num_nodes, each_num_nodes, current_process);
       edge_counter = 0;
       pair_vector_clear(&all_edges);
     }
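A note on this distribution scheme: rank 0 streams the file once and flushes the accumulated batch whenever fst crosses the end of the current range, so it relies on the edge list being sorted by source node. Each flush is a count message (TAG_SEND_NUM_EDGES) followed by the payload itself (TAG_SEND_EDGES).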
@@ -125,15 +133,17 @@ int main(int argc, char **argv) {
     // We have to send the last one again here, since it didn't get caught in
     // the loop above
     MPI_Send(&edge_counter, 1, MPI_INT, current_process, TAG_SEND_NUM_EDGES,
-             MPI::COMM_WORLD);
+             MPI_COMM_WORLD);
     MPI_Send(all_edges.ptr, edge_counter, IntPairType, current_process,
-             TAG_SEND_EDGES, MPI::COMM_WORLD);
+             TAG_SEND_EDGES, MPI_COMM_WORLD);
 
     free(all_edges.ptr);
   } else {
-    MPI_Recv(&num_my_edges, 1, MPI_INT, 0, TAG_SEND_NUM_EDGES, MPI::COMM_WORLD,
-             NULL);
+    MPI_Recv(&num_my_edges, 1, MPI_INT, 0, TAG_SEND_NUM_EDGES, MPI_COMM_WORLD,
+             NULL);
     my_edges = (pair *)calloc(num_my_edges, sizeof(pair));
     MPI_Recv(my_edges, num_my_edges, IntPairType, 0, TAG_SEND_EDGES,
-             MPI::COMM_WORLD, NULL);
+             MPI_COMM_WORLD, NULL);
   }
 
   char *buf = (char *)calloc(sizeof(char), 1000);
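Passing NULL as the status argument happens to work with some MPI implementations, but it is not portable; the standard way to ignore the status in the C API is MPI_STATUS_IGNORE. For the first receive above, that would read:

    MPI_Recv(&num_my_edges, 1, MPI_INT, 0, TAG_SEND_NUM_EDGES, MPI_COMM_WORLD,
             MPI_STATUS_IGNORE);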
@@ -155,17 +165,18 @@ int main(int argc, char **argv) {
 #pragma endregion
 
   // STEP 2 TIMER STARTS HERE
-  MPI::COMM_WORLD.Barrier();
-  double step_2_start_time = MPI::Wtime();
+  MPI_Barrier(MPI_COMM_WORLD);
+  double step_2_start_time = MPI_Wtime();
 
 // Each process analyzes the non-local edges that are contained in its portion
 // of the graph.
 #pragma region
   std::map<int, int> node_label_assignment;
-  std::pair<int, int> my_node_range = node_range(rank);
+  pair my_node_range =
+      compute_node_range(p, total_num_nodes, each_num_nodes, rank);
 
   // Initial node assignment
-  for (int i = my_node_range.first; i < my_node_range.second; ++i) {
+  for (int i = my_node_range.fst; i < my_node_range.snd; ++i) {
     node_label_assignment[i] = i;
   }
 
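The barrier-then-Wtime pair is the usual MPI timing idiom: the barrier lines all ranks up before the clock starts, and MPI_Wtime returns wall-clock seconds as a double, so phase timings are plain subtractions:

    MPI_Barrier(MPI_COMM_WORLD);          // all ranks start together
    double t0 = MPI_Wtime();
    // ... timed phase ...
    MPI_Barrier(MPI_COMM_WORLD);          // all ranks have finished
    double elapsed = MPI_Wtime() - t0;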
@@ -177,12 +188,12 @@ int main(int argc, char **argv) {
     pair edge = my_edges[i];
     adj[edge.fst].insert(edge.snd);
 
-    if (!(my_node_range.first <= edge.fst && edge.fst < my_node_range.second)) {
+    if (!(my_node_range.fst <= edge.fst && edge.fst < my_node_range.snd)) {
       non_local_nodes.insert(edge.fst);
       non_local_edges.insert(std::make_pair(edge.snd, edge.fst));
     }
 
-    if (!(my_node_range.first <= edge.snd && edge.snd < my_node_range.second)) {
+    if (!(my_node_range.fst <= edge.snd && edge.snd < my_node_range.snd)) {
       non_local_nodes.insert(edge.snd);
       non_local_edges.insert(std::make_pair(edge.fst, edge.snd));
     }
@@ -214,8 +225,8 @@ int main(int argc, char **argv) {
 #pragma endregion
 
   // STEP 5 TIMER STARTS HERE
-  MPI::COMM_WORLD.Barrier();
-  double step_5_start_time = MPI::Wtime();
+  MPI_Barrier(MPI_COMM_WORLD);
+  double step_5_start_time = MPI_Wtime();
 
   // The processes perform the transfers of non-local labels and updates of
   // local labels until convergence.
@@ -254,13 +265,9 @@ int main(int argc, char **argv) {
     }
 
     std::vector<int> recvbuf(recv_total, 0);
-    // std::cout << fmt::format("[{}] {} \t|| \t{}", rank,
-    //                          fmt::join(send_counts, ", "),
-    //                          fmt::join(recv_counts, ", "))
-    //           << std::endl;
-    MPI::COMM_WORLD.Alltoallv(sendbuf.data(), send_counts.data(),
-                              send_displs.data(), MPI_INT, recvbuf.data(),
-                              recv_counts.data(), recv_displs.data(), MPI_INT);
+    MPI_Alltoallv(sendbuf.data(), send_counts.data(), send_displs.data(),
+                  MPI_INT, recvbuf.data(), recv_counts.data(),
+                  recv_displs.data(), MPI_INT, MPI_COMM_WORLD);
 
     std::map<int, int> total_node_label_assignment(node_label_assignment);
     for (int i = 0; i < p; ++i) {
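Unlike the C++ binding, MPI_Alltoallv takes the communicator as its final argument. The displacement arrays it needs are exclusive prefix sums of the per-rank counts; in the context of this file (send_counts already filled in, one slot per rank), they can be built like this:

    std::vector<int> send_displs(p, 0);
    for (int i = 1; i < p; ++i)
      send_displs[i] = send_displs[i - 1] + send_counts[i - 1];
    int send_total = send_displs[p - 1] + send_counts[p - 1];
    std::vector<int> sendbuf(send_total);   // packed per-destination data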
@@ -274,7 +281,7 @@ int main(int argc, char **argv) {
 
     // For each local node, determine the minimum label out of its neighbors
     std::map<int, int> new_labels;
-    for (int i = my_node_range.first; i < my_node_range.second; ++i) {
+    for (int i = my_node_range.fst; i < my_node_range.snd; ++i) {
       int current_value = total_node_label_assignment[i];
       int min = current_value;
 
@@ -313,8 +320,8 @@ int main(int argc, char **argv) {
 #pragma endregion
 
   // END TIMERS
-  MPI::COMM_WORLD.Barrier();
-  double end_time = MPI::Wtime();
+  MPI_Barrier(MPI_COMM_WORLD);
+  double end_time = MPI_Wtime();
 
   if (rank == 0) {
     printf("2-5 Time: %0.04fs\n", end_time - step_2_start_time);
@@ -329,19 +336,20 @@ int main(int argc, char **argv) {
     std::map<int, int> label_count;
     int ctr = 0;
     for (int i = 0; i < p; ++i) {
-      std::pair<int, int> this_node_range = node_range(i);
-      int count = this_node_range.second - this_node_range.first;
+      pair this_node_range =
+          compute_node_range(p, total_num_nodes, each_num_nodes, i);
+      int count = this_node_range.snd - this_node_range.fst;
       if (i == 0) {
         for (int j = 0; j < count; ++j) {
-          all_assignments[this_node_range.first + j] =
-              node_label_assignment[this_node_range.first + j];
-          label_count[all_assignments[this_node_range.first + j]]++;
+          all_assignments[this_node_range.fst + j] =
+              node_label_assignment[this_node_range.fst + j];
+          label_count[all_assignments[this_node_range.fst + j]]++;
         }
       } else {
-        MPI::COMM_WORLD.Recv(&all_assignments[this_node_range.first], count,
-                             MPI::INT, i, TAG_SEND_FINAL_RESULT);
+        MPI::COMM_WORLD.Recv(&all_assignments[this_node_range.fst], count,
+                             MPI_INT, i, TAG_SEND_FINAL_RESULT);
         for (int j = 0; j < count; ++j) {
-          label_count[all_assignments[this_node_range.first + j]]++;
+          label_count[all_assignments[this_node_range.fst + j]]++;
        }
      }
    }
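True to the commit title, only some of the C++ bindings are replaced: this receive still goes through MPI::COMM_WORLD.Recv, now mixed with the C datatype handle MPI_INT. A sketch of the fully-C equivalent, assuming a follow-up commit finishes the migration:

    MPI_Recv(&all_assignments[this_node_range.fst], count, MPI_INT, i,
             TAG_SEND_FINAL_RESULT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);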
@@ -349,15 +357,15 @@ int main(int argc, char **argv) {
     std::cout << "Done! " << label_count.size() << std::endl;
   } else {
     std::vector<int> flat_assignments;
-    for (int i = my_node_range.first; i < my_node_range.second; ++i) {
+    for (int i = my_node_range.fst; i < my_node_range.snd; ++i) {
       flat_assignments.push_back(node_label_assignment[i]);
     }
     MPI::COMM_WORLD.Send(flat_assignments.data(), flat_assignments.size(),
-                         MPI::INT, 0, TAG_SEND_FINAL_RESULT);
+                         MPI_INT, 0, TAG_SEND_FINAL_RESULT);
   }
 #pragma endregion
 
-  MPI::Finalize();
+  MPI_Finalize();
   return 0;
 }
 
@@ -398,3 +406,10 @@ void pair_vector_push(struct pair_vector *v, int fst, int snd) {
   v->ptr[v->len].snd = snd;
   v->len++;
 }
+
+pair compute_node_range(int p, int total_num_nodes, int each_num_nodes,
+                        int process) {
+  int start = process * each_num_nodes;
+  int end = process == p - 1 ? total_num_nodes : start + each_num_nodes;
+  return {.fst = start, .snd = end};
+}
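One caveat on the new function: the return statement uses designated initializers, which are standard C since C99 but only entered standard C++ with C++20 (GCC and Clang accept them earlier as an extension). Since lpa.cpp is still also built with mpic++, a maximally portable alternative would be:

    // Portable pre-C++20 alternative to the designated initializer:
    pair r;
    r.fst = start;
    r.snd = end;
    return r;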