start assignment 3

Michael Zhang 2023-11-22 07:58:05 +00:00
parent 09da2cf70a
commit a373fd4b83
8 changed files with 159 additions and 16 deletions

Cargo.lock generated

@@ -65,6 +65,16 @@ version = "1.0.75"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
[[package]]
name = "assignment-01"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"itertools",
"rayon",
]
[[package]]
name = "assignment-03"
version = "0.1.0"
@@ -518,16 +528,6 @@ version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
[[package]]
name = "rust"
version = "0.1.0"
dependencies = [
"anyhow",
"clap",
"itertools",
"rayon",
]
[[package]]
name = "rustc-hash"
version = "1.1.0"

View File

@@ -30,5 +30,6 @@ COPY --from=typst /bin/typst /usr/bin/typst
RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
RUN /root/.cargo/bin/cargo install cargo-watch
RUN /root/.cargo/bin/cargo install watchexec
RUN echo 'eval "$(direnv hook bash)"' >> /root/.bashrc

View File

@@ -1,5 +1,5 @@
[package]
name = "rust"
name = "assignment-01"
version = "0.1.0"
edition = "2021"

assignments/03/.clangd Normal file

@@ -0,0 +1,2 @@
CompileFlags:
  Add: -I/usr/lib/aarch64-linux-gnu/openmpi/include -I/usr/lib/aarch64-linux-gnu/openmpi/include/openmpi

assignments/03/.gitignore vendored Normal file

@@ -0,0 +1,3 @@
lpa
compile_commands.json
.cache

assignments/03/Makefile Normal file

@@ -0,0 +1,7 @@
.PHONY: run

lpa: lpa.c
	mpicc -o $@ -g $<

run:
	watchexec -c clear 'make lpa && mpirun -n 4 ./lpa dataset/1000.txt'

assignments/03/lpa.c

@@ -1 +1,122 @@
int main() {}
#include <mpi.h>
#include <stdio.h>
#include <stddef.h> /* offsetof, used in init_pair_type */
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
typedef struct {
  int fst;
  int snd;
} pair;
void init_pair_type(MPI_Datatype *out);
int main(int argc, char **argv) {
  MPI_Init(&argc, &argv);
  int rank, p;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &p);
  MPI_Datatype IntPairType;
  init_pair_type(&IntPairType);
  // One process reads the file and distributes the data to the other processes
  // using a 1D decomposition (each rank gets approx same number of vertices).
#pragma region
  FILE *fp;
  char *line = NULL;
  size_t len = 0;
  ssize_t read;
  pair params;
  if (rank == 0) {
    printf("Hello\n");
    fp = fopen(argv[1], "r");
    if ((read = getline(&line, &len, fp)) != -1)
      sscanf(line, "%d %d", &params.fst, &params.snd);
  }
  // Send this pair
  MPI_Bcast(&params, 1, IntPairType, 0, MPI_COMM_WORLD);
  int num_nodes = params.fst;
  int num_edges = params.snd;
  int max_num_my_edges = (num_edges / p) + p;
  pair my_edges[max_num_my_edges];
  // Read the edges
  pair edges[num_edges];
  int my_count;
  int counts[p], displs[p];
  if (rank == 0) {
    line = NULL;
    for (int i = 0; i < num_edges; ++i) {
      getline(&line, &len, fp);
      sscanf(line, "%d %d", &edges[i].fst, &edges[i].snd);
    }
    int step = num_edges / p;
    for (int i = 0; i < p; ++i) {
      int start = i * step;
      int end = i == p - 1 ? num_edges : start + step;
      int count = end - start;
      counts[i] = count;
      displs[i] = start;
    }
  }
  MPI_Scatter(counts, 1, MPI_INT, &my_count, 1, MPI_INT, 0, MPI_COMM_WORLD);
  printf("[%d] #: %d\n", rank, my_count);
  MPI_Scatterv(edges, counts, displs, IntPairType, my_edges, my_count,
               IntPairType, 0, MPI_COMM_WORLD);
  if (rank == 0) {
    fclose(fp);
    if (line)
      free(line);
  }
#pragma endregion
  // Each process analyzes the non-local edges that are contained in its portion
  // of the graph.
#pragma region
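  // --- Hypothetical sketch, not part of this commit: assuming vertices are
  // block-distributed so that rank r owns the contiguous 0-based id range
  // [r * verts_per_rank, (r + 1) * verts_per_rank), scan this rank's edges and
  // count endpoints that live on another rank. The names verts_per_rank,
  // my_lo, my_hi and num_nonlocal are illustrative only.
  int verts_per_rank = (num_nodes + p - 1) / p;
  int my_lo = rank * verts_per_rank;
  int my_hi = (rank + 1) * verts_per_rank;
  if (my_hi > num_nodes)
    my_hi = num_nodes;
  int num_nonlocal = 0;
  for (int i = 0; i < my_count; ++i) {
    if (my_edges[i].fst < my_lo || my_edges[i].fst >= my_hi)
      ++num_nonlocal;
    if (my_edges[i].snd < my_lo || my_edges[i].snd >= my_hi)
      ++num_nonlocal;
  }
  printf("[%d] non-local endpoints: %d\n", rank, num_nonlocal);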
#pragma endregion
  // Each process determines which processor stores the non-local vertices
  // corresponding to the non-local edges.
#pragma region
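  // --- Hypothetical sketch, not part of this commit: with the same block
  // distribution assumed above, the owner of any vertex v can be computed
  // directly as v / verts_per_rank (capped at p - 1), so no lookup table is
  // needed. Here we only tally how many label requests would go to each rank;
  // send_counts is an illustrative name.
  int send_counts[p];
  for (int i = 0; i < p; ++i)
    send_counts[i] = 0;
  for (int i = 0; i < my_count; ++i) {
    int ends[2] = {my_edges[i].fst, my_edges[i].snd};
    for (int j = 0; j < 2; ++j) {
      int owner = ends[j] / verts_per_rank;
      if (owner >= p)
        owner = p - 1;
      if (owner != rank)
        ++send_counts[owner];
    }
  }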
#pragma endregion
  // All processes communicate to figure out which process needs to send what
  // data to which other process.
#pragma region
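  // --- Hypothetical sketch, not part of this commit: a single MPI_Alltoall on
  // the per-rank request counts tells every process how many label requests to
  // expect from each peer; the vertex ids themselves would then be exchanged
  // with MPI_Alltoallv using these counts. recv_counts is an illustrative name.
  int recv_counts[p];
  MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT,
               MPI_COMM_WORLD);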
#pragma endregion
  // The processes perform the transfers of non-local labels and updates of
  // local labels until convergence.
#pragma region
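  // --- Hypothetical sketch, not part of this commit: a minimal label
  // propagation loop for connected components, assuming 0-based vertex ids as
  // in the sketches above. Every vertex starts with its own id as its label
  // and repeatedly adopts the minimum label among its neighbours. For brevity
  // this sketch keeps the full label array on every rank and synchronises it
  // with MPI_Allreduce(MIN) instead of exchanging only the non-local labels;
  // `labels` and `changed` are illustrative names.
  int *labels = malloc(num_nodes * sizeof(int));
  for (int v = 0; v < num_nodes; ++v)
    labels[v] = v;
  int changed = 1;
  while (changed) {
    changed = 0;
    for (int i = 0; i < my_count; ++i) {
      int a = my_edges[i].fst, b = my_edges[i].snd;
      int m = labels[a] < labels[b] ? labels[a] : labels[b];
      if (labels[a] != m || labels[b] != m) {
        labels[a] = labels[b] = m;
        changed = 1;
      }
    }
    // Take the element-wise minimum of all ranks' labels, then check whether
    // any rank still made a local change this round.
    MPI_Allreduce(MPI_IN_PLACE, labels, num_nodes, MPI_INT, MPI_MIN,
                  MPI_COMM_WORLD);
    MPI_Allreduce(MPI_IN_PLACE, &changed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  }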
#pragma endregion
  // The results are gathered to a single process, which writes them to the
  // disk.
#pragma region
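  // --- Hypothetical sketch, not part of this commit: with the simplified
  // all-reduce scheme above every rank already holds the complete label array,
  // so no separate gather step is needed; rank 0 just writes one
  // "vertex label" line per vertex. The output filename is illustrative.
  if (rank == 0) {
    FILE *out = fopen("labels.txt", "w");
    for (int v = 0; v < num_nodes; ++v)
      fprintf(out, "%d %d\n", v, labels[v]);
    fclose(out);
  }
  free(labels);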
#pragma endregion
  MPI_Finalize();
  return 0;
}
void init_pair_type(MPI_Datatype *out) {
  int blocklengths[2] = {1, 1};
  MPI_Datatype types[2] = {MPI_INT, MPI_INT};
  MPI_Aint offsets[2];
  offsets[0] = offsetof(pair, fst);
  offsets[1] = offsetof(pair, snd);
  MPI_Type_create_struct(2, blocklengths, offsets, types, out);
  MPI_Type_commit(out);
}

View File

@@ -32,7 +32,13 @@ fn main() -> Result<()> {
    // One process reads the file and distributes the data to the other processes
    // using a 1D decomposition (each rank gets approx same number of vertices).
    let mut this_process_edges = Vec::<Edge>::new();
    let mut this_process_edges = vec![
        Edge {
            from_node: 0,
            to_node: 0,
        };
        1000
    ];
    if rank == root_rank {
        let file = File::open(opt.graph_file)?;
        let mut reader = BufReader::new(file);
@@ -45,10 +51,11 @@ fn main() -> Result<()> {
        let mut edges = Vec::with_capacity(num_edges);
        for _ in 0..num_edges {
            line.clear();
            reader.read_line(&mut line)?;
            let parts = line.trim().split_ascii_whitespace().collect::<Vec<_>>();
            let from_node: usize = parts[0].parse()?;
            let to_node: usize = parts[0].parse()?;
            let to_node: usize = parts[1].parse()?;
            let edge = Edge { from_node, to_node };
            edges.push(edge);
        }
@@ -65,16 +72,18 @@ fn main() -> Result<()> {
                start + step
            };
            let this_process_num_edges = end - start;
            println!(
                "[{process}/{size}] shiet {start}..{end} ({this_process_num_edges})"
            );
            counts.push(this_process_num_edges as Count);
            displs.push(start as Count);
        }
        println!("Sending {displs:?}...");
        let partition =
            Partition::<[Edge], _, _>::new(edges.as_slice(), counts, displs);
        println!("Sending...");
        root_process
            .scatter_varcount_into_root(&partition, &mut this_process_edges);
    } else {
@@ -102,7 +111,7 @@ fn main() -> Result<()> {
    Ok(())
}

#[derive(Debug, Equivalence)]
#[derive(Copy, Clone, Debug, Equivalence)]
struct Edge {
    from_node: usize,
    to_node: usize,