Compare commits
No commits in common. "master" and "sqlite-rewrite" have entirely different histories.
master
...
sqlite-rew
127 changed files with 6906 additions and 5041 deletions
1
.envrc
1
.envrc
|
@ -1 +1,2 @@
|
|||
export DATABASE_URL=sqlite://$(pwd)/test.db
|
||||
use flake
|
||||
|
|
1
.gitignore
vendored
1
.gitignore
vendored
|
@ -6,4 +6,3 @@ target
|
|||
test.db*
|
||||
.env
|
||||
.direnv
|
||||
/proto/generated
|
2294
Cargo.lock
generated
2294
Cargo.lock
generated
File diff suppressed because it is too large
Load diff
|
@ -1,5 +1,5 @@
|
|||
workspace.resolver = "2"
|
||||
workspace.members = ["ui/src-tauri"]
|
||||
workspace.members = ["apps/*", "crates/*", "ui/src-tauri"]
|
||||
|
||||
[profile.wasm-debug]
|
||||
inherits = "dev"
|
||||
|
|
6
Makefile
6
Makefile
|
@ -1,6 +1,6 @@
|
|||
deploy-docs:
|
||||
(cd docs; BASE_URL=/panorama bun run build) || true
|
||||
rsync -azrP docs/dist/ root@veil:/home/blogDeploy/public/panorama
|
||||
mdbook build docs
|
||||
rsync -azrP docs/book/ root@veil:/home/blogDeploy/public/panorama
|
||||
|
||||
JOURNAL_SOURCES := $(shell find . apps/journal -name "*.rs" -not -path "./target/*")
|
||||
journal: $(JOURNAL_SOURCES)
|
||||
|
@ -10,4 +10,4 @@ journal: $(JOURNAL_SOURCES)
|
|||
--target=wasm32-unknown-unknown
|
||||
|
||||
test-install-apps: journal
|
||||
cargo test -p panorama-core -- tests::test_install_apps
|
||||
cargo test -p panorama-core -- tests::test_install_apps
|
|
@ -1,6 +0,0 @@
|
|||
name: panorama/calendar
|
||||
|
||||
depends:
|
||||
- name: panorama
|
||||
|
||||
# code: dist/index.js
|
175
apps/codetrack/.gitignore
vendored
175
apps/codetrack/.gitignore
vendored
|
@ -1,175 +0,0 @@
|
|||
# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
|
||||
|
||||
# Logs
|
||||
|
||||
logs
|
||||
_.log
|
||||
npm-debug.log_
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Caches
|
||||
|
||||
.cache
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
|
||||
report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
|
||||
|
||||
# Runtime data
|
||||
|
||||
pids
|
||||
_.pid
|
||||
_.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
|
||||
.stylelintcache
|
||||
|
||||
# Microbundle cache
|
||||
|
||||
.rpt2_cache/
|
||||
.rts2_cache_cjs/
|
||||
.rts2_cache_es/
|
||||
.rts2_cache_umd/
|
||||
|
||||
# Optional REPL history
|
||||
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variable files
|
||||
|
||||
.env
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.local
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
|
||||
.nuxt
|
||||
dist
|
||||
|
||||
# Gatsby files
|
||||
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
|
||||
.temp
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
|
||||
.dynamodb/
|
||||
|
||||
# TernJS port file
|
||||
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
|
||||
.vscode-test
|
||||
|
||||
# yarn v2
|
||||
|
||||
.yarn/cache
|
||||
.yarn/unplugged
|
||||
.yarn/build-state.yml
|
||||
.yarn/install-state.gz
|
||||
.pnp.*
|
||||
|
||||
# IntelliJ based IDEs
|
||||
.idea
|
||||
|
||||
# Finder (MacOS) folder config
|
||||
.DS_Store
|
10
apps/codetrack/Cargo.toml
Normal file
10
apps/codetrack/Cargo.toml
Normal file
|
@ -0,0 +1,10 @@
|
|||
[package]
|
||||
name = "panorama-codetrack"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[[bin]]
|
||||
name = "panorama-codetrack"
|
||||
path = "rust-src/main.rs"
|
||||
|
||||
[dependencies]
|
|
@ -1,15 +0,0 @@
|
|||
# codetrack
|
||||
|
||||
To install dependencies:
|
||||
|
||||
```bash
|
||||
bun install
|
||||
```
|
||||
|
||||
To run:
|
||||
|
||||
```bash
|
||||
bun run index.ts
|
||||
```
|
||||
|
||||
This project was created using `bun init` in bun v1.0.25. [Bun](https://bun.sh) is a fast all-in-one JavaScript runtime.
|
Binary file not shown.
|
@ -1,27 +0,0 @@
|
|||
import type { Context } from "koa";
|
||||
import type {} from "@koa/bodyparser";
|
||||
|
||||
export async function createHeartbeats(ctx: Context) {
|
||||
const results = [];
|
||||
for (const heartbeat of ctx.request.body) {
|
||||
console.log("heartbeat", heartbeat);
|
||||
const time = new Date(heartbeat.time * 1000.0);
|
||||
const resp = await fetch("http://localhost:3000/node", {
|
||||
method: "PUT",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
attributes: [
|
||||
["panorama::time/start", time.toISOString()],
|
||||
["panorama/codetrack::project", heartbeat.project],
|
||||
],
|
||||
}),
|
||||
});
|
||||
const data = await resp.json();
|
||||
results.push({
|
||||
id: data.id,
|
||||
});
|
||||
}
|
||||
ctx.status = 400;
|
||||
// console.log("results", results);
|
||||
ctx.body = {};
|
||||
}
|
|
@ -1,19 +1,30 @@
|
|||
name: panorama/codetrack
|
||||
version: 0.1.0
|
||||
panorama_version: 0.1.0
|
||||
description: Code tracking app similar to WakaTime
|
||||
|
||||
depends:
|
||||
- name: panorama
|
||||
command: cargo run -p panorama-codetrack
|
||||
|
||||
code: dist/index.js
|
||||
node_types:
|
||||
- name: heartbeat
|
||||
|
||||
attributes:
|
||||
- name: heartbeat
|
||||
type: interface
|
||||
requires:
|
||||
- panorama::time/start
|
||||
keys:
|
||||
- name: start_time
|
||||
type: date
|
||||
|
||||
- name: project
|
||||
type: string
|
||||
- name: end_time
|
||||
type: date
|
||||
|
||||
- name: project
|
||||
type: text
|
||||
|
||||
indexes:
|
||||
- type: rtree
|
||||
start: panorama/codetrack/start_time
|
||||
end: panorama/codetrack/start_time
|
||||
|
||||
endpoints:
|
||||
- route: /api/v1/users/current/heartbeats.bulk
|
||||
handler: createHeartbeats
|
||||
|
||||
profiles:
|
||||
release:
|
||||
module: ./main.wasm
|
||||
|
|
|
@ -1,15 +0,0 @@
|
|||
{
|
||||
"name": "codetrack",
|
||||
"module": "index.ts",
|
||||
"type": "module",
|
||||
"devDependencies": {
|
||||
"@types/bun": "latest",
|
||||
"@types/koa": "^2.15.0"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"typescript": "^5.0.0"
|
||||
},
|
||||
"dependencies": {
|
||||
"@koa/bodyparser": "^5.1.1"
|
||||
}
|
||||
}
|
3
apps/codetrack/rust-src/main.rs
Normal file
3
apps/codetrack/rust-src/main.rs
Normal file
|
@ -0,0 +1,3 @@
|
|||
fn main() {
|
||||
println!("Hello, world!");
|
||||
}
|
|
@ -1,22 +1,9 @@
|
|||
{
|
||||
"compilerOptions": {
|
||||
"lib": ["ESNext"],
|
||||
"target": "ESNext",
|
||||
"module": "ESNext",
|
||||
"moduleDetection": "force",
|
||||
"jsx": "react-jsx",
|
||||
"allowJs": true,
|
||||
|
||||
/* Bundler mode */
|
||||
"moduleResolution": "bundler",
|
||||
"allowImportingTsExtensions": true,
|
||||
"verbatimModuleSyntax": true,
|
||||
"noEmit": true,
|
||||
|
||||
/* Linting */
|
||||
"skipLibCheck": true,
|
||||
"strict": true,
|
||||
"noFallthroughCasesInSwitch": true,
|
||||
"forceConsistentCasingInFileNames": true
|
||||
}
|
||||
"compilerOptions": {
|
||||
"lib": ["ESNext", "DOM", "DOM.Iterable"],
|
||||
"allowJs": false,
|
||||
"skipLibCheck": true,
|
||||
"target": "ESNext",
|
||||
"module": "ESNext"
|
||||
}
|
||||
}
|
||||
|
|
3
apps/codetrack/web-src/index.ts
Normal file
3
apps/codetrack/web-src/index.ts
Normal file
|
@ -0,0 +1,3 @@
|
|||
export default {
|
||||
nodeTypes: {},
|
||||
};
|
1
apps/journal/.gitignore
vendored
1
apps/journal/.gitignore
vendored
|
@ -1 +0,0 @@
|
|||
index.js
|
Binary file not shown.
|
@ -1,41 +0,0 @@
|
|||
import type { Context } from "koa";
|
||||
import { formatDate } from "date-fns";
|
||||
import { uuidv7 } from "uuidv7";
|
||||
|
||||
export async function today(ctx: Context) {
|
||||
const date = new Date();
|
||||
const day = formatDate(date, "P");
|
||||
|
||||
const resp = await fetch("http://localhost:3000/node/sql", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
query: `
|
||||
select * from node_has_attribute as na
|
||||
join attribute as a on na.attrName = a.name
|
||||
where a.name = 'day' and na.string = '${day}';
|
||||
`,
|
||||
parameters: [],
|
||||
}),
|
||||
});
|
||||
|
||||
const { rows } = await resp.json();
|
||||
if (rows.length === 0) {
|
||||
const id = uuidv7();
|
||||
const resp = await fetch("http://localhost:3000/node/sql", {
|
||||
method: "POST",
|
||||
headers: { "Content-Type": "application/json" },
|
||||
body: JSON.stringify({
|
||||
query: `
|
||||
begin transaction;
|
||||
insert into node (id) values (?);
|
||||
end transaction;
|
||||
`,
|
||||
parameters: [id],
|
||||
}),
|
||||
});
|
||||
const data = await resp.json();
|
||||
console.log("Result", data);
|
||||
}
|
||||
ctx.body = {};
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
name: panorama/journal
|
||||
code: index.js
|
||||
|
||||
attributes:
|
||||
- name: day
|
||||
type: Option<String>
|
||||
|
||||
endpoints:
|
||||
- route: /today
|
||||
handler: today
|
|
@ -1,9 +0,0 @@
|
|||
{
|
||||
"dependencies": {
|
||||
"date-fns": "^3.6.0",
|
||||
"koa": "^2.15.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/koa": "^2.15.0"
|
||||
}
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
name: panorama
|
||||
|
||||
attributes:
|
||||
- name: time/start
|
||||
type: datetime
|
||||
- name: time/end
|
||||
type: datetime
|
18
biome.json
18
biome.json
|
@ -1,18 +0,0 @@
|
|||
{
|
||||
"$schema": "https://biomejs.dev/schemas/1.4.1/schema.json",
|
||||
"organizeImports": {
|
||||
"enabled": true
|
||||
},
|
||||
"formatter": {
|
||||
"enabled": true,
|
||||
"indentWidth": 2,
|
||||
"indentStyle": "space",
|
||||
"lineWidth": 80
|
||||
},
|
||||
"linter": {
|
||||
"enabled": true,
|
||||
"rules": {
|
||||
"recommended": true
|
||||
}
|
||||
}
|
||||
}
|
BIN
bun.lockb
BIN
bun.lockb
Binary file not shown.
40
crates/panorama-core/Cargo.toml
Normal file
40
crates/panorama-core/Cargo.toml
Normal file
|
@ -0,0 +1,40 @@
|
|||
[package]
|
||||
name = "panorama-core"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
anyhow = { version = "1.0.86", features = ["backtrace"] }
|
||||
backoff = { version = "0.4.0", features = ["tokio"] }
|
||||
bimap = "0.6.3"
|
||||
chrono = { version = "0.4.38", features = ["serde"] }
|
||||
futures = "0.3.30"
|
||||
itertools = "0.13.0"
|
||||
schemars = "0.8.21"
|
||||
serde = { version = "1.0.203", features = ["derive"] }
|
||||
serde_json = "1.0.117"
|
||||
serde_yaml = "0.9.34"
|
||||
sqlx = { version = "0.7.4", features = [
|
||||
"runtime-tokio",
|
||||
"tls-rustls",
|
||||
"macros",
|
||||
"sqlite",
|
||||
"uuid",
|
||||
"chrono",
|
||||
"regexp",
|
||||
] }
|
||||
sugars = "3.0.1"
|
||||
tantivy = { version = "0.22.0", features = ["zstd"] }
|
||||
tokio = { version = "1.38.0", features = ["full"] }
|
||||
uuid = { version = "1.8.0", features = ["v7"] }
|
||||
walkdir = "2.5.0"
|
||||
wasmtime = { version = "22.0.0", default-features = false, features = [
|
||||
"runtime",
|
||||
"cranelift",
|
||||
] }
|
||||
wasmtime-wasi = "22.0.0"
|
||||
|
||||
[dependencies.async-imap]
|
||||
version = "0.9.7"
|
||||
default-features = false
|
||||
features = ["runtime-tokio"]
|
4
crates/panorama-core/build.rs
Normal file
4
crates/panorama-core/build.rs
Normal file
|
@ -0,0 +1,4 @@
|
|||
fn main() {
|
||||
println!("cargo:rerun-if-changed=../../apps");
|
||||
println!("cargo:rerun-if-changed=migrations");
|
||||
}
|
40
crates/panorama-core/migrations/00001_initial.sql
Normal file
40
crates/panorama-core/migrations/00001_initial.sql
Normal file
|
@ -0,0 +1,40 @@
|
|||
CREATE TABLE node (
|
||||
node_id TEXT PRIMARY KEY,
|
||||
node_type TEXT NOT NULL,
|
||||
updated_at INTEGER NOT NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
extra_data JSON
|
||||
);
|
||||
|
||||
CREATE TABLE node_has_key (
|
||||
node_id TEXT NOT NULL,
|
||||
full_key TEXT NOT NULL,
|
||||
PRIMARY KEY (node_id, full_key)
|
||||
);
|
||||
CREATE INDEX node_has_key_idx_node_id ON node_has_key(node_id);
|
||||
CREATE INDEX node_has_key_idx_full_key ON node_has_key(full_key);
|
||||
|
||||
-- App-related tables
|
||||
CREATE TABLE app (
|
||||
app_id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
app_name TEXT NOT NULL,
|
||||
app_version TEXT NOT NULL,
|
||||
app_version_hash TEXT,
|
||||
app_description TEXT,
|
||||
app_homepage TEXT,
|
||||
app_repository TEXT,
|
||||
app_license TEXT
|
||||
);
|
||||
|
||||
CREATE TABLE app_table_mapping (
|
||||
app_id INTEGER NOT NULL,
|
||||
app_table_name TEXT NOT NULL,
|
||||
db_table_name TEXT NOT NULL
|
||||
);
|
||||
|
||||
CREATE TABLE key_mapping (
|
||||
full_key TEXT NOT NULL,
|
||||
app_id INTEGER NOT NULL,
|
||||
app_table_name TEXT NOT NULL,
|
||||
app_table_field TEXT NOT NULL,
|
||||
is_fts_enabled BOOLEAN NOT NULL DEFAULT FALSE
|
||||
);
|
42
crates/panorama-core/src/lib.rs
Normal file
42
crates/panorama-core/src/lib.rs
Normal file
|
@ -0,0 +1,42 @@
|
|||
#[macro_use]
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
extern crate sugars;
|
||||
|
||||
pub mod migrations;
|
||||
pub mod state;
|
||||
|
||||
// pub mod mail;
|
||||
pub mod messaging;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
use std::fmt;
|
||||
|
||||
pub use crate::state::AppState;
|
||||
|
||||
use anyhow::{bail, Result};
|
||||
use serde_json::Value;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||
pub struct NodeId(pub Uuid);
|
||||
|
||||
impl fmt::Display for NodeId {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.0.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ensure_ok(s: &str) -> Result<()> {
|
||||
let status: Value = serde_json::from_str(&s)?;
|
||||
let status = status.as_object().unwrap();
|
||||
let ok = status.get("ok").unwrap().as_bool().unwrap_or(false);
|
||||
if !ok {
|
||||
let display = status.get("display").unwrap().as_str().unwrap();
|
||||
bail!("shit (error: {display})")
|
||||
}
|
||||
Ok(())
|
||||
}
|
286
crates/panorama-core/src/mail.rs
Normal file
286
crates/panorama-core/src/mail.rs
Normal file
|
@ -0,0 +1,286 @@
|
|||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
use async_imap::Session;
|
||||
use backoff::{exponential::ExponentialBackoff, SystemClock};
|
||||
use futures::TryStreamExt;
|
||||
use itertools::Itertools;
|
||||
use tokio::{net::TcpStream, time::sleep};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{mail, AppState};
|
||||
|
||||
pub struct MailWorker {
|
||||
state: AppState,
|
||||
}
|
||||
|
||||
impl MailWorker {
|
||||
pub fn new(state: AppState) -> MailWorker {
|
||||
MailWorker { state }
|
||||
}
|
||||
|
||||
pub async fn mail_loop(self) -> Result<()> {
|
||||
loop {
|
||||
let mut policy = ExponentialBackoff::<SystemClock>::default();
|
||||
policy.current_interval = Duration::from_secs(5);
|
||||
policy.initial_interval = Duration::from_secs(5);
|
||||
|
||||
backoff::future::retry(policy, || async {
|
||||
match self.mail_loop_inner().await {
|
||||
Ok(_) => {}
|
||||
Err(err) => {
|
||||
eprintln!("Mail error: {:?}", err);
|
||||
Err(err)?;
|
||||
}
|
||||
}
|
||||
// For now, just sleep 30 seconds and then fetch again
|
||||
// TODO: Run a bunch of connections at once and do IDLE over them (if possible)
|
||||
sleep(Duration::from_secs(30)).await;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn mail_loop_inner(&self) -> Result<()> {
|
||||
// Fetch the mail configs
|
||||
let configs = self.state.fetch_mail_configs()?;
|
||||
if configs.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// TODO: Do all configs instead of just the first
|
||||
let config = &configs[0];
|
||||
|
||||
let stream =
|
||||
TcpStream::connect((config.imap_hostname.as_str(), config.imap_port))
|
||||
.await?;
|
||||
|
||||
let client = async_imap::Client::new(stream);
|
||||
let mut session = client
|
||||
.login(&config.imap_username, &config.imap_password)
|
||||
.await
|
||||
.map_err(|(err, _)| err)?;
|
||||
|
||||
let all_mailbox_ids = self
|
||||
.fetch_and_store_all_mailboxes(config.node_id.to_string(), &mut session)
|
||||
.await
|
||||
.context("Could not fetch mailboxes")?;
|
||||
|
||||
self
|
||||
.fetch_all_mail_from_single_mailbox(
|
||||
&mut session,
|
||||
&all_mailbox_ids,
|
||||
config.node_id.to_string(),
|
||||
"INBOX",
|
||||
)
|
||||
.await
|
||||
.context("Could not fetch mail from INBOX")?;
|
||||
|
||||
session.logout().await.into_diagnostic()?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn fetch_and_store_all_mailboxes(
|
||||
&self,
|
||||
config_node_id: String,
|
||||
session: &mut Session<TcpStream>,
|
||||
) -> Result<HashMap<String, String>> {
|
||||
// println!("Session: {:?}", session);
|
||||
let mailboxes = session
|
||||
.list(None, Some("*"))
|
||||
.await?
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
|
||||
let mut all_mailboxes = HashMap::new();
|
||||
|
||||
// TODO: Make this more efficient by using bulk in query
|
||||
|
||||
for mailbox in mailboxes {
|
||||
let tx = self.state.db.multi_transaction(true);
|
||||
|
||||
let result = tx.run_script(
|
||||
"
|
||||
?[node_id] :=
|
||||
*mailbox{node_id, account_node_id, mailbox_name},
|
||||
account_node_id = $account_node_id,
|
||||
mailbox_name = $mailbox_name,
|
||||
",
|
||||
btmap! {
|
||||
"account_node_id".to_owned()=>DataValue::from(config_node_id.clone()),
|
||||
"mailbox_name".to_owned()=>DataValue::from(mailbox.name().to_string()),
|
||||
},
|
||||
)?;
|
||||
|
||||
let node_id = if result.rows.len() == 0 {
|
||||
let new_node_id = Uuid::now_v7();
|
||||
let new_node_id = new_node_id.to_string();
|
||||
let extra_data = json!({
|
||||
"name": mailbox.name(),
|
||||
});
|
||||
tx.run_script("
|
||||
?[node_id, account_node_id, mailbox_name, extra_data] <-
|
||||
[[$new_node_id, $account_node_id, $mailbox_name, $extra_data]]
|
||||
:put mailbox { node_id, account_node_id, mailbox_name, extra_data }
|
||||
",
|
||||
btmap! {
|
||||
"new_node_id".to_owned() => DataValue::from(new_node_id.clone()),
|
||||
"account_node_id".to_owned() => DataValue::from(config_node_id.clone()),
|
||||
"mailbox_name".to_owned()=>DataValue::from(mailbox.name().to_string()),
|
||||
"extra_data".to_owned()=>DataValue::Json(JsonData(extra_data)),
|
||||
},
|
||||
)?;
|
||||
new_node_id
|
||||
} else {
|
||||
result.rows[0][0].get_str().unwrap().to_owned()
|
||||
};
|
||||
|
||||
tx.commit()?;
|
||||
|
||||
all_mailboxes.insert(mailbox.name().to_owned(), node_id);
|
||||
}
|
||||
|
||||
// println!("All mailboxes: {:?}", all_mailboxes);
|
||||
|
||||
Ok(all_mailboxes)
|
||||
}
|
||||
|
||||
async fn fetch_all_mail_from_single_mailbox(
|
||||
&self,
|
||||
session: &mut Session<TcpStream>,
|
||||
all_mailbox_ids: &HashMap<String, String>,
|
||||
config_node_id: String,
|
||||
mailbox_name: impl AsRef<str>,
|
||||
) -> Result<()> {
|
||||
let mailbox_name = mailbox_name.as_ref();
|
||||
let mailbox = session.select(mailbox_name).await.into_diagnostic()?;
|
||||
let mailbox_node_id = all_mailbox_ids.get(mailbox_name).unwrap();
|
||||
|
||||
let extra_data = json!({
|
||||
"uid_validity": mailbox.uid_validity,
|
||||
"last_seen": mailbox.unseen,
|
||||
});
|
||||
|
||||
// TODO: Validate uid validity here
|
||||
|
||||
let all_uids = session
|
||||
.uid_search("ALL")
|
||||
.await
|
||||
.context("Could not fetch all UIDs")?;
|
||||
|
||||
println!("All UIDs ({}): {:?}", all_uids.len(), all_uids);
|
||||
|
||||
let messages = session
|
||||
.uid_fetch(
|
||||
all_uids.iter().join(","),
|
||||
"(FLAGS ENVELOPE BODY[HEADER] BODY[TEXT] INTERNALDATE)",
|
||||
)
|
||||
.await
|
||||
.into_diagnostic()?
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
.into_diagnostic()
|
||||
.context("Could not fetch messages")?;
|
||||
println!(
|
||||
"messages {:?}",
|
||||
messages.iter().map(|f| f.body()).collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
let mut unique_message_ids = HashSet::new();
|
||||
let data: Vec<_> = messages
|
||||
.iter()
|
||||
.map(|msg| {
|
||||
let message_node_id = Uuid::now_v7();
|
||||
let headers =
|
||||
String::from_utf8(msg.header().unwrap().to_vec()).unwrap();
|
||||
let headers = headers
|
||||
.split("\r\n")
|
||||
.filter_map(|s| {
|
||||
// This is really bad lmao
|
||||
let p = s.split(": ").collect::<Vec<_>>();
|
||||
if p.len() < 2 {
|
||||
None
|
||||
} else {
|
||||
Some((p[0], p[1..].join(": ")))
|
||||
}
|
||||
})
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
let message_id = headers
|
||||
.get("Message-ID")
|
||||
.map(|s| (*s).to_owned())
|
||||
.unwrap_or(message_node_id.to_string());
|
||||
unique_message_ids.insert(message_id.clone());
|
||||
|
||||
DataValue::List(vec![
|
||||
DataValue::from(message_node_id.to_string()),
|
||||
DataValue::from(config_node_id.to_string()),
|
||||
DataValue::from(mailbox_node_id.clone()),
|
||||
DataValue::from(
|
||||
headers
|
||||
.get("Subject")
|
||||
.map(|s| (*s).to_owned())
|
||||
.unwrap_or("Subject".to_owned()),
|
||||
),
|
||||
DataValue::Json(JsonData(serde_json::to_value(&headers).unwrap())),
|
||||
DataValue::Bytes(msg.text().unwrap().to_vec()),
|
||||
DataValue::from(msg.internal_date().unwrap().to_rfc3339()),
|
||||
DataValue::from(message_id),
|
||||
])
|
||||
})
|
||||
.collect();
|
||||
|
||||
println!("Adding {} messages to database...", data.len());
|
||||
let input_data = DataValue::List(data);
|
||||
|
||||
// TODO: Can this be one query?
|
||||
let tx = self.state.db.multi_transaction(true);
|
||||
|
||||
let unique_message_ids_data_value = DataValue::List(
|
||||
unique_message_ids
|
||||
.into_iter()
|
||||
.map(|s| DataValue::from(s))
|
||||
.collect_vec(),
|
||||
);
|
||||
|
||||
let existing_ids = tx.run_script(
|
||||
"
|
||||
?[node_id] := *message { node_id, message_id },
|
||||
is_in(message_id, $message_ids)
|
||||
",
|
||||
btmap! { "message_ids".to_owned() => unique_message_ids_data_value },
|
||||
)?;
|
||||
println!("Existing ids: {:?}", existing_ids);
|
||||
|
||||
self
|
||||
.state
|
||||
.db
|
||||
.run_script(
|
||||
"
|
||||
?[
|
||||
node_id, account_node_id, mailbox_node_id, subject, headers, body,
|
||||
internal_date, message_id
|
||||
] <- $input_data
|
||||
:put message {
|
||||
node_id, account_node_id, mailbox_node_id, subject, headers, body,
|
||||
internal_date, message_id
|
||||
}
|
||||
",
|
||||
btmap! {
|
||||
"input_data".to_owned() => input_data,
|
||||
},
|
||||
ScriptMutability::Mutable,
|
||||
)
|
||||
.context("Could not add message to database")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
4
crates/panorama-core/src/messaging/mod.rs
Normal file
4
crates/panorama-core/src/messaging/mod.rs
Normal file
|
@ -0,0 +1,4 @@
|
|||
//! Panorama uses an internal messaging system to pass content around
|
||||
//!
|
||||
//! This implementation is dead simple, just passes all messages and filters on the other end
|
||||
pub struct Messaging {}
|
197
crates/panorama-core/src/migrations.rs
Normal file
197
crates/panorama-core/src/migrations.rs
Normal file
|
@ -0,0 +1,197 @@
|
|||
use sqlx::migrate::Migrator;
|
||||
|
||||
|
||||
|
||||
pub static MIGRATOR: Migrator = sqlx::migrate!();
|
||||
|
||||
// pub async fn run_migrations(db: &DbInstance) -> Result<()> {
|
||||
// let migration_status = check_migration_status(db).await?;
|
||||
// println!("migration status: {:?}", migration_status);
|
||||
|
||||
// let migrations: Vec<Box<dyn for<'a> Fn(&'a DbInstance) -> Result<()>>> =
|
||||
// vec![Box::new(no_op), Box::new(migration_01)];
|
||||
|
||||
// if let MigrationStatus::NoMigrations = migration_status {
|
||||
// let result = db.run_script_str(
|
||||
// "
|
||||
// { :create migrations { yeah: Int default 0 => version: Int default 0 } }
|
||||
// {
|
||||
// ?[yeah, version] <- [[0, 0]]
|
||||
// :put migrations { yeah, version }
|
||||
// }
|
||||
// ",
|
||||
// "",
|
||||
// false,
|
||||
// );
|
||||
// ensure_ok(&result)?;
|
||||
// }
|
||||
|
||||
// let start_at_migration = match migration_status {
|
||||
// MigrationStatus::NoMigrations => 0,
|
||||
// MigrationStatus::HasVersion(n) => n,
|
||||
// };
|
||||
// let migrations_to_run = migrations
|
||||
// .iter()
|
||||
// .enumerate()
|
||||
// .skip(start_at_migration as usize + 1);
|
||||
// // println!("running {} migrations...", migrations_to_run.len());
|
||||
|
||||
// //TODO: This should all be done in a transaction
|
||||
// for (idx, migration) in migrations_to_run {
|
||||
// println!("running migration {idx}...");
|
||||
|
||||
// migration(db)?;
|
||||
|
||||
// let result = db.run_script_str(
|
||||
// "
|
||||
// ?[yeah, version] <- [[0, $version]]
|
||||
// :put migrations { yeah => version }
|
||||
// ",
|
||||
// &format!("{{\"version\":{}}}", idx),
|
||||
// false,
|
||||
// );
|
||||
|
||||
// ensure_ok(&result)?;
|
||||
|
||||
// println!("succeeded migration {idx}!");
|
||||
// }
|
||||
|
||||
// Ok(())
|
||||
// }
|
||||
|
||||
// #[derive(Debug)]
|
||||
// enum MigrationStatus {
|
||||
// NoMigrations,
|
||||
// HasVersion(u64),
|
||||
// }
|
||||
|
||||
// async fn check_migration_status(db: &DbInstance) -> Result<MigrationStatus> {
|
||||
// let status = db.run_script_str(
|
||||
// "
|
||||
// ?[yeah, version] := *migrations[yeah, version]
|
||||
// ",
|
||||
// "",
|
||||
// true,
|
||||
// );
|
||||
// println!("Status: {}", status);
|
||||
|
||||
// let status: Value = serde_json::from_str(&status).into_diagnostic()?;
|
||||
// let status = status.as_object().unwrap();
|
||||
// let ok = status.get("ok").unwrap().as_bool().unwrap_or(false);
|
||||
// if !ok {
|
||||
// let status_code = status.get("code").unwrap().as_str().unwrap();
|
||||
// if status_code == "query::relation_not_found" {
|
||||
// return Ok(MigrationStatus::NoMigrations);
|
||||
// }
|
||||
// }
|
||||
|
||||
// let rows = status.get("rows").unwrap().as_array().unwrap();
|
||||
// let row = rows[0].as_array().unwrap();
|
||||
// let version = row[1].as_number().unwrap().as_u64().unwrap();
|
||||
// println!("row: {row:?}");
|
||||
|
||||
// Ok(MigrationStatus::HasVersion(version))
|
||||
// }
|
||||
|
||||
// fn no_op(_: &DbInstance) -> Result<()> {
|
||||
// Ok(())
|
||||
// }
|
||||
|
||||
// fn migration_01(db: &DbInstance) -> Result<()> {
|
||||
// let result = db.run_script_str(
|
||||
// "
|
||||
// # Primary node type
|
||||
// {
|
||||
// :create node {
|
||||
// id: String
|
||||
// =>
|
||||
// type: String,
|
||||
// created_at: Float default now(),
|
||||
// updated_at: Float default now(),
|
||||
// extra_data: Json default {},
|
||||
// }
|
||||
// }
|
||||
|
||||
// # Inverse mappings for easy querying
|
||||
// { :create node_has_key { key: String => id: String } }
|
||||
// { ::index create node_has_key:inverse { id } }
|
||||
// { :create node_managed_by_app { node_id: String => app: String } }
|
||||
// { :create node_refers_to { node_id: String => other_node_id: String } }
|
||||
// {
|
||||
// :create fqkey_to_dbkey {
|
||||
// key: String
|
||||
// =>
|
||||
// relation: String,
|
||||
// field_name: String,
|
||||
// type: String,
|
||||
// is_fts_enabled: Bool,
|
||||
// }
|
||||
// }
|
||||
// {
|
||||
// ?[key, relation, field_name, type, is_fts_enabled] <- [
|
||||
// ['panorama/journal/page/day', 'journal_day', 'day', 'string', false],
|
||||
// ['panorama/journal/page/title', 'journal', 'title', 'string', true],
|
||||
// ['panorama/journal/page/content', 'journal', 'content', 'string', true],
|
||||
// ['panorama/mail/config/imap_hostname', 'mail_config', 'imap_hostname', 'string', false],
|
||||
// ['panorama/mail/config/imap_port', 'mail_config', 'imap_port', 'int', false],
|
||||
// ['panorama/mail/config/imap_username', 'mail_config', 'imap_username', 'string', false],
|
||||
// ['panorama/mail/config/imap_password', 'mail_config', 'imap_password', 'string', false],
|
||||
// ['panorama/mail/message/body', 'message', 'body', 'string', true],
|
||||
// ['panorama/mail/message/subject', 'message', 'subject', 'string', true],
|
||||
// ['panorama/mail/message/message_id', 'message', 'message_id', 'string', false],
|
||||
// ]
|
||||
// :put fqkey_to_dbkey { key, relation, field_name, type, is_fts_enabled }
|
||||
// }
|
||||
|
||||
// # Create journal type
|
||||
// { :create journal { node_id: String => title: String default '', content: String } }
|
||||
// { :create journal_day { day: String => node_id: String } }
|
||||
|
||||
// # Mail
|
||||
// {
|
||||
// :create mail_config {
|
||||
// node_id: String
|
||||
// =>
|
||||
// imap_hostname: String,
|
||||
// imap_port: Int,
|
||||
// imap_username: String,
|
||||
// imap_password: String,
|
||||
// }
|
||||
// }
|
||||
// {
|
||||
// :create mailbox {
|
||||
// node_id: String
|
||||
// =>
|
||||
// account_node_id: String,
|
||||
// mailbox_name: String,
|
||||
// uid_validity: Int? default null,
|
||||
// extra_data: Json default {},
|
||||
// }
|
||||
// }
|
||||
// { ::index create mailbox:by_account_id_and_name { account_node_id, mailbox_name } }
|
||||
// {
|
||||
// :create message {
|
||||
// node_id: String
|
||||
// =>
|
||||
// message_id: String,
|
||||
// account_node_id: String,
|
||||
// mailbox_node_id: String,
|
||||
// subject: String,
|
||||
// headers: Json?,
|
||||
// body: Bytes,
|
||||
// internal_date: String,
|
||||
// }
|
||||
// }
|
||||
// { ::index create message:message_id { message_id } }
|
||||
// { ::index create message:date { internal_date } }
|
||||
// { ::index create message:by_mailbox_id { mailbox_node_id } }
|
||||
|
||||
// # Calendar
|
||||
// ",
|
||||
// "",
|
||||
// false,
|
||||
// );
|
||||
// ensure_ok(&result)?;
|
||||
|
||||
// Ok(())
|
||||
// }
|
51
crates/panorama-core/src/state/apps/internal.rs
Normal file
51
crates/panorama-core/src/state/apps/internal.rs
Normal file
|
@ -0,0 +1,51 @@
|
|||
use std::io::{stdout, Write};
|
||||
|
||||
use anyhow::Result;
|
||||
use chrono::{DateTime, Utc};
|
||||
use wasmtime::{Caller, InstancePre, Linker, Memory};
|
||||
|
||||
pub struct WasmtimeModule {
|
||||
pub(crate) module: InstancePre<WasmtimeInstanceEnv>,
|
||||
}
|
||||
|
||||
impl WasmtimeModule {
|
||||
pub fn link_imports(linker: &mut Linker<WasmtimeInstanceEnv>) -> Result<()> {
|
||||
macro_rules! link_function {
|
||||
($($module:literal :: $func:ident),* $(,)?) => {
|
||||
linker $(
|
||||
.func_wrap(
|
||||
$module,
|
||||
concat!("_", stringify!($func)),
|
||||
WasmtimeInstanceEnv::$func,
|
||||
)?
|
||||
)*;
|
||||
};
|
||||
}
|
||||
abi_funcs!(link_function);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// This is loosely based on SpacetimeDB's implementation of their host.
|
||||
/// See: https://github.com/clockworklabs/SpacetimeDB/blob/c19c0d45c454db2a4215deb23c7f9f82cb5d7561/crates/core/src/host/wasmtime/wasm_instance_env.rs
|
||||
pub struct WasmtimeInstanceEnv {
|
||||
/// This is only an Option because memory is initialized after this is created so we need to come back and put it in later
|
||||
pub(crate) mem: Option<Memory>,
|
||||
}
|
||||
|
||||
impl WasmtimeInstanceEnv {
|
||||
pub fn print(mut caller: Caller<'_, Self>, len: u64, ptr: u32) {
|
||||
let mem = caller.data().mem.unwrap();
|
||||
let mut buffer = vec![0; len as usize];
|
||||
mem.read(caller, ptr as usize, &mut buffer);
|
||||
let s = String::from_utf8(buffer).unwrap();
|
||||
println!("Called print: {}", s);
|
||||
}
|
||||
|
||||
pub fn get_current_time(_: Caller<'_, Self>) -> i64 {
|
||||
let now = Utc::now();
|
||||
now.timestamp_nanos_opt().unwrap()
|
||||
}
|
||||
|
||||
pub fn register_endpoint(mut caller: Caller<'_, Self>) {}
|
||||
}
|
10
crates/panorama-core/src/state/apps/macros.rs
Normal file
10
crates/panorama-core/src/state/apps/macros.rs
Normal file
|
@ -0,0 +1,10 @@
|
|||
macro_rules! abi_funcs {
|
||||
($macro_name:ident) => {
|
||||
// TODO: Why is this "env"? How do i use another name
|
||||
$macro_name! {
|
||||
"env"::get_current_time,
|
||||
"env"::print,
|
||||
"env"::register_endpoint,
|
||||
}
|
||||
};
|
||||
}
|
7
crates/panorama-core/src/state/apps/memory.rs
Normal file
7
crates/panorama-core/src/state/apps/memory.rs
Normal file
|
@ -0,0 +1,7 @@
|
|||
use anyhow::Result;
|
||||
|
||||
pub struct Memory {
|
||||
pub memory: wasmtime::Memory,
|
||||
}
|
||||
|
||||
impl Memory {}
|
160
crates/panorama-core/src/state/apps/mod.rs
Normal file
160
crates/panorama-core/src/state/apps/mod.rs
Normal file
|
@ -0,0 +1,160 @@
|
|||
#[macro_use]
|
||||
pub mod macros;
|
||||
pub mod internal;
|
||||
pub mod manifest;
|
||||
pub mod memory;
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs::{self, File},
|
||||
io::Read,
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context as _, Result};
|
||||
use internal::{WasmtimeInstanceEnv, WasmtimeModule};
|
||||
use itertools::Itertools;
|
||||
use wasmtime::{AsContext, Config, Engine, Linker, Memory, Module, Store};
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
use self::manifest::AppManifest;
|
||||
|
||||
pub type AllAppData = HashMap<String, AppData>;
|
||||
|
||||
impl AppState {
|
||||
pub async fn install_apps_from_search_paths(&self) -> Result<AllAppData> {
|
||||
let search_paths = vec![
|
||||
PathBuf::from("/Users/michael/Projects/panorama/apps"),
|
||||
PathBuf::from("/home/michael/Projects/panorama/apps"),
|
||||
];
|
||||
|
||||
let mut found = Vec::new();
|
||||
|
||||
for path in search_paths {
|
||||
if !path.exists() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let read_dir = fs::read_dir(&path)
|
||||
.with_context(|| format!("could not read {}", path.display()))?;
|
||||
|
||||
for dir_entry in read_dir {
|
||||
let dir_entry = dir_entry?;
|
||||
let path = dir_entry.path();
|
||||
|
||||
let manifest_path = path.join("manifest.yml");
|
||||
if manifest_path.exists() {
|
||||
found.push(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut all_app_data = HashMap::new();
|
||||
for path in found {
|
||||
let app_data = self.install_app_from_path(&path).await?;
|
||||
println!("App data: {:?}", app_data);
|
||||
all_app_data.insert(
|
||||
path.display().to_string(),
|
||||
AppData {
|
||||
name: "hello".to_string(),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
Ok(all_app_data)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct AppData {
|
||||
name: String,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
async fn install_app_from_path(&self, path: impl AsRef<Path>) -> Result<()> {
|
||||
let app_path = path.as_ref();
|
||||
let manifest_path = app_path.join("manifest.yml");
|
||||
let manifest: AppManifest = {
|
||||
let file = File::open(&manifest_path)?;
|
||||
serde_yaml::from_reader(file).with_context(|| {
|
||||
format!(
|
||||
"Could not parse config file from {}",
|
||||
manifest_path.display()
|
||||
)
|
||||
})?
|
||||
};
|
||||
println!("Manifest: {:?}", manifest);
|
||||
|
||||
let module_path = app_path.join(manifest.module);
|
||||
|
||||
let installer_program = {
|
||||
let mut file = File::open(&module_path).with_context(|| {
|
||||
format!(
|
||||
"Could not open installer from path: {}",
|
||||
module_path.display()
|
||||
)
|
||||
})?;
|
||||
let mut buf = Vec::new();
|
||||
file.read_to_end(&mut buf)?;
|
||||
buf
|
||||
};
|
||||
|
||||
println!("Installer program: {} bytes", installer_program.len());
|
||||
|
||||
let config = Config::new();
|
||||
let engine = Engine::new(&config)?;
|
||||
let module = Module::new(&engine, &installer_program)?;
|
||||
|
||||
let mut linker = Linker::new(&engine);
|
||||
WasmtimeModule::link_imports(&mut linker)?;
|
||||
let module = linker.instantiate_pre(&module)?;
|
||||
let module = WasmtimeModule { module };
|
||||
|
||||
let mut state = WasmtimeInstanceEnv { mem: None };
|
||||
let mut store = Store::new(&engine, state);
|
||||
println!(
|
||||
"Required imports: {:?}",
|
||||
module
|
||||
.module
|
||||
.module()
|
||||
.imports()
|
||||
.map(|s| s.name())
|
||||
.collect_vec()
|
||||
);
|
||||
let instance = module
|
||||
.module
|
||||
.instantiate(&mut store)
|
||||
.context("Could not instantiate")?;
|
||||
let mem = instance
|
||||
.get_memory(&mut store, "memory")
|
||||
.ok_or_else(|| anyhow!("Fuck!"))?;
|
||||
store.data_mut().mem = Some(mem);
|
||||
|
||||
instance.exports(&mut store).for_each(|export| {
|
||||
println!("Export: {}", export.name());
|
||||
});
|
||||
|
||||
let hello = instance
|
||||
.get_typed_func::<(), i32>(&mut store, "install")
|
||||
.context("Could not get typed function")?;
|
||||
hello.call(&mut store, ()).context("Could not call")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn read_utf_8string<C>(
|
||||
c: C,
|
||||
mem: &Memory,
|
||||
len: usize,
|
||||
offset: usize,
|
||||
) -> Result<String>
|
||||
where
|
||||
C: AsContext,
|
||||
{
|
||||
let mut buffer = vec![0; len];
|
||||
mem.read(c, offset, &mut buffer)?;
|
||||
let string = String::from_utf8(buffer)?;
|
||||
Ok(string)
|
||||
}
|
27
crates/panorama-core/src/state/appsv0/manifest.rs
Normal file
27
crates/panorama-core/src/state/appsv0/manifest.rs
Normal file
|
@ -0,0 +1,27 @@
|
|||
use std::path::PathBuf;
|
||||
|
||||
use schemars::JsonSchema;
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct AppManifest {
|
||||
pub name: String,
|
||||
pub version: Option<String>,
|
||||
pub panorama_version: Option<String>,
|
||||
pub description: Option<String>,
|
||||
pub module: PathBuf,
|
||||
|
||||
#[serde(default)]
|
||||
pub endpoints: Vec<AppManifestEndpoint>,
|
||||
#[serde(default)]
|
||||
pub triggers: Vec<AppManifestTriggers>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct AppManifestEndpoint {
|
||||
pub url: String,
|
||||
pub method: String,
|
||||
pub export_name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, JsonSchema)]
|
||||
pub struct AppManifestTriggers {}
|
74
crates/panorama-core/src/state/appsv0/mod.rs
Normal file
74
crates/panorama-core/src/state/appsv0/mod.rs
Normal file
|
@ -0,0 +1,74 @@
|
|||
pub mod manifest;
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fs::{self, File},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use anyhow::{Context as _, Result};
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
use self::manifest::AppManifest;
|
||||
|
||||
impl AppState {
|
||||
pub async fn install_apps_from_search_paths(&self) -> Result<()> {
|
||||
let search_paths = vec![
|
||||
PathBuf::from("/Users/michael/Projects/panorama/apps"),
|
||||
PathBuf::from("/home/michael/Projects/panorama/apps"),
|
||||
];
|
||||
|
||||
let mut found = Vec::new();
|
||||
|
||||
for path in search_paths {
|
||||
if !path.exists() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let read_dir = fs::read_dir(&path)
|
||||
.with_context(|| format!("could not read {}", path.display()))?;
|
||||
|
||||
for dir_entry in read_dir {
|
||||
let dir_entry = dir_entry?;
|
||||
let path = dir_entry.path();
|
||||
|
||||
let manifest_path = path.join("manifest.yml");
|
||||
if manifest_path.exists() {
|
||||
found.push(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// let mut all_app_data = HashMap::new();
|
||||
// for path in found {
|
||||
// let app_data = self.install_app_from_path(&path).await?;
|
||||
// println!("App data: {:?}", app_data);
|
||||
// all_app_data.insert(
|
||||
// path.display().to_string(),
|
||||
// AppData {
|
||||
// name: "hello".to_string(),
|
||||
// },
|
||||
// );
|
||||
// }
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn install_app_from_path(&self, path: impl AsRef<Path>) -> Result<()> {
|
||||
let app_path = path.as_ref();
|
||||
let manifest_path = app_path.join("manifest.yml");
|
||||
let manifest: AppManifest = {
|
||||
let file = File::open(&manifest_path)?;
|
||||
serde_yaml::from_reader(file).with_context(|| {
|
||||
format!(
|
||||
"Could not parse config file from {}",
|
||||
manifest_path.display()
|
||||
)
|
||||
})?
|
||||
};
|
||||
println!("Manifest: {:?}", manifest);
|
||||
|
||||
todo!()
|
||||
}
|
||||
}
|
3
crates/panorama-core/src/state/codetrack.rs
Normal file
3
crates/panorama-core/src/state/codetrack.rs
Normal file
|
@ -0,0 +1,3 @@
|
|||
use crate::AppState;
|
||||
|
||||
impl AppState {}
|
77
crates/panorama-core/src/state/export.rs
Normal file
77
crates/panorama-core/src/state/export.rs
Normal file
|
@ -0,0 +1,77 @@
|
|||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::Result;
|
||||
use cozo::ScriptMutability;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
use super::utils::data_value_to_json_value;
|
||||
|
||||
impl AppState {
|
||||
pub async fn export(&self) -> Result<Value> {
|
||||
let result = self.db.run_script(
|
||||
"::relations",
|
||||
Default::default(),
|
||||
ScriptMutability::Immutable,
|
||||
)?;
|
||||
|
||||
let name_index = result.headers.iter().position(|x| x == "name").unwrap();
|
||||
let relation_names = result
|
||||
.rows
|
||||
.into_iter()
|
||||
.map(|row| row[name_index].get_str().unwrap().to_owned())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut relation_columns = HashMap::new();
|
||||
|
||||
for relation_name in relation_names.iter() {
|
||||
let result = self.db.run_script(
|
||||
&format!("::columns {relation_name}"),
|
||||
Default::default(),
|
||||
ScriptMutability::Immutable,
|
||||
)?;
|
||||
|
||||
let column_index =
|
||||
result.headers.iter().position(|x| x == "column").unwrap();
|
||||
let columns = result
|
||||
.rows
|
||||
.into_iter()
|
||||
.map(|row| row[column_index].get_str().unwrap().to_owned())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
relation_columns.insert(relation_name.clone(), columns);
|
||||
}
|
||||
|
||||
let tx = self.db.multi_transaction(false);
|
||||
|
||||
let mut all_relations = hmap! {};
|
||||
for relation_name in relation_names.iter() {
|
||||
if relation_name.contains(":") {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut relation_info = vec![];
|
||||
|
||||
let columns = relation_columns.get(relation_name.as_str()).unwrap();
|
||||
let columns_str = columns.join(", ");
|
||||
|
||||
let query =
|
||||
format!("?[{columns_str}] := *{relation_name} {{ {columns_str} }}");
|
||||
let result = tx.run_script(&query, Default::default())?;
|
||||
|
||||
for row in result.rows.into_iter() {
|
||||
let mut object = hmap! {};
|
||||
row.into_iter().enumerate().for_each(|(idx, col)| {
|
||||
object
|
||||
.insert(columns[idx].to_owned(), data_value_to_json_value(&col));
|
||||
});
|
||||
relation_info.push(object);
|
||||
}
|
||||
|
||||
all_relations.insert(relation_name.to_owned(), relation_info);
|
||||
}
|
||||
|
||||
Ok(json!({"relations": all_relations}))
|
||||
}
|
||||
}
|
56
crates/panorama-core/src/state/journal.rs
Normal file
56
crates/panorama-core/src/state/journal.rs
Normal file
|
@ -0,0 +1,56 @@
|
|||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Result;
|
||||
use chrono::Local;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{AppState, NodeId};
|
||||
|
||||
use super::node::CreateOrUpdate;
|
||||
|
||||
impl AppState {
|
||||
pub async fn get_todays_journal_id(&self) -> Result<NodeId> {
|
||||
let today = todays_date();
|
||||
|
||||
let result = self.db.run_script(
|
||||
"
|
||||
?[node_id] := *journal_day{day, node_id}, day = $day
|
||||
",
|
||||
btmap! {
|
||||
"day".to_owned() => today.clone().into(),
|
||||
},
|
||||
ScriptMutability::Immutable,
|
||||
)?;
|
||||
|
||||
// TODO: Do this check on the server side
|
||||
if result.rows.len() == 0 {
|
||||
// Insert a new one
|
||||
// let uuid = Uuid::now_v7();
|
||||
// let node_id = uuid.to_string();
|
||||
|
||||
let node_info = self
|
||||
.create_or_update_node(
|
||||
CreateOrUpdate::Create {
|
||||
r#type: "panorama/journal/page".to_owned(),
|
||||
},
|
||||
Some(btmap! {
|
||||
"panorama/journal/page/day".to_owned() => today.clone().into(),
|
||||
"panorama/journal/page/content".to_owned() => "".to_owned().into(),
|
||||
"panorama/journal/page/title".to_owned() => today.clone().into(),
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
return Ok(node_info.node_id);
|
||||
}
|
||||
|
||||
let node_id = result.rows[0][0].get_str().unwrap();
|
||||
Ok(NodeId(Uuid::from_str(node_id).into_diagnostic()?))
|
||||
}
|
||||
}
|
||||
|
||||
fn todays_date() -> String {
|
||||
let now = Local::now();
|
||||
let date = now.date_naive();
|
||||
date.format("%Y-%m-%d").to_string()
|
||||
}
|
47
crates/panorama-core/src/state/mail.rs
Normal file
47
crates/panorama-core/src/state/mail.rs
Normal file
|
@ -0,0 +1,47 @@
|
|||
use std::{collections::HashMap, str::FromStr, time::Duration};
|
||||
|
||||
use anyhow::Result;
|
||||
use cozo::{DataValue, JsonData, ScriptMutability};
|
||||
use futures::TryStreamExt;
|
||||
use tokio::{net::TcpStream, time::sleep};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{AppState, NodeId};
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct MailConfig {
|
||||
pub node_id: NodeId,
|
||||
pub imap_hostname: String,
|
||||
pub imap_port: u16,
|
||||
pub imap_username: String,
|
||||
pub imap_password: String,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
/// Fetch the list of mail configs in the database
|
||||
pub fn fetch_mail_configs(&self) -> Result<Vec<MailConfig>> {
|
||||
let result = self.db.run_script(
|
||||
"
|
||||
?[node_id, imap_hostname, imap_port, imap_username, imap_password] :=
|
||||
*node{ id: node_id },
|
||||
*mail_config{ node_id, imap_hostname, imap_port, imap_username, imap_password }
|
||||
",
|
||||
Default::default(),
|
||||
ScriptMutability::Immutable,
|
||||
)?;
|
||||
|
||||
let result = result
|
||||
.rows
|
||||
.into_iter()
|
||||
.map(|row| MailConfig {
|
||||
node_id: NodeId(Uuid::from_str(row[0].get_str().unwrap()).unwrap()),
|
||||
imap_hostname: row[1].get_str().unwrap().to_owned(),
|
||||
imap_port: row[2].get_int().unwrap() as u16,
|
||||
imap_username: row[3].get_str().unwrap().to_owned(),
|
||||
imap_password: row[4].get_str().unwrap().to_owned(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
}
|
117
crates/panorama-core/src/state/mod.rs
Normal file
117
crates/panorama-core/src/state/mod.rs
Normal file
|
@ -0,0 +1,117 @@
|
|||
// pub mod apps;
|
||||
// pub mod codetrack;
|
||||
// pub mod export;
|
||||
// pub mod journal;
|
||||
// pub mod mail;
|
||||
pub mod appsv0;
|
||||
pub mod node;
|
||||
pub mod node_raw;
|
||||
// pub mod utils;
|
||||
|
||||
use std::{collections::HashMap, fs, path::Path};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use bimap::BiMap;
|
||||
use sqlx::{
|
||||
pool::PoolConnection,
|
||||
sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions},
|
||||
Sqlite, SqlitePool,
|
||||
};
|
||||
use tantivy::{
|
||||
directory::MmapDirectory,
|
||||
schema::{Field, Schema, STORED, STRING, TEXT},
|
||||
Index,
|
||||
};
|
||||
use wasmtime::Module;
|
||||
|
||||
use crate::{
|
||||
// mail::MailWorker,
|
||||
migrations::MIGRATOR,
|
||||
};
|
||||
|
||||
pub fn tantivy_schema() -> (Schema, BiMap<String, Field>) {
|
||||
let mut schema_builder = Schema::builder();
|
||||
|
||||
let mut field_map = BiMap::new();
|
||||
|
||||
let node_id = schema_builder.add_text_field("node_id", STRING | STORED);
|
||||
field_map.insert("node_id".to_owned(), node_id);
|
||||
|
||||
let journal_content = schema_builder.add_text_field("title", TEXT | STORED);
|
||||
field_map.insert("panorama/journal/page/content".to_owned(), journal_content);
|
||||
|
||||
(schema_builder.build(), field_map)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppState {
|
||||
pub db: SqlitePool,
|
||||
pub tantivy_index: Index,
|
||||
pub tantivy_field_map: BiMap<String, Field>,
|
||||
|
||||
pub app_wasm_modules: HashMap<String, Module>,
|
||||
// TODO: Compile this into a more efficient thing than just iter
|
||||
pub app_routes: HashMap<String, Vec<AppRoute>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct AppRoute {
|
||||
route: String,
|
||||
handler_name: String,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
pub async fn new(panorama_dir: impl AsRef<Path>) -> Result<Self> {
|
||||
let panorama_dir = panorama_dir.as_ref().to_path_buf();
|
||||
fs::create_dir_all(&panorama_dir)
|
||||
.context("Could not create panorama directory")?;
|
||||
|
||||
println!("Panorama dir: {}", panorama_dir.display());
|
||||
|
||||
let (tantivy_index, tantivy_field_map) = {
|
||||
let (schema, field_map) = tantivy_schema();
|
||||
let tantivy_path = panorama_dir.join("tantivy-index");
|
||||
fs::create_dir_all(&tantivy_path)?;
|
||||
let dir = MmapDirectory::open(&tantivy_path)?;
|
||||
let index = Index::builder().schema(schema).open_or_create(dir)?;
|
||||
(index, field_map)
|
||||
};
|
||||
|
||||
let db_path = panorama_dir.join("db.sqlite");
|
||||
let sqlite_connect_options = SqliteConnectOptions::new()
|
||||
.filename(db_path)
|
||||
.journal_mode(SqliteJournalMode::Wal)
|
||||
.create_if_missing(true);
|
||||
let db = SqlitePoolOptions::new()
|
||||
.connect_with(sqlite_connect_options)
|
||||
.await
|
||||
.context("Could not connect to SQLite database")?;
|
||||
|
||||
let state = AppState {
|
||||
db,
|
||||
tantivy_index,
|
||||
tantivy_field_map,
|
||||
app_wasm_modules: Default::default(),
|
||||
app_routes: Default::default(),
|
||||
};
|
||||
state.init().await?;
|
||||
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
pub async fn conn(&self) -> Result<PoolConnection<Sqlite>> {
|
||||
self.db.acquire().await.map_err(|err| err.into())
|
||||
}
|
||||
|
||||
async fn init(&self) -> Result<()> {
|
||||
// run_migrations(&self.db).await?;
|
||||
MIGRATOR
|
||||
.run(&self.db)
|
||||
.await
|
||||
.context("Could not migrate database")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn handle_app_route() {}
|
||||
}
|
523
crates/panorama-core/src/state/node.rs
Normal file
523
crates/panorama-core/src/state/node.rs
Normal file
|
@ -0,0 +1,523 @@
|
|||
use std::collections::{BTreeMap, HashMap};
|
||||
|
||||
use anyhow::Result;
|
||||
use chrono::{DateTime, Utc};
|
||||
use itertools::Itertools;
|
||||
use serde_json::Value;
|
||||
use sqlx::{Connection, Executor, FromRow, QueryBuilder, Sqlite};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{state::node_raw::FieldMappingRow, AppState, NodeId};
|
||||
|
||||
// use super::utils::owned_value_to_json_value;
|
||||
|
||||
pub type ExtraData = BTreeMap<String, Value>;
|
||||
pub type FieldsByTable<'a> =
|
||||
HashMap<(&'a i64, &'a String), Vec<&'a FieldMappingRow>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NodeInfo {
|
||||
pub node_id: NodeId,
|
||||
pub created_at: DateTime<Utc>,
|
||||
pub updated_at: DateTime<Utc>,
|
||||
pub fields: Option<HashMap<String, Value>>,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
/// Get all properties of a node
|
||||
pub async fn get_node(&self, node_id: impl AsRef<str>) -> Result<NodeInfo> {
|
||||
let node_id = node_id.as_ref().to_owned();
|
||||
let mut conn = self.conn().await?;
|
||||
|
||||
conn
|
||||
.transaction::<_, _, sqlx::Error>(|tx| {
|
||||
Box::pin(async move {
|
||||
let node_id = node_id.clone();
|
||||
let field_mapping =
|
||||
AppState::get_related_field_list_for_node_id(&mut **tx, &node_id)
|
||||
.await?;
|
||||
|
||||
// Group the keys by which relation they're in
|
||||
let fields_by_table = field_mapping.iter().into_group_map_by(
|
||||
|FieldMappingRow {
|
||||
app_id,
|
||||
app_table_name,
|
||||
..
|
||||
}| (app_id, app_table_name),
|
||||
);
|
||||
|
||||
// Run the query that grabs all of the relevant fields, and coalesce
|
||||
// the fields back
|
||||
let related_fields =
|
||||
AppState::query_related_fields(&mut **tx, &fields_by_table).await?;
|
||||
|
||||
println!("Related fields: {:?}", related_fields);
|
||||
|
||||
// let created_at = DateTime::from_timestamp_millis(
|
||||
// (result.rows[0][2].get_float().unwrap() * 1000.0) as i64,
|
||||
// )
|
||||
// .unwrap();
|
||||
|
||||
// let updated_at = DateTime::from_timestamp_millis(
|
||||
// (result.rows[0][3].get_float().unwrap() * 1000.0) as i64,
|
||||
// )
|
||||
// .unwrap();
|
||||
|
||||
// let mut fields = HashMap::new();
|
||||
|
||||
// for row in result
|
||||
// .rows
|
||||
// .into_iter()
|
||||
// .map(|row| row.into_iter().skip(4).zip(all_fields.iter()))
|
||||
// {
|
||||
// for (value, (_, _, field_name)) in row {
|
||||
// fields.insert(
|
||||
// field_name.to_string(),
|
||||
// data_value_to_json_value(&value),
|
||||
// );
|
||||
// }
|
||||
// }
|
||||
|
||||
todo!()
|
||||
|
||||
// Ok(NodeInfo {
|
||||
// node_id: NodeId(Uuid::from_str(&node_id).unwrap()),
|
||||
// created_at,
|
||||
// updated_at,
|
||||
// fields: Some(fields),
|
||||
// })
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
|
||||
todo!()
|
||||
// Ok(())
|
||||
}
|
||||
|
||||
async fn query_related_fields<'e, 'c: 'e, X>(
|
||||
x: X,
|
||||
fields_by_table: &FieldsByTable<'_>,
|
||||
) -> sqlx::Result<HashMap<String, Value>>
|
||||
where
|
||||
X: 'e + Executor<'c, Database = Sqlite>,
|
||||
{
|
||||
let mut query = QueryBuilder::new("");
|
||||
let mut mapping = HashMap::new();
|
||||
let mut ctr = 0;
|
||||
|
||||
let mut selected_fields = vec![];
|
||||
for ((app_id, app_table_name), fields) in fields_by_table.iter() {
|
||||
let table_gen_name = format!("c{ctr}");
|
||||
ctr += 1;
|
||||
|
||||
let mut keys = vec![];
|
||||
for field_info in fields.iter() {
|
||||
let field_gen_name = format!("f{ctr}");
|
||||
ctr += 1;
|
||||
mapping.insert(&field_info.full_key, field_gen_name.clone());
|
||||
|
||||
keys.push(field_gen_name.clone());
|
||||
|
||||
selected_fields.push(format!(
|
||||
"{}.{} as {}",
|
||||
table_gen_name, field_info.app_table_field, field_gen_name
|
||||
));
|
||||
|
||||
// constraints.push(format!(
|
||||
// "{}: {}",
|
||||
// field_info.relation_field.to_owned(),
|
||||
// field_gen_name,
|
||||
// ));
|
||||
// all_fields.push((
|
||||
// field_gen_name,
|
||||
// field_info.relation_field.to_owned(),
|
||||
// key,
|
||||
// ))
|
||||
}
|
||||
|
||||
// let keys = keys.join(", ");
|
||||
// let constraints = constraints.join(", ");
|
||||
// all_relation_queries.push(format!(
|
||||
// "
|
||||
// {table_gen_name}[{keys}] :=
|
||||
// *{relation}{{ node_id, {constraints} }},
|
||||
// node_id = $node_id
|
||||
// "
|
||||
// ));
|
||||
// all_relation_constraints.push(format!("{table_gen_name}[{keys}],"))
|
||||
}
|
||||
|
||||
if selected_fields.is_empty() {
|
||||
return Ok(HashMap::new());
|
||||
}
|
||||
|
||||
query.push("SELECT ");
|
||||
query.push(selected_fields.join(", "));
|
||||
query.push(" FROM ");
|
||||
println!("Query: {:?}", query.sql());
|
||||
|
||||
// let all_relation_constraints = all_relation_constraints.join("\n");
|
||||
// let all_relation_queries = all_relation_queries.join("\n\n");
|
||||
// let all_field_names = all_fields
|
||||
// .iter()
|
||||
// .map(|(field_name, _, _)| field_name)
|
||||
// .join(", ");
|
||||
// let _query = format!(
|
||||
// "
|
||||
// {all_relation_queries}
|
||||
|
||||
// ?[type, extra_data, created_at, updated_at, {all_field_names}] :=
|
||||
// *node {{ id, type, created_at, updated_at, extra_data }},
|
||||
// {all_relation_constraints}
|
||||
// id = $node_id
|
||||
// "
|
||||
// );
|
||||
|
||||
let rows = query.build().fetch_all(x).await;
|
||||
|
||||
todo!()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum CreateOrUpdate {
|
||||
Create { r#type: String },
|
||||
Update { node_id: NodeId },
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
// TODO: Split this out into create and update
|
||||
pub async fn create_or_update_node(
|
||||
&self,
|
||||
opts: CreateOrUpdate,
|
||||
extra_data: Option<ExtraData>,
|
||||
) -> Result<NodeInfo> {
|
||||
let node_id = match opts {
|
||||
CreateOrUpdate::Create { .. } => NodeId(Uuid::now_v7()),
|
||||
CreateOrUpdate::Update { ref node_id } => node_id.clone(),
|
||||
};
|
||||
let node_id = node_id.to_string();
|
||||
|
||||
let action = match opts {
|
||||
CreateOrUpdate::Create { .. } => "put",
|
||||
CreateOrUpdate::Update { .. } => "update",
|
||||
};
|
||||
|
||||
println!("Request: {opts:?} {extra_data:?}");
|
||||
|
||||
let mut conn = self.conn().await?;
|
||||
|
||||
conn
|
||||
.transaction::<_, _, sqlx::Error>(|tx| {
|
||||
Box::pin(async move {
|
||||
let node_info = match opts {
|
||||
CreateOrUpdate::Create { r#type } => {
|
||||
AppState::create_node_raw(&mut **tx, &r#type).await?
|
||||
}
|
||||
CreateOrUpdate::Update { node_id } => todo!(),
|
||||
};
|
||||
|
||||
if let Some(extra_data) = extra_data {
|
||||
if !extra_data.is_empty() {
|
||||
let node_id_str = node_id.to_string();
|
||||
let field_mapping = AppState::get_related_field_list_for_node_id(
|
||||
&mut **tx,
|
||||
&node_id_str,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Group the keys by which relation they're in
|
||||
let fields_by_table = field_mapping.iter().into_group_map_by(
|
||||
|FieldMappingRow {
|
||||
app_id,
|
||||
app_table_name,
|
||||
..
|
||||
}| (app_id, app_table_name),
|
||||
);
|
||||
|
||||
AppState::write_extra_data(
|
||||
&mut **tx,
|
||||
&node_id_str,
|
||||
&fields_by_table,
|
||||
extra_data,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(node_info)
|
||||
})
|
||||
})
|
||||
.await
|
||||
.map_err(|err| err.into())
|
||||
}
|
||||
|
||||
async fn create_node_raw<'e, 'c: 'e, X>(
|
||||
x: X,
|
||||
r#type: &str,
|
||||
) -> sqlx::Result<NodeInfo>
|
||||
where
|
||||
X: 'e + Executor<'c, Database = Sqlite>,
|
||||
{
|
||||
let node_id = Uuid::now_v7();
|
||||
let node_id_str = node_id.to_string();
|
||||
|
||||
#[derive(FromRow)]
|
||||
struct Result {
|
||||
updated_at: i64,
|
||||
}
|
||||
|
||||
let result = sqlx::query_as!(
|
||||
Result,
|
||||
r#"
|
||||
INSERT INTO node (node_id, node_type, extra_data)
|
||||
VALUES (?, ?, "{}")
|
||||
RETURNING updated_at
|
||||
"#,
|
||||
node_id_str,
|
||||
r#type,
|
||||
)
|
||||
.fetch_one(x)
|
||||
.await?;
|
||||
|
||||
let updated_at =
|
||||
DateTime::from_timestamp_millis(result.updated_at * 1000).unwrap();
|
||||
let created_at = DateTime::from_timestamp_millis(
|
||||
node_id.get_timestamp().unwrap().to_unix().0 as i64 * 1000,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
Ok(NodeInfo {
|
||||
node_id: NodeId(node_id),
|
||||
created_at,
|
||||
updated_at,
|
||||
fields: None,
|
||||
})
|
||||
}
|
||||
|
||||
async fn write_extra_data<'e, 'c: 'e, X>(
|
||||
x: X,
|
||||
node_id: &str,
|
||||
fields_by_table: &FieldsByTable<'_>,
|
||||
extra_data: ExtraData,
|
||||
) -> sqlx::Result<()>
|
||||
where
|
||||
X: 'e + Executor<'c, Database = Sqlite>,
|
||||
{
|
||||
// Update Tantivy indexes
|
||||
// for ((app_id, app_table_name), fields) in fields_by_table.iter() {
|
||||
// let mut writer =
|
||||
// self.tantivy_index.writer(15_000_000).into_diagnostic()?;
|
||||
|
||||
// let delete_term = Term::from_field_text(node_id_field.clone(), &node_id);
|
||||
// writer.delete_term(delete_term);
|
||||
|
||||
// writer.add_document(doc).into_diagnostic()?;
|
||||
// writer.commit().into_diagnostic()?;
|
||||
// drop(writer);
|
||||
// }
|
||||
|
||||
// Update database
|
||||
let mut node_has_keys = Vec::new();
|
||||
println!("Fields by table: {:?}", fields_by_table);
|
||||
for ((app_id, app_table_name), fields) in fields_by_table.iter() {
|
||||
for field_info in fields {
|
||||
node_has_keys.push(&field_info.full_key);
|
||||
}
|
||||
|
||||
// let mut doc =
|
||||
// btmap! { node_id_field.clone() => OwnedValue::Str(node_id.to_owned()) };
|
||||
// let fields_mapping = fields
|
||||
// .into_iter()
|
||||
// .map(
|
||||
// |(
|
||||
// key,
|
||||
// FieldInfo {
|
||||
// relation_field,
|
||||
// r#type,
|
||||
// is_fts_enabled,
|
||||
// ..
|
||||
// },
|
||||
// )| {
|
||||
// let new_value = extra_data.get(*key).unwrap();
|
||||
|
||||
// // TODO: Make this more generic
|
||||
// let new_value = match r#type.as_str() {
|
||||
// "int" => DataValue::from(new_value.as_i64().unwrap()),
|
||||
// _ => DataValue::from(new_value.as_str().unwrap()),
|
||||
// };
|
||||
|
||||
// if *is_fts_enabled {
|
||||
// if let Some(field) = self.tantivy_field_map.get_by_left(*key) {
|
||||
// doc.insert(
|
||||
// field.clone(),
|
||||
// OwnedValue::Str(new_value.get_str().unwrap().to_owned()),
|
||||
// );
|
||||
// }
|
||||
// }
|
||||
|
||||
// (relation_field.to_owned(), new_value)
|
||||
// },
|
||||
// )
|
||||
// .collect::<BTreeMap<_, _>>();
|
||||
|
||||
// let keys = fields_mapping.keys().collect::<Vec<_>>();
|
||||
// let keys_joined = keys.iter().join(", ");
|
||||
|
||||
// if !keys.is_empty() {
|
||||
// let query = format!(
|
||||
// "
|
||||
// ?[ node_id, {keys_joined} ] <- [$input_data]
|
||||
// :{action} {relation} {{ node_id, {keys_joined} }}
|
||||
// "
|
||||
// );
|
||||
|
||||
// let mut params = vec![];
|
||||
// params.push(DataValue::from(node_id.clone()));
|
||||
// for key in keys {
|
||||
// params.push(fields_mapping[key].clone());
|
||||
// }
|
||||
|
||||
// let result = tx.run_script(
|
||||
// &query,
|
||||
// btmap! {
|
||||
// "input_data".to_owned() => DataValue::List(params),
|
||||
// },
|
||||
// );
|
||||
// }
|
||||
}
|
||||
|
||||
if !node_has_keys.is_empty() {
|
||||
let mut query =
|
||||
QueryBuilder::new("INSERT INTO node_has_key (node_id, full_key) ");
|
||||
query.push_values(node_has_keys, |mut b, key| {
|
||||
b.push_bind(node_id).push_bind(key);
|
||||
});
|
||||
println!("Query: {:?}", query.sql());
|
||||
query.build().execute(x).await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// impl AppState {
|
||||
|
||||
// pub async fn update_node() {}
|
||||
|
||||
// pub async fn search_nodes(
|
||||
// &self,
|
||||
// query: impl AsRef<str>,
|
||||
// ) -> Result<Vec<(NodeId, Value)>> {
|
||||
// let query = query.as_ref();
|
||||
|
||||
// let reader = self.tantivy_index.reader().into_diagnostic()?;
|
||||
// let searcher = reader.searcher();
|
||||
|
||||
// let node_id_field = self
|
||||
// .tantivy_field_map
|
||||
// .get_by_left("node_id")
|
||||
// .unwrap()
|
||||
// .clone();
|
||||
// let journal_page_field = self
|
||||
// .tantivy_field_map
|
||||
// .get_by_left("panorama/journal/page/content")
|
||||
// .unwrap()
|
||||
// .clone();
|
||||
// let mut query_parser =
|
||||
// QueryParser::for_index(&self.tantivy_index, vec![journal_page_field]);
|
||||
// query_parser.set_field_fuzzy(journal_page_field, true, 2, true);
|
||||
// let query = query_parser.parse_query(query).into_diagnostic()?;
|
||||
|
||||
// let top_docs = searcher
|
||||
// .search(&query, &TopDocs::with_limit(10))
|
||||
// .into_diagnostic()?;
|
||||
|
||||
// Ok(
|
||||
// top_docs
|
||||
// .into_iter()
|
||||
// .map(|(score, doc_address)| {
|
||||
// let retrieved_doc =
|
||||
// searcher.doc::<TantivyDocument>(doc_address).unwrap();
|
||||
// let node_id = retrieved_doc
|
||||
// .get_first(node_id_field.clone())
|
||||
// .unwrap()
|
||||
// .as_str()
|
||||
// .unwrap();
|
||||
// let all_fields = retrieved_doc.get_sorted_field_values();
|
||||
// let node_id = NodeId(Uuid::from_str(node_id).unwrap());
|
||||
// let fields = all_fields
|
||||
// .into_iter()
|
||||
// .map(|(field, values)| {
|
||||
// (
|
||||
// self.tantivy_field_map.get_by_right(&field).unwrap(),
|
||||
// if values.len() == 1 {
|
||||
// owned_value_to_json_value(values[0])
|
||||
// } else {
|
||||
// Value::Array(
|
||||
// values
|
||||
// .into_iter()
|
||||
// .map(owned_value_to_json_value)
|
||||
// .collect_vec(),
|
||||
// )
|
||||
// },
|
||||
// )
|
||||
// })
|
||||
// .collect::<HashMap<_, _>>();
|
||||
// (
|
||||
// node_id,
|
||||
// json!({
|
||||
// "score": score,
|
||||
// "fields": fields,
|
||||
// }),
|
||||
// )
|
||||
// })
|
||||
// .collect::<Vec<_>>(),
|
||||
// )
|
||||
// }
|
||||
|
||||
// fn get_rows_for_extra_keys(
|
||||
// &self,
|
||||
// tx: &MultiTransaction,
|
||||
// keys: &[String],
|
||||
// ) -> Result<FieldMapping> {
|
||||
// let result = tx.run_script(
|
||||
// "
|
||||
// ?[key, relation, field_name, type, is_fts_enabled] :=
|
||||
// *fqkey_to_dbkey{key, relation, field_name, type, is_fts_enabled},
|
||||
// is_in(key, $keys)
|
||||
// ",
|
||||
// btmap! {
|
||||
// "keys".to_owned() => DataValue::List(
|
||||
// keys.into_iter()
|
||||
// .map(|s| DataValue::from(s.as_str()))
|
||||
// .collect::<Vec<_>>()
|
||||
// ),
|
||||
// },
|
||||
// )?;
|
||||
|
||||
// AppState::rows_to_field_mapping(result)
|
||||
// }
|
||||
|
||||
// fn rows_to_field_mapping(result: NamedRows) -> Result<FieldMapping> {
|
||||
// let s = |s: &DataValue| s.get_str().unwrap().to_owned();
|
||||
|
||||
// Ok(
|
||||
// result
|
||||
// .rows
|
||||
// .into_iter()
|
||||
// .map(|row| {
|
||||
// (
|
||||
// s(&row[0]),
|
||||
// FieldInfo {
|
||||
// relation_name: s(&row[1]),
|
||||
// relation_field: s(&row[2]),
|
||||
// r#type: s(&row[3]),
|
||||
// is_fts_enabled: row[4].get_bool().unwrap(),
|
||||
// },
|
||||
// )
|
||||
// })
|
||||
// .collect::<HashMap<_, _>>(),
|
||||
// )
|
||||
// }
|
||||
// }
|
42
crates/panorama-core/src/state/node_raw.rs
Normal file
42
crates/panorama-core/src/state/node_raw.rs
Normal file
|
@ -0,0 +1,42 @@
|
|||
use sqlx::{Executor, FromRow, Sqlite};
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
#[derive(Debug, FromRow)]
|
||||
pub struct FieldMappingRow {
|
||||
pub full_key: String,
|
||||
pub app_id: i64,
|
||||
pub app_table_name: String,
|
||||
pub app_table_field: String,
|
||||
pub db_table_name: Option<String>,
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
pub(crate) async fn get_related_field_list_for_node_id<'e, 'c: 'e, X>(
|
||||
x: X,
|
||||
node_id: &str,
|
||||
) -> sqlx::Result<Vec<FieldMappingRow>>
|
||||
where
|
||||
X: 'e + Executor<'c, Database = Sqlite>,
|
||||
{
|
||||
sqlx::query_as!(
|
||||
FieldMappingRow,
|
||||
"
|
||||
SELECT
|
||||
node_has_key.full_key, key_mapping.app_id,
|
||||
key_mapping.app_table_name, app_table_field,
|
||||
app_table_mapping.db_table_name
|
||||
FROM node_has_key
|
||||
INNER JOIN key_mapping
|
||||
ON node_has_key.full_key = key_mapping.full_key
|
||||
INNER JOIN app_table_mapping
|
||||
ON key_mapping.app_id = app_table_mapping.app_id
|
||||
AND key_mapping.app_table_name = app_table_mapping.app_table_name
|
||||
WHERE node_id = $1
|
||||
",
|
||||
node_id
|
||||
)
|
||||
.fetch_all(x)
|
||||
.await
|
||||
}
|
||||
}
|
59
crates/panorama-core/src/state/utils.rs
Normal file
59
crates/panorama-core/src/state/utils.rs
Normal file
|
@ -0,0 +1,59 @@
|
|||
use itertools::Itertools;
|
||||
use serde_json::{Number, Value};
|
||||
use tantivy::schema::OwnedValue;
|
||||
|
||||
pub fn owned_value_to_json_value(data_value: &OwnedValue) -> Value {
|
||||
match data_value {
|
||||
OwnedValue::Null => Value::Null,
|
||||
OwnedValue::Str(s) => Value::String(s.to_string()),
|
||||
OwnedValue::U64(u) => Value::Number(Number::from(*u)),
|
||||
OwnedValue::I64(i) => Value::Number(Number::from(*i)),
|
||||
OwnedValue::F64(f) => Value::Number(Number::from_f64(*f).unwrap()),
|
||||
OwnedValue::Bool(b) => Value::Bool(*b),
|
||||
OwnedValue::Array(a) => {
|
||||
Value::Array(a.into_iter().map(owned_value_to_json_value).collect_vec())
|
||||
}
|
||||
OwnedValue::Object(o) => Value::Object(
|
||||
o.into_iter()
|
||||
.map(|(k, v)| (k.to_owned(), owned_value_to_json_value(v)))
|
||||
.collect(),
|
||||
),
|
||||
_ => {
|
||||
println!("Converting unknown {:?}", data_value);
|
||||
serde_json::to_value(data_value).unwrap()
|
||||
} // OwnedValue::Date(_) => todo!(),
|
||||
// OwnedValue::Facet(_) => todo!(),
|
||||
// OwnedValue::Bytes(_) => todo!(),
|
||||
// OwnedValue::IpAddr(_) => todo!(),
|
||||
// OwnedValue::PreTokStr(_) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
// pub fn data_value_to_json_value(data_value: &DataValue) -> Value {
|
||||
// match data_value {
|
||||
// DataValue::Null => Value::Null,
|
||||
// DataValue::Bool(b) => Value::Bool(*b),
|
||||
// DataValue::Num(n) => Value::Number(match n {
|
||||
// Num::Int(i) => Number::from(*i),
|
||||
// Num::Float(f) => Number::from_f64(*f).unwrap(),
|
||||
// }),
|
||||
// DataValue::Str(s) => Value::String(s.to_string()),
|
||||
// DataValue::List(v) => {
|
||||
// Value::Array(v.into_iter().map(data_value_to_json_value).collect_vec())
|
||||
// }
|
||||
// DataValue::Json(v) => v.0.clone(),
|
||||
// DataValue::Bytes(s) => {
|
||||
// Value::String(String::from_utf8_lossy(s).to_string())
|
||||
// }
|
||||
// _ => {
|
||||
// println!("Converting unknown {:?}", data_value);
|
||||
// serde_json::to_value(data_value).unwrap()
|
||||
// } // DataValue::Bytes(s) => todo!(),
|
||||
// // DataValue::Uuid(_) => todo!(),
|
||||
// // DataValue::Regex(_) => todo!(),
|
||||
// // DataValue::Set(_) => todo!(),
|
||||
// // DataValue::Vec(_) => todo!(),
|
||||
// // DataValue::Validity(_) => todo!(),
|
||||
// // DataValue::Bot => todo!(),
|
||||
// }
|
||||
// }
|
87
crates/panorama-core/src/tests/mod.rs
Normal file
87
crates/panorama-core/src/tests/mod.rs
Normal file
|
@ -0,0 +1,87 @@
|
|||
use anyhow::Result;
|
||||
use sqlx::SqlitePool;
|
||||
use tantivy::Index;
|
||||
|
||||
use crate::{
|
||||
migrations::MIGRATOR,
|
||||
state::{node::CreateOrUpdate, tantivy_schema},
|
||||
AppState,
|
||||
};
|
||||
|
||||
pub async fn test_state() -> Result<AppState> {
|
||||
let db = SqlitePool::connect(":memory:").await?;
|
||||
let (schema, tantivy_field_map) = tantivy_schema();
|
||||
let tantivy_index = Index::create_in_ram(schema);
|
||||
MIGRATOR.run(&db).await?;
|
||||
|
||||
let state = AppState {
|
||||
db,
|
||||
tantivy_index,
|
||||
tantivy_field_map,
|
||||
app_routes: Default::default(),
|
||||
app_wasm_modules: Default::default(),
|
||||
};
|
||||
|
||||
Ok(state)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_create_node() -> Result<()> {
|
||||
let state = test_state().await?;
|
||||
|
||||
let node_info = state
|
||||
.create_or_update_node(
|
||||
CreateOrUpdate::Create {
|
||||
r#type: "panorama/journal/page".to_string(),
|
||||
},
|
||||
Some(btmap! {
|
||||
"panorama/journal/page/content".to_owned() => json!("helloge"),
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut node = state.get_node(node_info.node_id.to_string()).await?;
|
||||
assert!(node.fields.is_some());
|
||||
|
||||
let fields = node.fields.take().unwrap();
|
||||
assert!(fields.contains_key("panorama/journal/page/content"));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_full_text_search() -> Result<()> {
|
||||
let state = test_state().await?;
|
||||
|
||||
let node_info = state
|
||||
.create_or_update_node(
|
||||
CreateOrUpdate::Create {
|
||||
r#type: "panorama/journal/page".to_string(),
|
||||
},
|
||||
Some(btmap! {
|
||||
"panorama/journal/page/content".to_owned() => json!("Hello, world!"),
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
todo!();
|
||||
// let results = state.search_nodes("world").await?;
|
||||
|
||||
// assert!(results
|
||||
// .into_iter()
|
||||
// .map(|entry| entry.0)
|
||||
// .contains(&node_info.node_id));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
pub async fn test_install_apps() -> Result<()> {
|
||||
let state = test_state().await?;
|
||||
|
||||
state.install_apps_from_search_paths().await?;
|
||||
|
||||
todo!();
|
||||
|
||||
Ok(())
|
||||
}
|
1
crates/panorama-daemon/.gitignore
vendored
Normal file
1
crates/panorama-daemon/.gitignore
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
export
|
41
crates/panorama-daemon/Cargo.toml
Normal file
41
crates/panorama-daemon/Cargo.toml
Normal file
|
@ -0,0 +1,41 @@
|
|||
[package]
|
||||
name = "panorama-daemon"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
anyhow = "1.0.86"
|
||||
axum = "0.7.5"
|
||||
chrono = { version = "0.4.38", features = ["serde"] }
|
||||
clap = { version = "4.5.7", features = ["derive"] }
|
||||
# cozo = { version = "0.7.6", features = ["storage-rocksdb"] }
|
||||
csv = "1.3.0"
|
||||
dirs = "5.0.1"
|
||||
futures = "0.3.30"
|
||||
itertools = "0.13.0"
|
||||
panorama-core = { path = "../panorama-core" }
|
||||
schemars = "0.8.21"
|
||||
serde = { version = "1.0.202", features = ["derive"] }
|
||||
serde_json = "1.0.117"
|
||||
sugars = "3.0.1"
|
||||
tantivy = { version = "0.22.0", features = ["zstd"] }
|
||||
tokio = { version = "1.37.0", features = ["full"] }
|
||||
tower = "0.4.13"
|
||||
tower-http = { version = "0.5.2", features = ["cors", "trace"] }
|
||||
tracing-subscriber = "0.3.18"
|
||||
uuid = { version = "1.8.0", features = ["v7"] }
|
||||
|
||||
[dependencies.utoipa]
|
||||
git = "https://github.com/juhaku/utoipa"
|
||||
features = ["axum_extras", "time", "uuid", "chrono", "yaml"]
|
||||
|
||||
[dependencies.utoipa-scalar]
|
||||
git = "https://github.com/juhaku/utoipa"
|
||||
features = ["axum"]
|
||||
|
||||
[dependencies.async-imap]
|
||||
version = "0.9.7"
|
||||
default-features = false
|
||||
features = ["runtime-tokio"]
|
15
crates/panorama-daemon/src/apps.rs
Normal file
15
crates/panorama-daemon/src/apps.rs
Normal file
|
@ -0,0 +1,15 @@
|
|||
use axum::{
|
||||
routing::{method_routing, MethodFilter},
|
||||
Router,
|
||||
};
|
||||
use panorama_core::AppState;
|
||||
use utoipa::OpenApi;
|
||||
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(paths(), components(schemas()))]
|
||||
pub(super) struct AppsApi;
|
||||
|
||||
pub(super) fn router() -> Router<AppState> {
|
||||
Router::new()
|
||||
// .route("/app/:id/*path", method_routing::any(handler))
|
||||
}
|
0
crates/panorama-daemon/src/codetrack.rs
Normal file
0
crates/panorama-daemon/src/codetrack.rs
Normal file
34
crates/panorama-daemon/src/error.rs
Normal file
34
crates/panorama-daemon/src/error.rs
Normal file
|
@ -0,0 +1,34 @@
|
|||
use axum::{
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
|
||||
pub type AppResult<T, E = AppError> = std::result::Result<T, E>;
|
||||
|
||||
// Make our own error that wraps `anyhow::Error`.
|
||||
#[derive(Debug)]
|
||||
pub struct AppError(anyhow::Error);
|
||||
|
||||
// Tell axum how to convert `AppError` into a response.
|
||||
impl IntoResponse for AppError {
|
||||
fn into_response(self) -> Response {
|
||||
eprintln!("Encountered error: {}", self.0);
|
||||
eprintln!("{:?}", self.0);
|
||||
(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
format!("Something went wrong: {}", self.0),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
}
|
||||
|
||||
// This enables using `?` on functions that return `Result<_, anyhow::Error>` to turn them into
|
||||
// `Result<_, AppError>`. That way you don't need to do that manually.
|
||||
impl<E> From<E> for AppError
|
||||
where
|
||||
E: Into<anyhow::Error>,
|
||||
{
|
||||
fn from(err: E) -> Self {
|
||||
Self(err.into())
|
||||
}
|
||||
}
|
31
crates/panorama-daemon/src/journal.rs
Normal file
31
crates/panorama-daemon/src/journal.rs
Normal file
|
@ -0,0 +1,31 @@
|
|||
use axum::Router;
|
||||
use utoipa::OpenApi;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Node API
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(paths(), components(schemas()))]
|
||||
pub(super) struct JournalApi;
|
||||
|
||||
pub(super) fn router() -> Router<AppState> {
|
||||
Router::new()
|
||||
// .route("/get_todays_journal_id", get(get_todays_journal_id))
|
||||
}
|
||||
|
||||
// #[utoipa::path(
|
||||
// get,
|
||||
// path = "/get_todays_journal_id",
|
||||
// responses(
|
||||
// (status = 200),
|
||||
// ),
|
||||
// )]
|
||||
// pub async fn get_todays_journal_id(
|
||||
// State(state): State<AppState>,
|
||||
// ) -> AppResult<Json<Value>> {
|
||||
// let node_id = state.get_todays_journal_id().await?;
|
||||
|
||||
// Ok(Json(json!({
|
||||
// "node_id": node_id.to_string(),
|
||||
// })))
|
||||
// }
|
73
crates/panorama-daemon/src/lib.rs
Normal file
73
crates/panorama-daemon/src/lib.rs
Normal file
|
@ -0,0 +1,73 @@
|
|||
#[macro_use]
|
||||
extern crate anyhow;
|
||||
#[macro_use]
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_json;
|
||||
#[macro_use]
|
||||
extern crate sugars;
|
||||
|
||||
pub mod apps;
|
||||
mod error;
|
||||
mod journal;
|
||||
pub mod mail;
|
||||
mod node;
|
||||
|
||||
use std::fs;
|
||||
|
||||
use anyhow::Result;
|
||||
use axum::{http::Method, routing::get, Router};
|
||||
use panorama_core::AppState;
|
||||
use tokio::net::TcpListener;
|
||||
use tower::ServiceBuilder;
|
||||
use tower_http::{
|
||||
cors::{self, CorsLayer},
|
||||
trace::TraceLayer,
|
||||
};
|
||||
use utoipa::OpenApi;
|
||||
use utoipa_scalar::{Scalar, Servable};
|
||||
|
||||
pub async fn run() -> Result<()> {
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(
|
||||
modifiers(),
|
||||
nest(
|
||||
(path = "/journal", api = crate::journal::JournalApi),
|
||||
(path = "/node", api = crate::node::NodeApi),
|
||||
),
|
||||
)]
|
||||
struct ApiDoc;
|
||||
|
||||
let data_dir = dirs::data_dir().unwrap();
|
||||
let panorama_dir = data_dir.join("panorama");
|
||||
fs::create_dir_all(&panorama_dir)?;
|
||||
|
||||
let state = AppState::new(&panorama_dir).await?;
|
||||
|
||||
state.install_apps_from_search_paths().await?;
|
||||
|
||||
let cors_layer = CorsLayer::new()
|
||||
.allow_methods([Method::GET, Method::POST, Method::PUT])
|
||||
.allow_headers(cors::Any)
|
||||
.allow_origin(cors::Any);
|
||||
|
||||
let trace_layer = TraceLayer::new_for_http();
|
||||
|
||||
// build our application with a single route
|
||||
let app = Router::new()
|
||||
.merge(Scalar::with_url("/api/docs", ApiDoc::openapi()))
|
||||
.route("/", get(|| async { "Hello, World!" }))
|
||||
.nest("/node", node::router().with_state(state.clone()))
|
||||
.nest("/journal", journal::router().with_state(state.clone()))
|
||||
// .route("/mail/config", get(get_mail_config))
|
||||
// .route("/mail", get(get_mail))
|
||||
.layer(ServiceBuilder::new().layer(cors_layer))
|
||||
.layer(ServiceBuilder::new().layer(trace_layer))
|
||||
.with_state(state.clone());
|
||||
|
||||
let listener = TcpListener::bind("0.0.0.0:5195").await?;
|
||||
println!("Listening... {:?}", listener);
|
||||
axum::serve(listener, app).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
47
crates/panorama-daemon/src/mail.rs
Normal file
47
crates/panorama-daemon/src/mail.rs
Normal file
|
@ -0,0 +1,47 @@
|
|||
// pub async fn get_mail_config(
|
||||
// State(state): State<AppState>,
|
||||
// ) -> AppResult<Json<Value>> {
|
||||
// let configs = state.fetch_mail_configs()?;
|
||||
// Ok(Json(json!({ "configs": configs })))
|
||||
// }
|
||||
|
||||
// pub async fn get_mail(State(state): State<AppState>) -> AppResult<Json<Value>> {
|
||||
// let mailboxes = state.db.run_script("
|
||||
// ?[node_id, account_node_id, mailbox_name] := *mailbox {node_id, account_node_id, mailbox_name}
|
||||
// ", Default::default(), ScriptMutability::Immutable)?;
|
||||
|
||||
// let mailboxes = mailboxes
|
||||
// .rows
|
||||
// .iter()
|
||||
// .map(|mb| {
|
||||
// json!({
|
||||
// "node_id": mb[0].get_str().unwrap(),
|
||||
// "account_node_id": mb[1].get_str().unwrap(),
|
||||
// "mailbox_name": mb[2].get_str().unwrap(),
|
||||
// })
|
||||
// })
|
||||
// .collect::<Vec<_>>();
|
||||
|
||||
// let messages = state.db.run_script("
|
||||
// ?[node_id, subject, body, internal_date] := *message {node_id, subject, body, internal_date}
|
||||
// :limit 10
|
||||
// ", Default::default(), ScriptMutability::Immutable)?;
|
||||
|
||||
// let messages = messages
|
||||
// .rows
|
||||
// .iter()
|
||||
// .map(|m| {
|
||||
// json!({
|
||||
// "node_id": m[0].get_str().unwrap(),
|
||||
// "subject": m[1].get_str().unwrap(),
|
||||
// "body": m[2].get_str(),
|
||||
// "internal_date": m[3].get_str().unwrap(),
|
||||
// })
|
||||
// })
|
||||
// .collect::<Vec<_>>();
|
||||
|
||||
// Ok(Json(json!({
|
||||
// "mailboxes": mailboxes,
|
||||
// "messages": messages,
|
||||
// })))
|
||||
// }
|
32
crates/panorama-daemon/src/main.rs
Normal file
32
crates/panorama-daemon/src/main.rs
Normal file
|
@ -0,0 +1,32 @@
|
|||
use anyhow::Result;
|
||||
use clap::{Parser, Subcommand};
|
||||
use panorama_core::state::appsv0::manifest::AppManifest;
|
||||
use schemars::schema_for;
|
||||
|
||||
#[derive(Debug, Parser)]
|
||||
struct Opt {
|
||||
#[clap(subcommand)]
|
||||
command: Option<Command>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Subcommand)]
|
||||
enum Command {
|
||||
GenerateConfigSchema,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<()> {
|
||||
let opt = Opt::parse();
|
||||
|
||||
tracing_subscriber::fmt::init();
|
||||
|
||||
match opt.command {
|
||||
Some(Command::GenerateConfigSchema) => {
|
||||
let schema = schema_for!(AppManifest);
|
||||
println!("{}", serde_json::to_string_pretty(&schema).unwrap());
|
||||
}
|
||||
None => panorama_daemon::run().await?,
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
173
crates/panorama-daemon/src/node.rs
Normal file
173
crates/panorama-daemon/src/node.rs
Normal file
|
@ -0,0 +1,173 @@
|
|||
use axum::Router;
|
||||
use utoipa::OpenApi;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Node API
|
||||
#[derive(OpenApi)]
|
||||
#[openapi(paths(), components(schemas()))]
|
||||
pub(super) struct NodeApi;
|
||||
|
||||
pub(super) fn router() -> Router<AppState> {
|
||||
Router::new()
|
||||
// .route("/", put(create_node))
|
||||
// .route("/:id", get(get_node))
|
||||
// .route("/:id", post(update_node))
|
||||
// .route("/search", get(search_nodes))
|
||||
}
|
||||
|
||||
// #[derive(Serialize, Deserialize, ToSchema, Clone)]
|
||||
// struct GetNodeResult {
|
||||
// node_id: String,
|
||||
// fields: HashMap<String, Value>,
|
||||
// created_at: DateTime<Utc>,
|
||||
// updated_at: DateTime<Utc>,
|
||||
// }
|
||||
|
||||
// /// Get node info
|
||||
// ///
|
||||
// /// This endpoint retrieves all the fields for a particular node
|
||||
// #[utoipa::path(
|
||||
// get,
|
||||
// path = "/{id}",
|
||||
// responses(
|
||||
// (status = 200, body = [GetNodeResult]),
|
||||
// (status = 404, description = "the node ID provided was not found")
|
||||
// ),
|
||||
// params(
|
||||
// ("id" = String, Path, description = "Node ID"),
|
||||
// ),
|
||||
// )]
|
||||
// pub async fn get_node(
|
||||
// State(state): State<AppState>,
|
||||
// Path(node_id): Path<String>,
|
||||
// ) -> AppResult<(StatusCode, Json<Value>)> {
|
||||
// let node_info = state.get_node(&node_id).await?;
|
||||
|
||||
// Ok((
|
||||
// StatusCode::OK,
|
||||
// Json(json!({
|
||||
// "node_id": node_id,
|
||||
// "fields": node_info.fields,
|
||||
// "created_at": node_info.created_at,
|
||||
// "updated_at": node_info.updated_at,
|
||||
// })),
|
||||
// ))
|
||||
// }
|
||||
|
||||
// #[derive(Deserialize, Debug)]
|
||||
// pub struct UpdateData {
|
||||
// extra_data: Option<ExtraData>,
|
||||
// }
|
||||
|
||||
// /// Update node info
|
||||
// #[utoipa::path(
|
||||
// post,
|
||||
// path = "/{id}",
|
||||
// responses(
|
||||
// (status = 200)
|
||||
// ),
|
||||
// params(
|
||||
// ("id" = String, Path, description = "Node ID"),
|
||||
// )
|
||||
// )]
|
||||
// pub async fn update_node(
|
||||
// State(state): State<AppState>,
|
||||
// Path(node_id): Path<String>,
|
||||
// Json(opts): Json<UpdateData>,
|
||||
// ) -> AppResult<Json<Value>> {
|
||||
// let node_id = NodeId(Uuid::from_str(&node_id).into_diagnostic()?);
|
||||
// let node_info = state
|
||||
// .create_or_update_node(CreateOrUpdate::Update { node_id }, opts.extra_data)
|
||||
// .await?;
|
||||
|
||||
// Ok(Json(json!({
|
||||
// "node_id": node_info.node_id.to_string(),
|
||||
// })))
|
||||
// }
|
||||
|
||||
// #[derive(Debug, Deserialize)]
|
||||
// pub struct CreateNodeOpts {
|
||||
// // TODO: Allow submitting a string
|
||||
// // id: Option<String>,
|
||||
// #[serde(rename = "type")]
|
||||
// ty: String,
|
||||
// extra_data: Option<ExtraData>,
|
||||
// }
|
||||
|
||||
// #[utoipa::path(
|
||||
// put,
|
||||
// path = "/",
|
||||
// responses((status = 200)),
|
||||
// )]
|
||||
// pub async fn create_node(
|
||||
// State(state): State<AppState>,
|
||||
// Json(opts): Json<CreateNodeOpts>,
|
||||
// ) -> AppResult<Json<Value>> {
|
||||
// let node_info = state
|
||||
// .create_or_update_node(
|
||||
// CreateOrUpdate::Create { r#type: opts.ty },
|
||||
// opts.extra_data,
|
||||
// )
|
||||
// .await?;
|
||||
|
||||
// Ok(Json(json!({
|
||||
// "node_id": node_info.node_id.to_string(),
|
||||
// })))
|
||||
// }
|
||||
|
||||
// #[derive(Deserialize)]
|
||||
// pub struct SearchQuery {
|
||||
// query: String,
|
||||
// }
|
||||
|
||||
// #[utoipa::path(
|
||||
// get,
|
||||
// path = "/search",
|
||||
// responses((status = 200)),
|
||||
// )]
|
||||
// pub async fn search_nodes(
|
||||
// State(state): State<AppState>,
|
||||
// Query(query): Query<SearchQuery>,
|
||||
// ) -> AppResult<Json<Value>> {
|
||||
// let search_result = state.search_nodes(query.query).await?;
|
||||
// let search_result = search_result
|
||||
// .into_iter()
|
||||
// .map(|(id, value)| value["fields"].clone())
|
||||
// .collect_vec();
|
||||
|
||||
// Ok(Json(json!({
|
||||
// "results": search_result,
|
||||
// })))
|
||||
// }
|
||||
|
||||
// fn get_rows_for_extra_keys(
|
||||
// tx: &MultiTransaction,
|
||||
// extra_data: &ExtraData,
|
||||
// ) -> AppResult<HashMap<String, (String, String, String)>> {
|
||||
// let result = tx.run_script(
|
||||
// "
|
||||
// ?[key, relation, field_name, type] :=
|
||||
// *fqkey_to_dbkey{key, relation, field_name, type},
|
||||
// is_in(key, $keys)
|
||||
// ",
|
||||
// btmap! {
|
||||
// "keys".to_owned() => DataValue::List(
|
||||
// extra_data
|
||||
// .keys()
|
||||
// .map(|s| DataValue::from(s.as_str()))
|
||||
// .collect::<Vec<_>>()
|
||||
// ),
|
||||
// },
|
||||
// )?;
|
||||
|
||||
// let s = |s: &DataValue| s.get_str().unwrap().to_owned();
|
||||
|
||||
// Ok(
|
||||
// result
|
||||
// .rows
|
||||
// .into_iter()
|
||||
// .map(|row| (s(&row[0]), (s(&row[1]), s(&row[2]), s(&row[3]))))
|
||||
// .collect::<HashMap<_, _>>(),
|
||||
// )
|
||||
// }
|
9
crates/panorama-macros/Cargo.toml
Normal file
9
crates/panorama-macros/Cargo.toml
Normal file
|
@ -0,0 +1,9 @@
|
|||
[package]
|
||||
name = "panorama-macros"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
1
crates/panorama-macros/src/lib.rs
Normal file
1
crates/panorama-macros/src/lib.rs
Normal file
|
@ -0,0 +1 @@
|
|||
// TODO: derive named rows
|
6
crates/panorama-sync/Cargo.toml
Normal file
6
crates/panorama-sync/Cargo.toml
Normal file
|
@ -0,0 +1,6 @@
|
|||
[package]
|
||||
name = "panorama-sync"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
1
crates/panorama-sync/src/lib.rs
Normal file
1
crates/panorama-sync/src/lib.rs
Normal file
|
@ -0,0 +1 @@
|
|||
|
23
docs/.gitignore
vendored
23
docs/.gitignore
vendored
|
@ -1,21 +1,2 @@
|
|||
# build output
|
||||
dist/
|
||||
# generated types
|
||||
.astro/
|
||||
|
||||
# dependencies
|
||||
node_modules/
|
||||
|
||||
# logs
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
|
||||
|
||||
# environment variables
|
||||
.env
|
||||
.env.production
|
||||
|
||||
# macOS-specific files
|
||||
.DS_Store
|
||||
book
|
||||
src/generated
|
4
docs/.vscode/extensions.json
vendored
4
docs/.vscode/extensions.json
vendored
|
@ -1,4 +0,0 @@
|
|||
{
|
||||
"recommendations": ["astro-build.astro-vscode"],
|
||||
"unwantedRecommendations": []
|
||||
}
|
11
docs/.vscode/launch.json
vendored
11
docs/.vscode/launch.json
vendored
|
@ -1,11 +0,0 @@
|
|||
{
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"command": "./node_modules/.bin/astro dev",
|
||||
"name": "Development server",
|
||||
"request": "launch",
|
||||
"type": "node-terminal"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,37 +0,0 @@
|
|||
import { defineConfig } from "astro/config";
|
||||
import starlight from "@astrojs/starlight";
|
||||
import rehypeKatex from "rehype-katex";
|
||||
import remarkMath from "remark-math";
|
||||
|
||||
// https://astro.build/config
|
||||
export default defineConfig({
|
||||
base: process.env.BASE_URL ?? "/",
|
||||
integrations: [
|
||||
starlight({
|
||||
title: "Panorama",
|
||||
social: {
|
||||
github: "https://git.mzhang.io/michael/panorama",
|
||||
},
|
||||
sidebar: [
|
||||
{ label: "The panorama dream", link: "/dream" },
|
||||
{
|
||||
label: "High Level Design",
|
||||
autogenerate: { directory: "high-level-design" },
|
||||
},
|
||||
{
|
||||
label: "Technical Docs",
|
||||
autogenerate: { directory: "technical-docs" },
|
||||
},
|
||||
{
|
||||
label: "Protocols",
|
||||
autogenerate: { directory: "protocols" },
|
||||
},
|
||||
],
|
||||
customCss: ["./node_modules/katex/dist/katex.min.css"],
|
||||
}),
|
||||
],
|
||||
markdown: {
|
||||
remarkPlugins: [remarkMath],
|
||||
rehypePlugins: [rehypeKatex],
|
||||
},
|
||||
});
|
6
docs/book.toml
Normal file
6
docs/book.toml
Normal file
|
@ -0,0 +1,6 @@
|
|||
[book]
|
||||
authors = ["Michael Zhang"]
|
||||
language = "en"
|
||||
multilingual = false
|
||||
src = "src"
|
||||
title = "Panorama Docs"
|
BIN
docs/bun.lockb
BIN
docs/bun.lockb
Binary file not shown.
|
@ -1,22 +0,0 @@
|
|||
{
|
||||
"name": "docs",
|
||||
"type": "module",
|
||||
"version": "0.0.1",
|
||||
"scripts": {
|
||||
"dev": "astro dev",
|
||||
"start": "astro dev",
|
||||
"build": "astro check && astro build",
|
||||
"preview": "astro preview",
|
||||
"astro": "astro"
|
||||
},
|
||||
"dependencies": {
|
||||
"@astrojs/check": "^0.7.0",
|
||||
"@astrojs/starlight": "^0.24.5",
|
||||
"astro": "^4.10.2",
|
||||
"katex": "^0.16.10",
|
||||
"rehype-katex": "^7.0.0",
|
||||
"remark-math": "^6.0.0",
|
||||
"sharp": "^0.32.5",
|
||||
"typescript": "^5.5.2"
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 128 128"><path fill-rule="evenodd" d="M81 36 64 0 47 36l-1 2-9-10a6 6 0 0 0-9 9l10 10h-2L0 64l36 17h2L28 91a6 6 0 1 0 9 9l9-10 1 2 17 36 17-36v-2l9 10a6 6 0 1 0 9-9l-9-9 2-1 36-17-36-17-2-1 9-9a6 6 0 1 0-9-9l-9 10v-2Zm-17 2-2 5c-4 8-11 15-19 19l-5 2 5 2c8 4 15 11 19 19l2 5 2-5c4-8 11-15 19-19l5-2-5-2c-8-4-15-11-19-19l-2-5Z" clip-rule="evenodd"/><path d="M118 19a6 6 0 0 0-9-9l-3 3a6 6 0 1 0 9 9l3-3Zm-96 4c-2 2-6 2-9 0l-3-3a6 6 0 1 1 9-9l3 3c3 2 3 6 0 9Zm0 82c-2-2-6-2-9 0l-3 3a6 6 0 1 0 9 9l3-3c3-2 3-6 0-9Zm96 4a6 6 0 0 1-9 9l-3-3a6 6 0 1 1 9-9l3 3Z"/><style>path{fill:#000}@media (prefers-color-scheme:dark){path{fill:#fff}}</style></svg>
|
Before Width: | Height: | Size: 696 B |
8
docs/src/SUMMARY.md
Normal file
8
docs/src/SUMMARY.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
# Summary
|
||||
|
||||
- [Front](./front.md)
|
||||
- [Nodes](./nodes.md)
|
||||
- [Custom Apps](./custom_apps.md)
|
||||
- [Sync](./sync.md)
|
||||
- [Dream](./dream.md)
|
||||
- [Comparison](./comparison.md)
|
Binary file not shown.
Before Width: | Height: | Size: 96 KiB |
8
docs/src/comparison.md
Normal file
8
docs/src/comparison.md
Normal file
|
@ -0,0 +1,8 @@
|
|||
# Comparison
|
||||
|
||||
From anytype:
|
||||
|
||||
- Knowledgeable about clients
|
||||
- Custom apps by third parties
|
||||
|
||||
From logseq:
|
|
@ -1,6 +0,0 @@
|
|||
import { defineCollection } from "astro:content";
|
||||
import { docsSchema } from "@astrojs/starlight/schema";
|
||||
|
||||
export const collections = {
|
||||
docs: defineCollection({ schema: docsSchema() }),
|
||||
};
|
|
@ -1,11 +0,0 @@
|
|||
---
|
||||
title: Attributes
|
||||
---
|
||||
|
||||
The core idea behind panorama is that apps can choose to define attributes, which you can think of as slots.
|
||||
|
||||
The slots have some particular type, which can be filled with some node.
|
||||
|
||||
:::caution
|
||||
The absence of an attribute is different from the existence of the $\textsf{None}$ value.
|
||||
:::
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
title: Cryptography
|
||||
---
|
||||
|
||||
lol
|
|
@ -1,30 +0,0 @@
|
|||
---
|
||||
title: Device
|
||||
---
|
||||
|
||||
The panorama network keeps track of what devices join and leave the network.
|
||||
|
||||
Each device has certain attributes:
|
||||
|
||||
```ts
|
||||
interface DeviceConfig {
|
||||
// Not used for anything important, just for displaying an icon if needed
|
||||
formFactor: "desktop" | "server" | "laptop" | "phone" | "tablet" | string;
|
||||
|
||||
// A string that represents a duration of time. If it has been longer than
|
||||
// this amount of time since last contacting this device, consider it to have
|
||||
// gone offline
|
||||
heartbeatDuration: string;
|
||||
|
||||
// Whether or not to schedule services to this device
|
||||
canRunServices: boolean;
|
||||
|
||||
// Whether or not this device should be treated as a file store
|
||||
// (recommended to be off for phones)
|
||||
canStoreFiles: boolean;
|
||||
}
|
||||
```
|
||||
|
||||
Each device keeps track of each other device, with a merkle tree of signatures.
|
||||
|
||||
Devices have their own keypairs. TODO: See how matrix does cross-signing
|
|
@ -1,18 +0,0 @@
|
|||
---
|
||||
title: Indexing
|
||||
---
|
||||
|
||||
There are several types of indexes in panorama.
|
||||
Some are the database kind that updates immediately.
|
||||
Others are the search kind that updates asynchronously.
|
||||
|
||||
Custom app authors can specify how their attributes should be indexed.
|
||||
Then, whenever any node has that particular attribute touched, a hook is run.
|
||||
|
||||
## Implementation
|
||||
|
||||
In the initial version of panorama, the daemon is thought of as having exclusive
|
||||
control over the database. It should not be run as multiple copies of itself either.
|
||||
|
||||
This way, the daemon can separately control indexes if it wishes, allowing it to
|
||||
call custom functions for indexing.
|
|
@ -1,21 +0,0 @@
|
|||
---
|
||||
title: Nodes
|
||||
---
|
||||
|
||||
Everything is organized into nodes.
|
||||
|
||||
Each app (journal, mail, etc.) creates nodes to represent their information.
|
||||
These nodes are linked to each other through attributes.
|
||||
|
||||
When retrieving its contents, a closure-like query is conducted and all the
|
||||
nodes reachable through its attributes are returned.
|
||||
|
||||
Think of a node as being represented like this:
|
||||
|
||||
```ts
|
||||
interface Node {
|
||||
id: string;
|
||||
type: string;
|
||||
attributes: string[];
|
||||
}
|
||||
```
|
|
@ -1,13 +0,0 @@
|
|||
---
|
||||
title: Onboarding
|
||||
---
|
||||
|
||||
## Creating a new database
|
||||
|
||||
1. Download the software
|
||||
2. It should automatically boot into a new database
|
||||
- Automatically connect to the hosted panorama bridge service
|
||||
3. Give the user the option to log into an existing database, and then allow them to merge
|
||||
|
||||
## Adding another device
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
title: Permissions
|
||||
---
|
||||
|
||||
## Goals
|
||||
|
||||
- Apps should probably not be allowed to read attributes they didn't explicitly request access to
|
||||
- (there should be an option "Unless they created the node")
|
||||
|
||||
## Design
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
title: Design Principles
|
||||
---
|
||||
|
||||
## Architecture
|
||||
|
||||
- **Never use fully-qualified names starting from domain (i.e `com.example.package`).**
|
||||
This makes it so migrating domains / package names becomes very hard.
|
||||
|
||||
## Data governance
|
||||
|
||||
- **Offline first, full control to the user.**
|
||||
Synchronization is an important feature but must be built as a separate thing.
|
||||
This also means that it should be possible for some devices to stay offline for long periods of time.
|
|
@ -1,15 +0,0 @@
|
|||
---
|
||||
title: Sync
|
||||
---
|
||||
|
||||
:::caution
|
||||
This is documentation for a feature that is in development.
|
||||
|
||||
Almost none of this is implemented and most of it will probably change in the future.
|
||||
:::
|
||||
|
||||
## Node-level sync
|
||||
|
||||
## Attribute-level sync
|
||||
|
||||
## Index-level sync
|
|
@ -1,72 +0,0 @@
|
|||
---
|
||||
title: Types
|
||||
---
|
||||
|
||||
Types exist to ensure that apps are treating data properly.
|
||||
|
||||
## Formal definition
|
||||
|
||||
An attribute's type can be one of the following:
|
||||
|
||||
$\tau :\equiv$
|
||||
|
||||
- $c$ (constant)
|
||||
- $\alpha$ (type variable)
|
||||
- $\mu \alpha . \tau$ (inductive type)
|
||||
- $( \ell_k : \tau_k )_k$ (record type)
|
||||
- $\{ \ell_k : \tau_k \}_k$ (sum type)
|
||||
- $\#n$ (singleton type)
|
||||
|
||||
Constants may be node references, unit, unsigned/signed integers, decimal,
|
||||
strings, booleans, instant (or timezone-aware timestamp), or URL
|
||||
|
||||
It is possible in the future that node references are also made using URLs, but
|
||||
the URL format will need to be decided upon by then.
|
||||
|
||||
## Notes
|
||||
|
||||
- Nodes don't have types; only attributes do.
|
||||
- All attributes must belong to _closed_ types.
|
||||
This means type variables cannot exist at the top-level.
|
||||
- When shown in the panorama UI, the constant type will not be shown as a separate type.
|
||||
Instead the actual type itself will be inlined.
|
||||
- The type registry doesn't canonically exist in the database (it may exist in the form of system logs).
|
||||
Instead, apps register their types on boot.
|
||||
Everything is known to the panorama daemon after app initialization.
|
||||
- The following constant types have their fields embedded directly into the node table:
|
||||
- Number (integer, bigdecimal), string, boolean: `value`
|
||||
- Sum: `label` (which variant is used?)
|
||||
- Record types are essentially a collection of forced attributes.
|
||||
A node with a record type _must_ contain every field listed in the labels of the record type.
|
||||
- The panorama type system is _structurally_ typed.
|
||||
#TODO Maybe add some convenient way of introducing ways to distinguish types apart?
|
||||
|
||||
### Convenient types
|
||||
|
||||
- $\textsf{Optional}(\tau) :\equiv \{ \texttt{'none} : () , \texttt{'some} : \tau \}$ \
|
||||
The optional type.
|
||||
|
||||
### What is the point of a singleton type?
|
||||
|
||||
Singleton types only consist of a node ID.
|
||||
The point of this is so apps can create types that are forced to have exactly a single node.
|
||||
|
||||
:::note
|
||||
Apps with dashboards (mail) may create a type that represents the "entrypoint" into their application.
|
||||
The process of creating it would look like this:
|
||||
|
||||
+ Upon app registration, I declare that I want a singleton type to be registered as `panorama-mail/entry`.
|
||||
+ A node id will be assigned, if it doesn't already exist.
|
||||
+ The application is returned the node ID.
|
||||
+ The application can then register links to that node ID, and it can register a handler.
|
||||
:::
|
||||
|
||||
When an app is registered, its types are parsed and registered into the database.
|
||||
At the time of writing, if the node ID it refers to has already been found in the database, the type of the node will be checked against the given type.
|
||||
If it doesn't match #TODO
|
||||
|
||||
## Attributes
|
||||
|
||||
Nodes contain attributes.
|
||||
An attribute is a link to another node.
|
||||
Attributes are typed, and the node it's linked to must have that type.
|
|
@ -1,14 +0,0 @@
|
|||
---
|
||||
title: Welcome to Panorama
|
||||
description: Get started building your docs site with Starlight.
|
||||
template: splash
|
||||
hero:
|
||||
tagline: I love scope creep...
|
||||
image:
|
||||
file: ../../assets/houston.webp
|
||||
actions:
|
||||
- text: Read the docs
|
||||
link: ./dream
|
||||
icon: right-arrow
|
||||
variant: primary
|
||||
---
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
title: Client-Bridge Protocols
|
||||
---
|
||||
|
||||
A **bridge** is just a way of connecting two devices.
|
|
@ -1,4 +0,0 @@
|
|||
---
|
||||
title: Client-Client Protocols
|
||||
---
|
||||
|
|
@ -1,25 +0,0 @@
|
|||
---
|
||||
title: Custom app API
|
||||
---
|
||||
|
||||
## Registration
|
||||
|
||||
The following types of things can be registered by the app:
|
||||
|
||||
- Named types
|
||||
- Hooks (described below)
|
||||
- Background services
|
||||
- Frontend
|
||||
|
||||
## Hooks
|
||||
|
||||
Custom apps are allowed to hook into the following events:
|
||||
|
||||
- `install`: When the app is first being installed.
|
||||
|
||||
- `insert`, `update`, `delete`: CRUD hooks for nodes with a type that the app manages
|
||||
|
||||
- `attr-new`, `attr-update`, `attr-remove`: CRUD hooks for attributes with types that the app manages
|
||||
|
||||
Each hook is handled by a function, which must return with a success. If this
|
||||
doesn't happen, the daemon will re-call the function with exponential backoff for a specific number of retries.
|
|
@ -1,27 +0,0 @@
|
|||
---
|
||||
title: Custom app sandboxing
|
||||
---
|
||||
|
||||
:::caution
|
||||
For the initial releases of panorama, I am not planning on including _any_
|
||||
sandboxing whatsoever. The development overhead will be far too great to warrant supporting it.
|
||||
|
||||
The entire app _will_ be rewritten before the public alpha release, which will
|
||||
include proper custom app sandboxing. This page lists some ideas.
|
||||
:::
|
||||
|
||||
Custom apps are made up of two parts:
|
||||
|
||||
- The backend, which talks to the database
|
||||
- The frontend, which talks to the user
|
||||
|
||||
I say "the" frontend, but there could possibly be multiple frontends. (TUI, headless, etc.)
|
||||
Each part needs to be sandboxed individually.
|
||||
|
||||
## Backend sandboxing
|
||||
|
||||
This will be done via a WASM runtime. The custom app's backend software will be compiled to WASM and executed inside this runtime, which mediates all access to the database.
|
||||
|
||||
## Frontend sandboxing
|
||||
|
||||
lmao not sure if this is possible with a web-based host at all, looking into flutter...
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
title: Formal verification
|
||||
---
|
||||
|
||||
lol
|
|
@ -1,6 +0,0 @@
|
|||
---
|
||||
title: Loading process
|
||||
---
|
||||
|
||||
The goal of panorama is to start up as quickly as possible.
|
||||
The following tasks need to be performed on start:
|
|
@ -1,5 +0,0 @@
|
|||
---
|
||||
title: Notifications
|
||||
---
|
||||
|
||||
https://unifiedpush.org/
|
|
@ -1,10 +0,0 @@
|
|||
---
|
||||
title: Protected namespaces
|
||||
---
|
||||
|
||||
There's some protected namespace of nodes that's used to keep track of the
|
||||
actual database functionality. For example:
|
||||
|
||||
- List of installed apps
|
||||
- List of currently registered types (maybe not keep this?)
|
||||
- System log
|
64
docs/src/custom_apps.md
Normal file
64
docs/src/custom_apps.md
Normal file
|
@ -0,0 +1,64 @@
|
|||
# Custom Apps
|
||||
|
||||
<div class="warning">
|
||||
|
||||
**WARNING:** This is documentation for a feature that is in development.
|
||||
|
||||
Almost none of this is implemented and most of it will probably change in the future.
|
||||
|
||||
</div>
|
||||
|
||||
Custom apps allow third parties to develop functionality for panorama.
|
||||
After this rolls out, most of the built-in panorama apps will also be converted into custom apps, and this feature will just be renamed "apps".
|
||||
|
||||
## API
|
||||
|
||||
To develop a custom app, you will need to provide:
|
||||
|
||||
-
|
||||
App metadata in a `manifest.yml`. This contains:
|
||||
|
||||
- App display name.
|
||||
- Version + License.
|
||||
- Description + Keywords.
|
||||
- Compatible panorama versions (TODO).
|
||||
- Authors + Maintainers.
|
||||
- Repository + Issues.
|
||||
- Extra data fields for whatever
|
||||
|
||||
This also includes relationships with other apps. For example:
|
||||
|
||||
- Field read dependencies. If your app needs to read for example `panorama/std/time`, then it needs to list it.
|
||||
- Field write dependencies. This breaks down to:
|
||||
- any: the app is allowed to write to the specified field on any node
|
||||
- owned: the app is allowed to write to the specified field on nodes it owns (**TODO** flesh out app ownership of nodes)
|
||||
- none: the app isn't allowed to write to the specified field
|
||||
|
||||
-
|
||||
List of endpoints and triggers, along with their handlers.
|
||||
|
||||
The handlers take the form of registered functions. (**TODO:** document the exact handler signature.)
|
||||
|
||||
## App ownership of nodes
|
||||
|
||||
Apps automatically own nodes they create.
|
||||
|
||||
**TODO:** is multiple ownership allowed?
|
||||
|
||||
## Design notes
|
||||
|
||||
-
|
||||
Maybe it's best to generate the actual db relation names and have their symbolic names be mapped? This will require an extra layer of indirection but it should still make querying be doable in 2 queries.
|
||||
|
||||
For example, the journal app specifies that it wants a `journal` relation. The db generates something like `journal_a41e`, registers that as a mapping for the "journal" app, and all queries will actually involve that name.
|
||||
|
||||
This avoids name conflicts for separate third parties that use the same name for a relation.
|
||||
|
||||
|
||||
## Built-in apps
|
||||
|
||||
### Journal
|
||||
|
||||
### Mail
|
||||
|
||||
### Codetrack
|
|
@ -1,35 +1,4 @@
|
|||
---
|
||||
title: The panorama dream
|
||||
---
|
||||
|
||||
In the ideal world, you're reading this via panorama right now.
|
||||
|
||||
The panorama dream is to have an "everything" app that is fully managed by the user.
|
||||
This page describes the vision for the app.
|
||||
|
||||
Almost everything on this list is something that I self host, or want to self
|
||||
host, but hosts its own database separately. I want to unify the data source in
|
||||
a very flexible way so that it can be shared among apps.
|
||||
|
||||
This app takes inspiration from many similar apps, such as Anytype, Logseq, Notion, etc.
|
||||
|
||||
## Features I want
|
||||
|
||||
- Graph view
|
||||
- Instantly share/publish anything
|
||||
- Full text+OCR search
|
||||
- IFTTT workflows
|
||||
- Notifications
|
||||
- Multiuser
|
||||
- Google-Docs-like interface for docs / typst
|
||||
|
||||
## Development Principles
|
||||
|
||||
These are the goals for panorama development.
|
||||
|
||||
- **Local first.** Everything is first committed to a local database.
|
||||
- **Keyboard friendly.**
|
||||
- **Gradual adoption.**
|
||||
# Dream
|
||||
|
||||
## Custom Apps List
|
||||
|
||||
|
@ -68,4 +37,14 @@ These are the goals for panorama development.
|
|||
- Education
|
||||
- Anki flashcards
|
||||
- Canvas???
|
||||
- Dashboards
|
||||
|
||||
# Features
|
||||
|
||||
- Graph view
|
||||
- Instantly publish anything
|
||||
- Notifications
|
||||
- Full text+OCR search
|
||||
- IFTTT workflows
|
||||
- Multiuser
|
||||
- Google-Docs-like interface for docs / typst
|
2
docs/src/env.d.ts
vendored
2
docs/src/env.d.ts
vendored
|
@ -1,2 +0,0 @@
|
|||
/// <reference path="../.astro/types.d.ts" />
|
||||
/// <reference types="astro/client" />
|
6
docs/src/front.md
Normal file
6
docs/src/front.md
Normal file
|
@ -0,0 +1,6 @@
|
|||
# Panorama
|
||||
|
||||
Panorama is a personal information manager.
|
||||
|
||||
- [Repository](https://git.mzhang.io/michael/panorama)
|
||||
- [Issues](https://git.mzhang.io/michael/panorama/issues)
|
28
docs/src/nodes.md
Normal file
28
docs/src/nodes.md
Normal file
|
@ -0,0 +1,28 @@
|
|||
# Nodes
|
||||
|
||||
Everything is organized into nodes.
|
||||
|
||||
Each app (journal, mail, etc.) creates relations from node IDs to their information.
|
||||
|
||||
For example, in a journal, there would be 2 database entries:
|
||||
|
||||
- `node { id: "12345" => type: "panorama/journal/page", created_at: (...), ... }`
|
||||
- `journal { node_id: "12345" => content: "blah blah blah" }`
|
||||
|
||||
When retrieving its contents, a join relation is conducted and all the fields are returned.
|
||||
|
||||
## Field mapping
|
||||
|
||||
In the database, there is a relation mapping field names that the frontend knows about, such as `panorama/journal/page/content` to the actual relation (`journal`) + field name (`content`). These are currently all hard-coded into the migrations, but when custom apps are added they will be able to be registered.
|
||||
|
||||
## Types
|
||||
|
||||
The node type tells the frontend how to render it.
|
||||
|
||||
**TODO:** when custom apps hit, what's the best way to package frontend React code?
|
||||
|
||||
## Synthetic nodes
|
||||
|
||||
These nodes basically only exist on the frontend. For example, `panorama/mail` is a special ID that renders the mail page.
|
||||
|
||||
**TODO:** consider replacing these with short-circuiting the query instead of having special IDs?
|
20
docs/src/sync.md
Normal file
20
docs/src/sync.md
Normal file
|
@ -0,0 +1,20 @@
|
|||
# Sync
|
||||
|
||||
<div class="warning">
|
||||
|
||||
**WARNING:** This is documentation for a feature that is in development.
|
||||
|
||||
Almost none of this is implemented and most of it will probably change in the future.
|
||||
|
||||
</div>
|
||||
|
||||
This **only** deals with syncing nodes and files between devices owned by the same person. Permissions are not considered here.
|
||||
|
||||
## Design notes
|
||||
|
||||
-
|
||||
Devices need to have some kind of knowledge of each other's existence. This may not necessarily be exposed to apps, but the thing that's responsible for syncing needs to know which nodes have which files.
|
||||
-
|
||||
Slow internet connections and largely offline usage patterns need to be considered.
|
||||
-
|
||||
**TODO:** does this need to be deeply integrated within the panorama daemon itself or is there a way to expose enough APIs for this to just be an app?
|
|
@ -1,5 +0,0 @@
|
|||
{
|
||||
"extends": "astro/tsconfigs/strict",
|
||||
"compilerOptions": { "skipLibCheck": true },
|
||||
"exclude": ["dist"]
|
||||
}
|
13
package.json
13
package.json
|
@ -1,13 +0,0 @@
|
|||
{
|
||||
"name": "panorama",
|
||||
"private": true,
|
||||
"workspaces": [
|
||||
"packages/*",
|
||||
"apps/*"
|
||||
],
|
||||
"trustedDependencies": [
|
||||
"electron",
|
||||
"esbuild",
|
||||
"sqlite3"
|
||||
]
|
||||
}
|
175
packages/panorama-daemon/.gitignore
vendored
175
packages/panorama-daemon/.gitignore
vendored
|
@ -1,175 +0,0 @@
|
|||
# Based on https://raw.githubusercontent.com/github/gitignore/main/Node.gitignore
|
||||
|
||||
# Logs
|
||||
|
||||
logs
|
||||
_.log
|
||||
npm-debug.log_
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
lerna-debug.log*
|
||||
.pnpm-debug.log*
|
||||
|
||||
# Caches
|
||||
|
||||
.cache
|
||||
|
||||
# Diagnostic reports (https://nodejs.org/api/report.html)
|
||||
|
||||
report.[0-9]_.[0-9]_.[0-9]_.[0-9]_.json
|
||||
|
||||
# Runtime data
|
||||
|
||||
pids
|
||||
_.pid
|
||||
_.seed
|
||||
*.pid.lock
|
||||
|
||||
# Directory for instrumented libs generated by jscoverage/JSCover
|
||||
|
||||
lib-cov
|
||||
|
||||
# Coverage directory used by tools like istanbul
|
||||
|
||||
coverage
|
||||
*.lcov
|
||||
|
||||
# nyc test coverage
|
||||
|
||||
.nyc_output
|
||||
|
||||
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
|
||||
|
||||
.grunt
|
||||
|
||||
# Bower dependency directory (https://bower.io/)
|
||||
|
||||
bower_components
|
||||
|
||||
# node-waf configuration
|
||||
|
||||
.lock-wscript
|
||||
|
||||
# Compiled binary addons (https://nodejs.org/api/addons.html)
|
||||
|
||||
build/Release
|
||||
|
||||
# Dependency directories
|
||||
|
||||
node_modules/
|
||||
jspm_packages/
|
||||
|
||||
# Snowpack dependency directory (https://snowpack.dev/)
|
||||
|
||||
web_modules/
|
||||
|
||||
# TypeScript cache
|
||||
|
||||
*.tsbuildinfo
|
||||
|
||||
# Optional npm cache directory
|
||||
|
||||
.npm
|
||||
|
||||
# Optional eslint cache
|
||||
|
||||
.eslintcache
|
||||
|
||||
# Optional stylelint cache
|
||||
|
||||
.stylelintcache
|
||||
|
||||
# Microbundle cache
|
||||
|
||||
.rpt2_cache/
|
||||
.rts2_cache_cjs/
|
||||
.rts2_cache_es/
|
||||
.rts2_cache_umd/
|
||||
|
||||
# Optional REPL history
|
||||
|
||||
.node_repl_history
|
||||
|
||||
# Output of 'npm pack'
|
||||
|
||||
*.tgz
|
||||
|
||||
# Yarn Integrity file
|
||||
|
||||
.yarn-integrity
|
||||
|
||||
# dotenv environment variable files
|
||||
|
||||
.env
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.env.local
|
||||
|
||||
# parcel-bundler cache (https://parceljs.org/)
|
||||
|
||||
.parcel-cache
|
||||
|
||||
# Next.js build output
|
||||
|
||||
.next
|
||||
out
|
||||
|
||||
# Nuxt.js build / generate output
|
||||
|
||||
.nuxt
|
||||
dist
|
||||
|
||||
# Gatsby files
|
||||
|
||||
# Comment in the public line in if your project uses Gatsby and not Next.js
|
||||
|
||||
# https://nextjs.org/blog/next-9-1#public-directory-support
|
||||
|
||||
# public
|
||||
|
||||
# vuepress build output
|
||||
|
||||
.vuepress/dist
|
||||
|
||||
# vuepress v2.x temp and cache directory
|
||||
|
||||
.temp
|
||||
|
||||
# Docusaurus cache and generated files
|
||||
|
||||
.docusaurus
|
||||
|
||||
# Serverless directories
|
||||
|
||||
.serverless/
|
||||
|
||||
# FuseBox cache
|
||||
|
||||
.fusebox/
|
||||
|
||||
# DynamoDB Local files
|
||||
|
||||
.dynamodb/
|
||||
|
||||
# TernJS port file
|
||||
|
||||
.tern-port
|
||||
|
||||
# Stores VSCode versions used for testing VSCode extensions
|
||||
|
||||
.vscode-test
|
||||
|
||||
# yarn v2
|
||||
|
||||
.yarn/cache
|
||||
.yarn/unplugged
|
||||
.yarn/build-state.yml
|
||||
.yarn/install-state.gz
|
||||
.pnp.*
|
||||
|
||||
# IntelliJ based IDEs
|
||||
.idea
|
||||
|
||||
# Finder (MacOS) folder config
|
||||
.DS_Store
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue