Compare commits

...

21 commits

Author SHA1 Message Date
Michael Zhang 633f544e75 wip 2024-07-01 02:17:16 -05:00
Michael Zhang 0466b64c5a Add flake 2024-06-28 01:28:52 -05:00
Michael Zhang 99046dba29 abi crate 2024-06-24 10:31:55 -05:00
Michael Zhang 99d50db212 go 2024-06-23 22:41:54 -05:00
Michael Zhang 5ec49db2ce add time 2024-06-23 22:26:54 -05:00
Michael Zhang 08192e1687 more progress 2024-06-23 18:49:25 -05:00
Michael Zhang 3e505d8690 hook printing up 2024-06-23 17:29:29 -05:00
Michael Zhang 947118def1 wip 2024-06-23 13:56:45 -05:00
Michael Zhang acef87c56b wip ffi callback 2024-06-21 20:00:34 -05:00
Michael Zhang 1942d22451 retrieve string from memory 2024-06-21 19:48:53 -05:00
Michael Zhang dff72850ce add sdk crate 2024-06-21 18:04:53 -05:00
Michael Zhang 0a21933795 wasm wip 2024-06-21 17:59:41 -05:00
Michael Zhang 1646db88d1 wasm progress 2024-06-21 16:45:29 -05:00
Michael Zhang 81d28b6740 refactor 2024-06-21 15:43:19 -05:00
Michael Zhang 98e07b56b8 remove .env 2024-06-19 14:27:56 -05:00
Michael Zhang bfca0dfbd7 update 2024-06-18 16:49:49 -05:00
Michael Zhang 3a2eda20cb compiles again 2024-06-18 16:28:15 -05:00
Michael Zhang f771a7d20f compiles 2024-06-18 15:20:42 -05:00
Michael Zhang 21728e6de5 updates 2024-06-18 14:16:51 -05:00
Michael Zhang 2d424d763f move to ui 2024-06-18 14:16:38 -05:00
Michael Zhang b08f584ab0 rip out cozo 2024-06-17 15:08:03 -04:00
108 changed files with 3574 additions and 2316 deletions

.envrc Normal file (2 lines changed)

@@ -0,0 +1,2 @@
+export DATABASE_URL=sqlite://$(pwd)/test.db
+use flake

.gitignore vendored (5 lines changed)

@@ -2,4 +2,7 @@ node_modules
 dist
 target
 .DS_Store
 **/export/export.json
+test.db*
+.env
+.direnv

Cargo.lock generated (2710 lines changed)

File diff suppressed because it is too large

Cargo.toml (workspace root; path inferred)

@@ -1,2 +1,13 @@
 workspace.resolver = "2"
-workspace.members = ["crates/*", "app/src-tauri"]
+workspace.members = ["apps/*", "crates/*", "ui/src-tauri"]
+
+[profile.wasm-debug]
+inherits = "dev"
+panic = "abort"
+
+[profile.wasm-release]
+inherits = "release"
+lto = true
+opt-level = 's'
+strip = true
+panic = "abort"

Makefile (name inferred from content)

@@ -1,3 +1,13 @@
 deploy-docs:
 	mdbook build docs
 	rsync -azrP docs/book/ root@veil:/home/blogDeploy/public/panorama
+
+JOURNAL_SOURCES := $(shell find . apps/journal -name "*.rs" -not -path "./target/*")
+journal: $(JOURNAL_SOURCES)
+	cargo build \
+		--profile=wasm-debug \
+		-p panorama-journal \
+		--target=wasm32-unknown-unknown
+
+test-install-apps: journal
+	cargo test -p panorama-core -- tests::test_install_apps

apps/codetrack/Cargo.toml Normal file (10 lines changed)

@@ -0,0 +1,10 @@
+[package]
+name = "panorama-codetrack"
+version = "0.1.0"
+edition = "2021"
+
+[[bin]]
+name = "panorama-codetrack"
+path = "rust-src/main.rs"
+
+[dependencies]

apps/codetrack/manifest.yml (path inferred)

@@ -0,0 +1,30 @@
+name: panorama/codetrack
+version: 0.1.0
+panorama_version: 0.1.0
+description: Code tracking app similar to WakaTime
+
+command: cargo run -p panorama-codetrack
+
+node_types:
+  - name: heartbeat
+    keys:
+      - name: start_time
+        type: date
+      - name: end_time
+        type: date
+      - name: project
+        type: text
+    indexes:
+      - type: rtree
+        start: panorama/codetrack/start_time
+        end: panorama/codetrack/start_time
+
+endpoints:
+
+profiles:
+  release:
+    module: ./main.wasm

apps/codetrack/rust-src/main.rs Normal file (path from Cargo.toml above)

@@ -0,0 +1,3 @@
+fn main() {
+  println!("Hello, world!");
+}

tsconfig.json (new file; full path not shown)

@@ -0,0 +1,9 @@
+{
+  "compilerOptions": {
+    "lib": ["ESNext", "DOM", "DOM.Iterable"],
+    "allowJs": false,
+    "skipLibCheck": true,
+    "target": "ESNext",
+    "module": "ESNext"
+  }
+}

(new file; path not shown)

@@ -0,0 +1,3 @@
+export default {
+  nodeTypes: {},
+};

crates/panorama-core/Cargo.toml (path inferred)

@@ -4,19 +4,35 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
+anyhow = { version = "1.0.86", features = ["backtrace"] }
 backoff = { version = "0.4.0", features = ["tokio"] }
 bimap = "0.6.3"
 chrono = { version = "0.4.38", features = ["serde"] }
-cozo = { version = "0.7.6", features = ["storage-rocksdb"] }
 futures = "0.3.30"
 itertools = "0.13.0"
-miette = { version = "5.5.0", features = ["fancy", "backtrace"] }
+schemars = "0.8.21"
 serde = { version = "1.0.203", features = ["derive"] }
 serde_json = "1.0.117"
+serde_yaml = "0.9.34"
+sqlx = { version = "0.7.4", features = [
+  "runtime-tokio",
+  "tls-rustls",
+  "macros",
+  "sqlite",
+  "uuid",
+  "chrono",
+  "regexp",
+] }
 sugars = "3.0.1"
 tantivy = { version = "0.22.0", features = ["zstd"] }
 tokio = { version = "1.38.0", features = ["full"] }
 uuid = { version = "1.8.0", features = ["v7"] }
+walkdir = "2.5.0"
+wasmtime = { version = "22.0.0", default-features = false, features = [
+  "runtime",
+  "cranelift",
+] }
+wasmtime-wasi = "22.0.0"
 
 [dependencies.async-imap]
 version = "0.9.7"

crates/panorama-core/build.rs Normal file (path inferred)

@@ -0,0 +1,4 @@
+fn main() {
+  println!("cargo:rerun-if-changed=../../apps");
+  println!("cargo:rerun-if-changed=migrations");
+}

(deleted file; path not shown)

@@ -1,36 +0,0 @@
-{
-  "$schema": "./base_fields_schema.json",
-  "fields": [
-    {
-      "fq_field_name": "/journal/page/content",
-      "relation_name": "content",
-      "relation_field_name": "string",
-      "type": "string",
-      "is_fts_enabled": true
-    },
-    {
-      "fq_field_name": "/mail/config/imap_hostname",
-      "relation_name": "mail_config",
-      "relation_field_name": "imap_hostname",
-      "type": "string"
-    },
-    {
-      "fq_field_name": "/mail/config/imap_port",
-      "relation_name": "mail_config",
-      "relation_field_name": "imap_port",
-      "type": "int"
-    },
-    {
-      "fq_field_name": "/mail/config/imap_username",
-      "relation_name": "mail_config",
-      "relation_field_name": "imap_username",
-      "type": "string"
-    },
-    {
-      "fq_field_name": "/mail/config/imap_password",
-      "relation_name": "mail_config",
-      "relation_field_name": "imap_password",
-      "type": "string"
-    }
-  ]
-}

base_fields_schema.json (deleted file; name inferred from the $schema reference above)

@@ -1,36 +0,0 @@
-{
-  "$schema": "http://json-schema.org/draft-07/schema#",
-  "type": "object",
-  "properties": {
-    "fields": {
-      "type": "array",
-      "items": {
-        "type": "object",
-        "required": [
-          "fq_field_name",
-          "relation_name",
-          "relation_field_name",
-          "type"
-        ],
-        "properties": {
-          "fq_field_name": {
-            "type": "string"
-          },
-          "relation_name": {
-            "type": "string"
-          },
-          "relation_field_name": {
-            "type": "string"
-          },
-          "type": {
-            "type": "string"
-          },
-          "is_fts_enabled": {
-            "type": "boolean"
-          }
-        }
-      }
-    }
-  },
-  "title": "Base Panorama Fields"
-}

(new SQL migration; build.rs above points at crates/panorama-core/migrations/, exact file name not shown)

@@ -0,0 +1,40 @@
+CREATE TABLE node (
+  node_id TEXT PRIMARY KEY,
+  node_type TEXT NOT NULL,
+  updated_at INTEGER NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  extra_data JSON
+);
+
+CREATE TABLE node_has_key (
+  node_id TEXT NOT NULL,
+  full_key TEXT NOT NULL,
+  PRIMARY KEY (node_id, full_key)
+);
+CREATE INDEX node_has_key_idx_node_id ON node_has_key(node_id);
+CREATE INDEX node_has_key_idx_full_key ON node_has_key(full_key);
+
+-- App-related tables
+CREATE TABLE app (
+  app_id INTEGER PRIMARY KEY AUTOINCREMENT,
+  app_name TEXT NOT NULL,
+  app_version TEXT NOT NULL,
+  app_version_hash TEXT,
+  app_description TEXT,
+  app_homepage TEXT,
+  app_repository TEXT,
+  app_license TEXT
+);
+
+CREATE TABLE app_table_mapping (
+  app_id INTEGER NOT NULL,
+  app_table_name TEXT NOT NULL,
+  db_table_name TEXT NOT NULL
+);
+
+CREATE TABLE key_mapping (
+  full_key TEXT NOT NULL,
+  app_id INTEGER NOT NULL,
+  app_table_name TEXT NOT NULL,
+  app_table_field TEXT NOT NULL,
+  is_fts_enabled BOOLEAN NOT NULL DEFAULT FALSE
+);
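
A quick sketch of how these two tables are meant to be read together (illustrative only, not part of the diff; the function name, pool handle, and key value are assumptions): nodes carry opaque ids and types, while node_has_key finds every node that carries a given fully-qualified key.

// Hypothetical lookup, not code from this commit: resolve nodes by full key
// via node_has_key, then join back to node to recover each node's type.
use sqlx::SqlitePool;

pub async fn nodes_with_key(
  pool: &SqlitePool,
  full_key: &str,
) -> sqlx::Result<Vec<(String, String)>> {
  sqlx::query_as(
    "SELECT n.node_id, n.node_type
     FROM node_has_key k
     JOIN node n ON n.node_id = k.node_id
     WHERE k.full_key = ?",
  )
  .bind(full_key)
  .fetch_all(pool)
  .await
}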

crates/panorama-core/src/lib.rs (path inferred)

@@ -8,7 +8,8 @@ extern crate sugars;
 pub mod migrations;
 pub mod state;
 
-pub mod mail;
+// pub mod mail;
+pub mod messaging;
 
 #[cfg(test)]
 mod tests;
@@ -16,7 +17,7 @@ use std::fmt;
 
 pub use crate::state::AppState;
-use miette::{bail, IntoDiagnostic, Result};
+use anyhow::{bail, Result};
 use serde_json::Value;
 use uuid::Uuid;
@@ -30,7 +31,7 @@ impl fmt::Display for NodeId {
 }
 
 pub fn ensure_ok(s: &str) -> Result<()> {
-  let status: Value = serde_json::from_str(&s).into_diagnostic()?;
+  let status: Value = serde_json::from_str(&s)?;
   let status = status.as_object().unwrap();
   let ok = status.get("ok").unwrap().as_bool().unwrap_or(false);
   if !ok {

crates/panorama-core/src/mail.rs (path inferred; note that lib.rs above now comments out pub mod mail, so the new version is not compiled)

@@ -1,160 +1,286 @@
-use std::{collections::HashMap, time::Duration};
+use std::{
+  collections::{HashMap, HashSet},
+  time::Duration,
+};
 
+use anyhow::{Context as _, Result};
+use async_imap::Session;
 use backoff::{exponential::ExponentialBackoff, SystemClock};
 use cozo::{DataValue, JsonData, ScriptMutability};
 use futures::TryStreamExt;
-use miette::{IntoDiagnostic, Result};
+use itertools::Itertools;
 use tokio::{net::TcpStream, time::sleep};
 use uuid::Uuid;
 
-use crate::AppState;
+use crate::{mail, AppState};
 
-pub async fn mail_loop(state: AppState) -> Result<()> {
-  backoff::future::retry(
-    ExponentialBackoff::<SystemClock>::default(),
-    || async {
-      mail_loop_inner(&state).await?;
-
-      // For now, just sleep 30 seconds and then fetch again
-      // TODO: Run a bunch of connections at once and do IDLE over them (if possible)
-      sleep(Duration::from_secs(30)).await;
-
-      Ok(())
-    },
-  )
-  .await?;
-
-  Ok(())
-}
-
-async fn mail_loop_inner(state: &AppState) -> Result<()> {
-  // Fetch the mail configs
-  let configs = state.fetch_mail_configs()?;
-  if configs.len() == 0 {
-    return Ok(());
-  }
-
-  // TODO: Do all configs instead of just the first
-  let config = &configs[0];
-
-  let stream =
-    TcpStream::connect((config.imap_hostname.as_str(), config.imap_port))
-      .await
-      .into_diagnostic()?;
-
-  let client = async_imap::Client::new(stream);
-  let mut session = client
-    .login(&config.imap_username, &config.imap_password)
-    .await
-    .map_err(|(err, _)| err)
-    .into_diagnostic()?;
-
-  // println!("Session: {:?}", session);
-
-  let mailboxes = session
-    .list(None, Some("*"))
-    .await
-    .into_diagnostic()?
-    .try_collect::<Vec<_>>()
-    .await
-    .into_diagnostic()?;
-
-  let mailbox_names =
-    mailboxes.iter().map(|name| name.name()).collect::<Vec<_>>();
-  println!("mailboxes: {mailbox_names:?}");
-
-  // Get the mailbox with INBOX
-  let inbox_node_id = {
-    let result = state.db.run_script("
-      ?[node_id] :=
-        *mailbox{node_id, account_node_id, mailbox_name},
-        account_node_id = $account_node_id,
-        mailbox_name = 'INBOX'
-    ", btmap! {"account_node_id".to_owned()=>DataValue::from(config.node_id.to_string())}, ScriptMutability::Immutable)?;
-
-    if result.rows.len() == 0 {
-      let new_node_id = Uuid::now_v7();
-      let new_node_id = new_node_id.to_string();
-      state.db.run_script("
-        ?[node_id, account_node_id, mailbox_name] <-
-          [[$new_node_id, $account_node_id, 'INBOX']]
-        :put mailbox { node_id, account_node_id, mailbox_name }
-        ",
-        btmap! {
-          "new_node_id".to_owned() => DataValue::from(new_node_id.clone()),
-          "account_node_id".to_owned() => DataValue::from(config.node_id.to_string()),
-        },
-        ScriptMutability::Mutable)?;
-      new_node_id
-    } else {
-      result.rows[0][0].get_str().unwrap().to_owned()
-    }
-  };
-  println!("INBOX: {:?}", inbox_node_id);
-
-  let inbox = session.select("INBOX").await.into_diagnostic()?;
-  println!("last unseen: {:?}", inbox.unseen);
-
-  let messages = session
-    .fetch(
-      "1:4",
-      "(FLAGS ENVELOPE BODY[HEADER] BODY[TEXT] INTERNALDATE)",
-    )
-    .await
-    .into_diagnostic()?
-    .try_collect::<Vec<_>>()
-    .await
-    .into_diagnostic()?;
-  println!(
-    "messages {:?}",
-    messages.iter().map(|f| f.body()).collect::<Vec<_>>()
-  );
-
-  let input_data = DataValue::List(
-    messages
-      .iter()
-      .map(|msg| {
-        let message_id = Uuid::now_v7();
-        let headers =
-          String::from_utf8(msg.header().unwrap().to_vec()).unwrap();
-        let headers = headers
-          .split("\r\n")
-          .filter_map(|s| {
-            // This is really bad lmao
-            let p = s.split(": ").collect::<Vec<_>>();
-            if p.len() < 2 {
-              None
-            } else {
-              Some((p[0], p[1]))
-            }
-          })
-          .collect::<HashMap<_, _>>();
-        DataValue::List(vec![
-          DataValue::from(message_id.to_string()),
-          DataValue::from(config.node_id.to_string()),
-          DataValue::from(inbox_node_id.clone()),
-          DataValue::from(
-            headers
-              .get("Subject")
-              .map(|s| (*s).to_owned())
-              .unwrap_or("Subject".to_owned()),
-          ),
-          DataValue::Json(JsonData(serde_json::to_value(headers).unwrap())),
-          DataValue::Bytes(msg.text().unwrap().to_vec()),
-          DataValue::from(msg.internal_date().unwrap().to_rfc3339()),
-        ])
-      })
-      .collect(),
-  );
-
-  state.db.run_script(
-    "
-    ?[node_id, account_node_id, mailbox_node_id, subject, headers, body, internal_date] <- $input_data
-    :put message { node_id, account_node_id, mailbox_node_id, subject, headers, body, internal_date }
-    ",
-    btmap! {
-      "input_data".to_owned() => input_data,
-    },
-    ScriptMutability::Mutable,
-  )?;
-
-  session.logout().await.into_diagnostic()?;
-
-  Ok(())
-}
+pub struct MailWorker {
+  state: AppState,
+}
+
+impl MailWorker {
+  pub fn new(state: AppState) -> MailWorker {
+    MailWorker { state }
+  }
+
+  pub async fn mail_loop(self) -> Result<()> {
+    loop {
+      let mut policy = ExponentialBackoff::<SystemClock>::default();
+      policy.current_interval = Duration::from_secs(5);
+      policy.initial_interval = Duration::from_secs(5);
+
+      backoff::future::retry(policy, || async {
+        match self.mail_loop_inner().await {
+          Ok(_) => {}
+          Err(err) => {
+            eprintln!("Mail error: {:?}", err);
+            Err(err)?;
+          }
+        }
+
+        // For now, just sleep 30 seconds and then fetch again
+        // TODO: Run a bunch of connections at once and do IDLE over them (if possible)
+        sleep(Duration::from_secs(30)).await;
+
+        Ok(())
+      })
+      .await?;
+    }
+
+    Ok(())
+  }
+
+  async fn mail_loop_inner(&self) -> Result<()> {
+    // Fetch the mail configs
+    let configs = self.state.fetch_mail_configs()?;
+    if configs.is_empty() {
+      return Ok(());
+    }
+
+    // TODO: Do all configs instead of just the first
+    let config = &configs[0];
+
+    let stream =
+      TcpStream::connect((config.imap_hostname.as_str(), config.imap_port))
+        .await?;
+
+    let client = async_imap::Client::new(stream);
+    let mut session = client
+      .login(&config.imap_username, &config.imap_password)
+      .await
+      .map_err(|(err, _)| err)?;
+
+    let all_mailbox_ids = self
+      .fetch_and_store_all_mailboxes(config.node_id.to_string(), &mut session)
+      .await
+      .context("Could not fetch mailboxes")?;
+
+    self
+      .fetch_all_mail_from_single_mailbox(
+        &mut session,
+        &all_mailbox_ids,
+        config.node_id.to_string(),
+        "INBOX",
+      )
+      .await
+      .context("Could not fetch mail from INBOX")?;
+
+    session.logout().await.into_diagnostic()?;
+
+    Ok(())
+  }
+
+  async fn fetch_and_store_all_mailboxes(
+    &self,
+    config_node_id: String,
+    session: &mut Session<TcpStream>,
+  ) -> Result<HashMap<String, String>> {
+    // println!("Session: {:?}", session);
+    let mailboxes = session
+      .list(None, Some("*"))
+      .await?
+      .try_collect::<Vec<_>>()
+      .await?;
+
+    let mut all_mailboxes = HashMap::new();
+
+    // TODO: Make this more efficient by using bulk in query
+    for mailbox in mailboxes {
+      let tx = self.state.db.multi_transaction(true);
+
+      let result = tx.run_script(
+        "
+        ?[node_id] :=
+          *mailbox{node_id, account_node_id, mailbox_name},
+          account_node_id = $account_node_id,
+          mailbox_name = $mailbox_name,
+        ",
+        btmap! {
+          "account_node_id".to_owned()=>DataValue::from(config_node_id.clone()),
+          "mailbox_name".to_owned()=>DataValue::from(mailbox.name().to_string()),
+        },
+      )?;
+
+      let node_id = if result.rows.len() == 0 {
+        let new_node_id = Uuid::now_v7();
+        let new_node_id = new_node_id.to_string();
+        let extra_data = json!({
+          "name": mailbox.name(),
+        });
+        tx.run_script("
+          ?[node_id, account_node_id, mailbox_name, extra_data] <-
+            [[$new_node_id, $account_node_id, $mailbox_name, $extra_data]]
+          :put mailbox { node_id, account_node_id, mailbox_name, extra_data }
+          ",
+          btmap! {
+            "new_node_id".to_owned() => DataValue::from(new_node_id.clone()),
+            "account_node_id".to_owned() => DataValue::from(config_node_id.clone()),
+            "mailbox_name".to_owned()=>DataValue::from(mailbox.name().to_string()),
+            "extra_data".to_owned()=>DataValue::Json(JsonData(extra_data)),
+          },
+        )?;
+        new_node_id
+      } else {
+        result.rows[0][0].get_str().unwrap().to_owned()
+      };
+
+      tx.commit()?;
+      all_mailboxes.insert(mailbox.name().to_owned(), node_id);
+    }
+
+    // println!("All mailboxes: {:?}", all_mailboxes);
+
+    Ok(all_mailboxes)
+  }
+
+  async fn fetch_all_mail_from_single_mailbox(
+    &self,
+    session: &mut Session<TcpStream>,
+    all_mailbox_ids: &HashMap<String, String>,
+    config_node_id: String,
+    mailbox_name: impl AsRef<str>,
+  ) -> Result<()> {
+    let mailbox_name = mailbox_name.as_ref();
+    let mailbox = session.select(mailbox_name).await.into_diagnostic()?;
+    let mailbox_node_id = all_mailbox_ids.get(mailbox_name).unwrap();
+
+    let extra_data = json!({
+      "uid_validity": mailbox.uid_validity,
+      "last_seen": mailbox.unseen,
+    });
+    // TODO: Validate uid validity here
+
+    let all_uids = session
+      .uid_search("ALL")
+      .await
+      .context("Could not fetch all UIDs")?;
+    println!("All UIDs ({}): {:?}", all_uids.len(), all_uids);
+
+    let messages = session
+      .uid_fetch(
+        all_uids.iter().join(","),
+        "(FLAGS ENVELOPE BODY[HEADER] BODY[TEXT] INTERNALDATE)",
+      )
+      .await
+      .into_diagnostic()?
+      .try_collect::<Vec<_>>()
+      .await
+      .into_diagnostic()
+      .context("Could not fetch messages")?;
+    println!(
+      "messages {:?}",
+      messages.iter().map(|f| f.body()).collect::<Vec<_>>()
+    );
+
+    let mut unique_message_ids = HashSet::new();
+    let data: Vec<_> = messages
+      .iter()
+      .map(|msg| {
+        let message_node_id = Uuid::now_v7();
+        let headers =
+          String::from_utf8(msg.header().unwrap().to_vec()).unwrap();
+        let headers = headers
+          .split("\r\n")
+          .filter_map(|s| {
+            let p = s.split(": ").collect::<Vec<_>>();
+            if p.len() < 2 {
+              None
+            } else {
+              Some((p[0], p[1..].join(": ")))
+            }
+          })
+          .collect::<HashMap<_, _>>();
+        let message_id = headers
+          .get("Message-ID")
+          .map(|s| (*s).to_owned())
+          .unwrap_or(message_node_id.to_string());
+        unique_message_ids.insert(message_id.clone());
+        DataValue::List(vec![
+          DataValue::from(message_node_id.to_string()),
+          DataValue::from(config_node_id.to_string()),
+          DataValue::from(mailbox_node_id.clone()),
+          DataValue::from(
+            headers
+              .get("Subject")
+              .map(|s| (*s).to_owned())
+              .unwrap_or("Subject".to_owned()),
+          ),
+          DataValue::Json(JsonData(serde_json::to_value(&headers).unwrap())),
+          DataValue::Bytes(msg.text().unwrap().to_vec()),
+          DataValue::from(msg.internal_date().unwrap().to_rfc3339()),
+          DataValue::from(message_id),
+        ])
+      })
+      .collect();
+
+    println!("Adding {} messages to database...", data.len());
+    let input_data = DataValue::List(data);
+
+    // TODO: Can this be one query?
+    let tx = self.state.db.multi_transaction(true);
+
+    let unique_message_ids_data_value = DataValue::List(
+      unique_message_ids
+        .into_iter()
+        .map(|s| DataValue::from(s))
+        .collect_vec(),
+    );
+
+    let existing_ids = tx.run_script(
+      "
+      ?[node_id] := *message { node_id, message_id },
+        is_in(message_id, $message_ids)
+      ",
+      btmap! { "message_ids".to_owned() => unique_message_ids_data_value },
+    )?;
+    println!("Existing ids: {:?}", existing_ids);
+
+    self
+      .state
+      .db
+      .run_script(
+        "
+        ?[
+          node_id, account_node_id, mailbox_node_id, subject, headers, body,
+          internal_date, message_id
+        ] <- $input_data
+        :put message {
+          node_id, account_node_id, mailbox_node_id, subject, headers, body,
+          internal_date, message_id
+        }
+        ",
+        btmap! {
+          "input_data".to_owned() => input_data,
+        },
+        ScriptMutability::Mutable,
+      )
+      .context("Could not add message to database")?;
+
+    Ok(())
+  }
+}

crates/panorama-core/src/messaging.rs Normal file (path inferred from lib.rs)

@@ -0,0 +1,4 @@
+//! Panorama uses an internal messaging system to pass content around
+//!
+//! This implementation is dead simple, just passes all messages and filters on the other end
+pub struct Messaging {}

crates/panorama-core/src/migrations.rs (path inferred)

@@ -1,196 +1,197 @@
-use cozo::DbInstance;
-use miette::{IntoDiagnostic, Result};
-use serde_json::Value;
-
-use crate::ensure_ok;
+use sqlx::migrate::Migrator;
 
-pub async fn run_migrations(db: &DbInstance) -> Result<()> {
+pub static MIGRATOR: Migrator = sqlx::migrate!();

(The rest of the old cozo-based implementation — run_migrations, check_migration_status, no_op, migration_01 — was commented out wholesale in the new file, with one visible edit: the commented mailbox relation now also lists uid_validity: Int? default null and extra_data: Json default {}, which the old uncommented version did not have. The new file continues:)

+// pub async fn run_migrations(db: &DbInstance) -> Result<()> {
+//   let migration_status = check_migration_status(db).await?;
+//   println!("migration status: {:?}", migration_status);
+
+//   let migrations: Vec<Box<dyn for<'a> Fn(&'a DbInstance) -> Result<()>>> =
+//     vec![Box::new(no_op), Box::new(migration_01)];
+
+//   if let MigrationStatus::NoMigrations = migration_status {
+//     let result = db.run_script_str(
+//       "
+//       { :create migrations { yeah: Int default 0 => version: Int default 0 } }
+//       {
+//         ?[yeah, version] <- [[0, 0]]
+//         :put migrations { yeah, version }
+//       }
+//       ",
+//       "",
+//       false,
+//     );
+//     ensure_ok(&result)?;
+//   }
+
+//   let start_at_migration = match migration_status {
+//     MigrationStatus::NoMigrations => 0,
+//     MigrationStatus::HasVersion(n) => n,
+//   };
+//   let migrations_to_run = migrations
+//     .iter()
+//     .enumerate()
+//     .skip(start_at_migration as usize + 1);
+//   // println!("running {} migrations...", migrations_to_run.len());
+
+//   //TODO: This should all be done in a transaction
+//   for (idx, migration) in migrations_to_run {
+//     println!("running migration {idx}...");
+
+//     migration(db)?;
+
+//     let result = db.run_script_str(
+//       "
+//       ?[yeah, version] <- [[0, $version]]
+//       :put migrations { yeah => version }
+//       ",
+//       &format!("{{\"version\":{}}}", idx),
+//       false,
+//     );
+
+//     ensure_ok(&result)?;
+
+//     println!("succeeded migration {idx}!");
+//   }
+
+//   Ok(())
+// }
+
+// #[derive(Debug)]
+// enum MigrationStatus {
+//   NoMigrations,
+//   HasVersion(u64),
+// }
+
+// async fn check_migration_status(db: &DbInstance) -> Result<MigrationStatus> {
+//   let status = db.run_script_str(
+//     "
+//     ?[yeah, version] := *migrations[yeah, version]
+//     ",
+//     "",
+//     true,
+//   );
+//   println!("Status: {}", status);
+
+//   let status: Value = serde_json::from_str(&status).into_diagnostic()?;
+//   let status = status.as_object().unwrap();
+//   let ok = status.get("ok").unwrap().as_bool().unwrap_or(false);
+//   if !ok {
+//     let status_code = status.get("code").unwrap().as_str().unwrap();
+//     if status_code == "query::relation_not_found" {
+//       return Ok(MigrationStatus::NoMigrations);
+//     }
+//   }
+
+//   let rows = status.get("rows").unwrap().as_array().unwrap();
+//   let row = rows[0].as_array().unwrap();
+//   let version = row[1].as_number().unwrap().as_u64().unwrap();
+//   println!("row: {row:?}");
+
+//   Ok(MigrationStatus::HasVersion(version))
+// }
+
+// fn no_op(_: &DbInstance) -> Result<()> {
+//   Ok(())
+// }
+
+// fn migration_01(db: &DbInstance) -> Result<()> {
+//   let result = db.run_script_str(
+//     "
+//     # Primary node type
+//     {
+//       :create node {
+//         id: String
+//         =>
+//         type: String,
+//         created_at: Float default now(),
+//         updated_at: Float default now(),
+//         extra_data: Json default {},
+//       }
+//     }
+
+//     # Inverse mappings for easy querying
+//     { :create node_has_key { key: String => id: String } }
+//     { ::index create node_has_key:inverse { id } }
+//     { :create node_managed_by_app { node_id: String => app: String } }
+//     { :create node_refers_to { node_id: String => other_node_id: String } }
+//     {
+//       :create fqkey_to_dbkey {
+//         key: String
+//         =>
+//         relation: String,
+//         field_name: String,
+//         type: String,
+//         is_fts_enabled: Bool,
+//       }
+//     }
+//     {
+//       ?[key, relation, field_name, type, is_fts_enabled] <- [
+//         ['panorama/journal/page/day', 'journal_day', 'day', 'string', false],
+//         ['panorama/journal/page/title', 'journal', 'title', 'string', true],
+//         ['panorama/journal/page/content', 'journal', 'content', 'string', true],
+//         ['panorama/mail/config/imap_hostname', 'mail_config', 'imap_hostname', 'string', false],
+//         ['panorama/mail/config/imap_port', 'mail_config', 'imap_port', 'int', false],
+//         ['panorama/mail/config/imap_username', 'mail_config', 'imap_username', 'string', false],
+//         ['panorama/mail/config/imap_password', 'mail_config', 'imap_password', 'string', false],
+//         ['panorama/mail/message/body', 'message', 'body', 'string', true],
+//         ['panorama/mail/message/subject', 'message', 'subject', 'string', true],
+//         ['panorama/mail/message/message_id', 'message', 'message_id', 'string', false],
+//       ]
+//       :put fqkey_to_dbkey { key, relation, field_name, type, is_fts_enabled }
+//     }
+
+//     # Create journal type
+//     { :create journal { node_id: String => title: String default '', content: String } }
+//     { :create journal_day { day: String => node_id: String } }
+
+//     # Mail
+//     {
+//       :create mail_config {
+//         node_id: String
+//         =>
+//         imap_hostname: String,
+//         imap_port: Int,
+//         imap_username: String,
+//         imap_password: String,
+//       }
+//     }
+//     {
+//       :create mailbox {
+//         node_id: String
+//         =>
+//         account_node_id: String,
+//         mailbox_name: String,
+//         uid_validity: Int? default null,
+//         extra_data: Json default {},
+//       }
+//     }
+//     { ::index create mailbox:by_account_id_and_name { account_node_id, mailbox_name } }
+//     {
+//       :create message {
+//         node_id: String
+//         =>
+//         message_id: String,
+//         account_node_id: String,
+//         mailbox_node_id: String,
+//         subject: String,
+//         headers: Json?,
+//         body: Bytes,
+//         internal_date: String,
+//       }
+//     }
+//     { ::index create message:message_id { message_id } }
+//     { ::index create message:date { internal_date } }
+//     { ::index create message:by_mailbox_id { mailbox_node_id } }
+
+//     # Calendar
+//     ",
+//     "",
+//     false,
+//   );
+//   ensure_ok(&result)?;
+
+//   Ok(())
+// }

internal.rs (new file; full path not shown — the wasmtime host environment referenced by the module below)

@@ -0,0 +1,51 @@
+use std::io::{stdout, Write};
+
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use wasmtime::{Caller, InstancePre, Linker, Memory};
+
+pub struct WasmtimeModule {
+  pub(crate) module: InstancePre<WasmtimeInstanceEnv>,
+}
+
+impl WasmtimeModule {
+  pub fn link_imports(linker: &mut Linker<WasmtimeInstanceEnv>) -> Result<()> {
+    macro_rules! link_function {
+      ($($module:literal :: $func:ident),* $(,)?) => {
+        linker $(
+          .func_wrap(
+            $module,
+            concat!("_", stringify!($func)),
+            WasmtimeInstanceEnv::$func,
+          )?
+        )*;
+      };
+    }
+    abi_funcs!(link_function);
+    Ok(())
+  }
+}
+
+/// This is loosely based on SpacetimeDB's implementation of their host.
+/// See: https://github.com/clockworklabs/SpacetimeDB/blob/c19c0d45c454db2a4215deb23c7f9f82cb5d7561/crates/core/src/host/wasmtime/wasm_instance_env.rs
+pub struct WasmtimeInstanceEnv {
+  /// This is only an Option because memory is initialized after this is created so we need to come back and put it in later
+  pub(crate) mem: Option<Memory>,
+}
+
+impl WasmtimeInstanceEnv {
+  pub fn print(mut caller: Caller<'_, Self>, len: u64, ptr: u32) {
+    let mem = caller.data().mem.unwrap();
+    let mut buffer = vec![0; len as usize];
+    mem.read(caller, ptr as usize, &mut buffer);
+    let s = String::from_utf8(buffer).unwrap();
+    println!("Called print: {}", s);
+  }
+
+  pub fn get_current_time(_: Caller<'_, Self>) -> i64 {
+    let now = Utc::now();
+    now.timestamp_nanos_opt().unwrap()
+  }
+
+  pub fn register_endpoint(mut caller: Caller<'_, Self>) {}
+}

macros.rs (new file; full path not shown)

@@ -0,0 +1,10 @@
+macro_rules! abi_funcs {
+  ($macro_name:ident) => {
+    // TODO: Why is this "env"? How do i use another name
+    $macro_name! {
+      "env"::get_current_time,
+      "env"::print,
+      "env"::register_endpoint,
+    }
+  };
+}
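
One likely answer to the TODO above: wasm32-unknown-unknown guests put bare extern imports under the module name "env" by default, which is why the host registers everything there. A guest-side sketch of the matching imports (an assumption for illustration, not part of this diff — note the underscore prefix that link_imports adds via concat!):

// Hypothetical guest-side declarations mirroring the host ABI above.
#[link(wasm_import_module = "env")]
extern "C" {
  fn _print(len: u64, ptr: u32);
  fn _get_current_time() -> i64;
  fn _register_endpoint();
}

// Safe wrapper a guest SDK might expose over the raw import.
pub fn print_str(s: &str) {
  unsafe { _print(s.len() as u64, s.as_ptr() as u32) }
}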

memory.rs (new file; full path not shown)

@@ -0,0 +1,7 @@
+use anyhow::Result;
+
+pub struct Memory {
+  pub memory: wasmtime::Memory,
+}
+
+impl Memory {}

(new file; path not shown — the Wasm app-loading module that owns macros.rs, internal.rs, manifest.rs and memory.rs above)

@@ -0,0 +1,160 @@
+#[macro_use]
+pub mod macros;
+
+pub mod internal;
+pub mod manifest;
+pub mod memory;
+
+use std::{
+  collections::HashMap,
+  fs::{self, File},
+  io::Read,
+  path::{Path, PathBuf},
+};
+
+use anyhow::{anyhow, Context as _, Result};
+use internal::{WasmtimeInstanceEnv, WasmtimeModule};
+use itertools::Itertools;
+use wasmtime::{AsContext, Config, Engine, Linker, Memory, Module, Store};
+
+use crate::AppState;
+
+use self::manifest::AppManifest;
+
+pub type AllAppData = HashMap<String, AppData>;
+
+impl AppState {
+  pub async fn install_apps_from_search_paths(&self) -> Result<AllAppData> {
+    let search_paths = vec![
+      PathBuf::from("/Users/michael/Projects/panorama/apps"),
+      PathBuf::from("/home/michael/Projects/panorama/apps"),
+    ];
+
+    let mut found = Vec::new();
+
+    for path in search_paths {
+      if !path.exists() {
+        continue;
+      }
+
+      let read_dir = fs::read_dir(&path)
+        .with_context(|| format!("could not read {}", path.display()))?;
+      for dir_entry in read_dir {
+        let dir_entry = dir_entry?;
+        let path = dir_entry.path();
+        let manifest_path = path.join("manifest.yml");
+        if manifest_path.exists() {
+          found.push(path);
+        }
+      }
+    }
+
+    let mut all_app_data = HashMap::new();
+    for path in found {
+      let app_data = self.install_app_from_path(&path).await?;
+      println!("App data: {:?}", app_data);
+      all_app_data.insert(
+        path.display().to_string(),
+        AppData {
+          name: "hello".to_string(),
+        },
+      );
+    }
+
+    Ok(all_app_data)
+  }
+}
+
+#[derive(Debug)]
+pub struct AppData {
+  name: String,
+}
+
+impl AppState {
+  async fn install_app_from_path(&self, path: impl AsRef<Path>) -> Result<()> {
+    let app_path = path.as_ref();
+    let manifest_path = app_path.join("manifest.yml");
+    let manifest: AppManifest = {
+      let file = File::open(&manifest_path)?;
+      serde_yaml::from_reader(file).with_context(|| {
+        format!(
+          "Could not parse config file from {}",
+          manifest_path.display()
+        )
+      })?
+    };
+    println!("Manifest: {:?}", manifest);
+
+    let module_path = app_path.join(manifest.module);
+    let installer_program = {
+      let mut file = File::open(&module_path).with_context(|| {
+        format!(
+          "Could not open installer from path: {}",
+          module_path.display()
+        )
+      })?;
+      let mut buf = Vec::new();
+      file.read_to_end(&mut buf)?;
+      buf
+    };
+    println!("Installer program: {} bytes", installer_program.len());
+
+    let config = Config::new();
+    let engine = Engine::new(&config)?;
+    let module = Module::new(&engine, &installer_program)?;
+
+    let mut linker = Linker::new(&engine);
+    WasmtimeModule::link_imports(&mut linker)?;
+    let module = linker.instantiate_pre(&module)?;
+    let module = WasmtimeModule { module };
+
+    let mut state = WasmtimeInstanceEnv { mem: None };
+    let mut store = Store::new(&engine, state);
+
+    println!(
+      "Required imports: {:?}",
+      module
+        .module
+        .module()
+        .imports()
+        .map(|s| s.name())
+        .collect_vec()
+    );
+
+    let instance = module
+      .module
+      .instantiate(&mut store)
+      .context("Could not instantiate")?;
+    let mem = instance
+      .get_memory(&mut store, "memory")
+      .ok_or_else(|| anyhow!("Fuck!"))?;
+    store.data_mut().mem = Some(mem);
+
+    instance.exports(&mut store).for_each(|export| {
+      println!("Export: {}", export.name());
+    });
+
+    let hello = instance
+      .get_typed_func::<(), i32>(&mut store, "install")
+      .context("Could not get typed function")?;
+
+    hello.call(&mut store, ()).context("Could not call")?;
+
+    Ok(())
+  }
+}
+
+fn read_utf_8string<C>(
+  c: C,
+  mem: &Memory,
+  len: usize,
+  offset: usize,
+) -> Result<String>
+where
+  C: AsContext,
+{
+  let mut buffer = vec![0; len];
+  mem.read(c, offset, &mut buffer)?;
+  let string = String::from_utf8(buffer)?;
+  Ok(string)
+}

manifest.rs (new file; full path not shown)

@@ -0,0 +1,27 @@
+use std::path::PathBuf;
+
+use schemars::JsonSchema;
+
+#[derive(Debug, Serialize, Deserialize, JsonSchema)]
+pub struct AppManifest {
+  pub name: String,
+  pub version: Option<String>,
+  pub panorama_version: Option<String>,
+  pub description: Option<String>,
+
+  pub module: PathBuf,
+
+  #[serde(default)]
+  pub endpoints: Vec<AppManifestEndpoint>,
+
+  #[serde(default)]
+  pub triggers: Vec<AppManifestTriggers>,
+}
+
+#[derive(Debug, Serialize, Deserialize, JsonSchema)]
+pub struct AppManifestEndpoint {
+  pub url: String,
+  pub method: String,
+  pub export_name: String,
+}
+
+#[derive(Debug, Serialize, Deserialize, JsonSchema)]
+pub struct AppManifestTriggers {}

(new file; path not shown — likely the appsv0 module declared in state/mod.rs below)

@@ -0,0 +1,74 @@
+pub mod manifest;
+
+use std::{
+  collections::HashMap,
+  fs::{self, File},
+  path::{Path, PathBuf},
+};
+
+use anyhow::{Context as _, Result};
+
+use crate::AppState;
+
+use self::manifest::AppManifest;
+
+impl AppState {
+  pub async fn install_apps_from_search_paths(&self) -> Result<()> {
+    let search_paths = vec![
+      PathBuf::from("/Users/michael/Projects/panorama/apps"),
+      PathBuf::from("/home/michael/Projects/panorama/apps"),
+    ];
+
+    let mut found = Vec::new();
+
+    for path in search_paths {
+      if !path.exists() {
+        continue;
+      }
+
+      let read_dir = fs::read_dir(&path)
+        .with_context(|| format!("could not read {}", path.display()))?;
+      for dir_entry in read_dir {
+        let dir_entry = dir_entry?;
+        let path = dir_entry.path();
+        let manifest_path = path.join("manifest.yml");
+        if manifest_path.exists() {
+          found.push(path);
+        }
+      }
+    }
+
+    // let mut all_app_data = HashMap::new();
+    // for path in found {
+    //   let app_data = self.install_app_from_path(&path).await?;
+    //   println!("App data: {:?}", app_data);
+    //   all_app_data.insert(
+    //     path.display().to_string(),
+    //     AppData {
+    //       name: "hello".to_string(),
+    //     },
+    //   );
+    // }
+
+    Ok(())
+  }
+
+  async fn install_app_from_path(&self, path: impl AsRef<Path>) -> Result<()> {
+    let app_path = path.as_ref();
+    let manifest_path = app_path.join("manifest.yml");
+    let manifest: AppManifest = {
+      let file = File::open(&manifest_path)?;
+      serde_yaml::from_reader(file).with_context(|| {
+        format!(
+          "Could not parse config file from {}",
+          manifest_path.display()
+        )
+      })?
+    };
+    println!("Manifest: {:?}", manifest);
+
+    todo!()
+  }
+}

(new file; path not shown)

@@ -0,0 +1,3 @@
+use crate::AppState;
+
+impl AppState {}

(modified file; path not shown — likely the state export module, given the relation-listing code)

@@ -1,7 +1,7 @@
 use std::collections::HashMap;
 
+use anyhow::Result;
 use cozo::ScriptMutability;
-use miette::Result;
 use serde_json::Value;
 
 use crate::AppState;
@@ -47,6 +47,10 @@ impl AppState {
     let mut all_relations = hmap! {};
 
     for relation_name in relation_names.iter() {
+      if relation_name.contains(":") {
+        continue;
+      }
+
       let mut relation_info = vec![];
 
       let columns = relation_columns.get(relation_name.as_str()).unwrap();

crates/panorama-core/src/state/journal.rs (path inferred)

@@ -1,8 +1,7 @@
 use std::str::FromStr;
 
+use anyhow::Result;
 use chrono::Local;
-use cozo::ScriptMutability;
-use miette::{IntoDiagnostic, Result};
 use uuid::Uuid;
 
 use crate::{AppState, NodeId};
@@ -42,29 +41,6 @@ impl AppState {
       )
       .await?;
 
-    // self.db.run_script(
-    //   "
-    //   {
-    //     ?[id, type] <- [[$node_id, 'panorama/journal/page']]
-    //     :put node { id, type }
-    //   }
-    //   {
-    //     ?[node_id, title, content] <- [[$node_id, $title, '']]
-    //     :put journal { node_id => title, content }
-    //   }
-    //   {
-    //     ?[day, node_id] <- [[$day, $node_id]]
-    //     :put journal_day { day => node_id }
-    //   }
-    //   ",
-    //   btmap! {
-    //     "node_id".to_owned() => node_id.clone().into(),
-    //     "day".to_owned() => today.clone().into(),
-    //     "title".to_owned() => today.clone().into(),
-    //   },
-    //   ScriptMutability::Mutable,
-    // )?;
-
     return Ok(node_info.node_id);
   }

(modified file; path not shown — likely crates/panorama-core/src/state/mail.rs)

@@ -1,8 +1,8 @@
 use std::{collections::HashMap, str::FromStr, time::Duration};
 
+use anyhow::Result;
 use cozo::{DataValue, JsonData, ScriptMutability};
 use futures::TryStreamExt;
-use miette::{IntoDiagnostic, Result};
 use tokio::{net::TcpStream, time::sleep};
 use uuid::Uuid;

crates/panorama-core/src/state/mod.rs (path inferred)

@@ -1,21 +1,33 @@
-pub mod export;
-pub mod journal;
-pub mod mail;
+// pub mod apps;
+// pub mod codetrack;
+// pub mod export;
+// pub mod journal;
+// pub mod mail;
+pub mod appsv0;
 pub mod node;
-pub mod utils;
+pub mod node_raw;
+// pub mod utils;
 
 use std::{collections::HashMap, fs, path::Path};
 
+use anyhow::{Context, Result};
 use bimap::BiMap;
-use cozo::DbInstance;
-use miette::{IntoDiagnostic, Result};
+use sqlx::{
+  pool::PoolConnection,
+  sqlite::{SqliteConnectOptions, SqliteJournalMode, SqlitePoolOptions},
+  Sqlite, SqlitePool,
+};
 use tantivy::{
  directory::MmapDirectory,
  schema::{Field, Schema, STORED, STRING, TEXT},
  Index,
 };
+use wasmtime::Module;
 
-use crate::{mail::mail_loop, migrations::run_migrations};
+use crate::{
+  // mail::MailWorker,
+  migrations::MIGRATOR,
+};
 
 pub fn tantivy_schema() -> (Schema, BiMap<String, Field>) {
   let mut schema_builder = Schema::builder();
@@ -33,52 +45,73 @@ pub fn tantivy_schema() -> (Schema, BiMap<String, Field>) {
 #[derive(Clone)]
 pub struct AppState {
-  pub db: DbInstance,
+  pub db: SqlitePool,
   pub tantivy_index: Index,
   pub tantivy_field_map: BiMap<String, Field>,
+  pub app_wasm_modules: HashMap<String, Module>,
+  // TODO: Compile this into a more efficient thing than just iter
+  pub app_routes: HashMap<String, Vec<AppRoute>>,
+}
+
+#[derive(Clone)]
+pub struct AppRoute {
+  route: String,
+  handler_name: String,
 }
 
 impl AppState {
   pub async fn new(panorama_dir: impl AsRef<Path>) -> Result<Self> {
     let panorama_dir = panorama_dir.as_ref().to_path_buf();
+    fs::create_dir_all(&panorama_dir)
+      .context("Could not create panorama directory")?;
+
     println!("Panorama dir: {}", panorama_dir.display());
 
     let (tantivy_index, tantivy_field_map) = {
       let (schema, field_map) = tantivy_schema();
       let tantivy_path = panorama_dir.join("tantivy-index");
-      fs::create_dir_all(&tantivy_path).into_diagnostic()?;
-      let dir = MmapDirectory::open(&tantivy_path).into_diagnostic()?;
-      let index = Index::builder()
-        .schema(schema)
-        .open_or_create(dir)
-        .into_diagnostic()?;
+      fs::create_dir_all(&tantivy_path)?;
+      let dir = MmapDirectory::open(&tantivy_path)?;
+      let index = Index::builder().schema(schema).open_or_create(dir)?;
       (index, field_map)
     };
 
     let db_path = panorama_dir.join("db.sqlite");
-    let db = DbInstance::new(
-      "sqlite",
-      db_path.display().to_string(),
-      Default::default(),
-    )
-    .unwrap();
+    let sqlite_connect_options = SqliteConnectOptions::new()
+      .filename(db_path)
+      .journal_mode(SqliteJournalMode::Wal)
+      .create_if_missing(true);
+    let db = SqlitePoolOptions::new()
+      .connect_with(sqlite_connect_options)
+      .await
+      .context("Could not connect to SQLite database")?;
 
     let state = AppState {
       db,
       tantivy_index,
      tantivy_field_map,
+      app_wasm_modules: Default::default(),
+      app_routes: Default::default(),
     };
 
     state.init().await?;
 
     Ok(state)
   }
 
+  pub async fn conn(&self) -> Result<PoolConnection<Sqlite>> {
+    self.db.acquire().await.map_err(|err| err.into())
+  }
+
   async fn init(&self) -> Result<()> {
-    run_migrations(&self.db).await?;
-
-    let state = self.clone();
-    tokio::spawn(async move { mail_loop(state).await });
+    // run_migrations(&self.db).await?;
+    MIGRATOR
+      .run(&self.db)
+      .await
+      .context("Could not migrate database")?;
 
     Ok(())
   }
+
+  pub fn handle_app_route() {}
 }

View file

@ -1,26 +1,19 @@
use std::{ use std::collections::{BTreeMap, HashMap};
collections::{BTreeMap, HashMap},
str::FromStr,
};
use anyhow::Result;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use cozo::{DataValue, MultiTransaction, NamedRows};
use itertools::Itertools; use itertools::Itertools;
use miette::{bail, IntoDiagnostic, Result};
use serde_json::Value; use serde_json::Value;
use tantivy::{ use sqlx::{Connection, Executor, FromRow, QueryBuilder, Sqlite};
collector::TopDocs,
query::QueryParser,
schema::{OwnedValue, Value as _},
Document, TantivyDocument, Term,
};
use uuid::Uuid; use uuid::Uuid;
use crate::{AppState, NodeId}; use crate::{state::node_raw::FieldMappingRow, AppState, NodeId};
use super::utils::{data_value_to_json_value, owned_value_to_json_value}; // use super::utils::owned_value_to_json_value;
pub type ExtraData = BTreeMap<String, Value>; pub type ExtraData = BTreeMap<String, Value>;
pub type FieldsByTable<'a> =
HashMap<(&'a i64, &'a String), Vec<&'a FieldMappingRow>>;
#[derive(Debug)] #[derive(Debug)]
pub struct NodeInfo { pub struct NodeInfo {
@ -30,131 +23,159 @@ pub struct NodeInfo {
pub fields: Option<HashMap<String, Value>>, pub fields: Option<HashMap<String, Value>>,
} }
#[derive(Debug)]
pub struct FieldInfo {
pub relation_name: String,
pub relation_field: String,
pub r#type: String,
pub is_fts_enabled: bool,
}
pub type FieldMapping = HashMap<String, FieldInfo>;
impl AppState { impl AppState {
/// Get all properties of a node /// Get all properties of a node
pub async fn get_node(&self, node_id: impl AsRef<str>) -> Result<NodeInfo> { pub async fn get_node(&self, node_id: impl AsRef<str>) -> Result<NodeInfo> {
let node_id = node_id.as_ref().to_owned(); let node_id = node_id.as_ref().to_owned();
let tx = self.db.multi_transaction(false); let mut conn = self.conn().await?;
let result = tx.run_script( conn
" .transaction::<_, _, sqlx::Error>(|tx| {
?[key, relation, field_name, type, is_fts_enabled] := Box::pin(async move {
*node_has_key { key, id }, let node_id = node_id.clone();
*fqkey_to_dbkey { key, relation, field_name, type, is_fts_enabled }, let field_mapping =
id = $node_id AppState::get_related_field_list_for_node_id(&mut **tx, &node_id)
", .await?;
btmap! {"node_id".to_owned() => node_id.to_string().into()},
)?;
let field_mapping = AppState::rows_to_field_mapping(result)?; // Group the keys by which relation they're in
let fields_by_table = field_mapping.iter().into_group_map_by(
|FieldMappingRow {
app_id,
app_table_name,
..
}| (app_id, app_table_name),
);
// Group the keys by which relation they're in // Run the query that grabs all of the relevant fields, and coalesce
let result_by_relation = field_mapping // the fields back
.iter() let related_fields =
.into_group_map_by(|(_, FieldInfo { relation_name, .. })| relation_name); AppState::query_related_fields(&mut **tx, &fields_by_table).await?;
let mut all_relation_queries = vec![]; println!("Related fields: {:?}", related_fields);
let mut all_relation_constraints = vec![];
let mut all_fields = vec![]; // let created_at = DateTime::from_timestamp_millis(
let mut field_counter = 0; // (result.rows[0][2].get_float().unwrap() * 1000.0) as i64,
for (i, (relation, fields)) in result_by_relation.iter().enumerate() { // )
let constraint_name = format!("c{i}"); // .unwrap();
// let updated_at = DateTime::from_timestamp_millis(
// (result.rows[0][3].get_float().unwrap() * 1000.0) as i64,
// )
// .unwrap();
// let mut fields = HashMap::new();
// for row in result
// .rows
// .into_iter()
// .map(|row| row.into_iter().skip(4).zip(all_fields.iter()))
// {
// for (value, (_, _, field_name)) in row {
// fields.insert(
// field_name.to_string(),
// data_value_to_json_value(&value),
// );
// }
// }
todo!()
// Ok(NodeInfo {
// node_id: NodeId(Uuid::from_str(&node_id).unwrap()),
// created_at,
// updated_at,
// fields: Some(fields),
// })
})
})
.await?;
todo!()
// Ok(())
}
async fn query_related_fields<'e, 'c: 'e, X>(
x: X,
fields_by_table: &FieldsByTable<'_>,
) -> sqlx::Result<HashMap<String, Value>>
where
X: 'e + Executor<'c, Database = Sqlite>,
{
let mut query = QueryBuilder::new("");
let mut mapping = HashMap::new();
let mut ctr = 0;
let mut selected_fields = vec![];
for ((app_id, app_table_name), fields) in fields_by_table.iter() {
let table_gen_name = format!("c{ctr}");
ctr += 1;
let mut keys = vec![]; let mut keys = vec![];
let mut constraints = vec![]; for field_info in fields.iter() {
for (key, field_info) in fields.iter() { let field_gen_name = format!("f{ctr}");
let counted_field_name = format!("f{field_counter}"); ctr += 1;
field_counter += 1; mapping.insert(&field_info.full_key, field_gen_name.clone());
keys.push(counted_field_name.clone()); keys.push(field_gen_name.clone());
constraints.push(format!(
"{}: {}", selected_fields.push(format!(
field_info.relation_field.to_owned(), "{}.{} as {}",
counted_field_name, table_gen_name, field_info.app_table_field, field_gen_name
)); ));
all_fields.push((
counted_field_name, // constraints.push(format!(
field_info.relation_field.to_owned(), // "{}: {}",
key, // field_info.relation_field.to_owned(),
)) // field_gen_name,
// ));
// all_fields.push((
// field_gen_name,
// field_info.relation_field.to_owned(),
// key,
// ))
} }
let keys = keys.join(", "); // let keys = keys.join(", ");
let constraints = constraints.join(", "); // let constraints = constraints.join(", ");
all_relation_queries.push(format!( // all_relation_queries.push(format!(
" // "
{constraint_name}[{keys}] := // {table_gen_name}[{keys}] :=
*{relation}{{ node_id, {constraints} }}, // *{relation}{{ node_id, {constraints} }},
node_id = $node_id // node_id = $node_id
" // "
)); // ));
all_relation_constraints.push(format!("{constraint_name}[{keys}],")) // all_relation_constraints.push(format!("{table_gen_name}[{keys}],"))
} }
let all_relation_constraints = all_relation_constraints.join("\n"); if selected_fields.is_empty() {
let all_relation_queries = all_relation_queries.join("\n\n"); return Ok(HashMap::new());
let all_field_names = all_fields
.iter()
.map(|(field_name, _, _)| field_name)
.join(", ");
let query = format!(
"
{all_relation_queries}
?[type, extra_data, created_at, updated_at, {all_field_names}] :=
*node {{ id, type, created_at, updated_at, extra_data }},
{all_relation_constraints}
id = $node_id
"
);
let result = tx.run_script(
&query,
btmap! { "node_id".to_owned() => node_id.to_string().into(), },
)?;
if result.rows.is_empty() {
bail!("Not found")
} }
let created_at = DateTime::from_timestamp_millis( query.push("SELECT ");
(result.rows[0][2].get_float().unwrap() * 1000.0) as i64, query.push(selected_fields.join(", "));
) query.push(" FROM ");
.unwrap(); println!("Query: {:?}", query.sql());
let updated_at = DateTime::from_timestamp_millis( // let all_relation_constraints = all_relation_constraints.join("\n");
(result.rows[0][3].get_float().unwrap() * 1000.0) as i64, // let all_relation_queries = all_relation_queries.join("\n\n");
) // let all_field_names = all_fields
.unwrap(); // .iter()
// .map(|(field_name, _, _)| field_name)
// .join(", ");
// let _query = format!(
// "
// {all_relation_queries}
let mut fields = HashMap::new(); // ?[type, extra_data, created_at, updated_at, {all_field_names}] :=
// *node {{ id, type, created_at, updated_at, extra_data }},
// {all_relation_constraints}
// id = $node_id
// "
// );
for row in result let rows = query.build().fetch_all(x).await;
.rows
.into_iter()
.map(|row| row.into_iter().skip(4).zip(all_fields.iter()))
{
for (value, (_, _, field_name)) in row {
fields.insert(field_name.to_string(), data_value_to_json_value(&value));
}
}
Ok(NodeInfo { todo!()
node_id: NodeId(Uuid::from_str(&node_id).unwrap()),
created_at,
updated_at,
fields: Some(fields),
})
} }
} }
@ -184,292 +205,319 @@ impl AppState {
println!("Request: {opts:?} {extra_data:?}"); println!("Request: {opts:?} {extra_data:?}");
let tx = self.db.multi_transaction(true); let mut conn = self.conn().await?;
let (created_at, updated_at) = match opts { conn
CreateOrUpdate::Create { ref r#type } => { .transaction::<_, _, sqlx::Error>(|tx| {
let node_result = tx.run_script( Box::pin(async move {
" let node_info = match opts {
?[id, type] <- [[$node_id, $type]] CreateOrUpdate::Create { r#type } => {
:put node { id, type } AppState::create_node_raw(&mut **tx, &r#type).await?
:returning
",
btmap! {
"node_id".to_owned() => DataValue::from(node_id.clone()),
"type".to_owned() => DataValue::from(r#type.to_owned()),
},
)?;
let created_at = DateTime::from_timestamp_millis(
(node_result.rows[0][3].get_float().unwrap() * 1000.0) as i64,
)
.unwrap();
let updated_at = DateTime::from_timestamp_millis(
(node_result.rows[0][4].get_float().unwrap() * 1000.0) as i64,
)
.unwrap();
(created_at, updated_at)
}
CreateOrUpdate::Update { .. } => {
let node_result = tx.run_script(
"
?[id, type, created_at, updated_at] := *node { id, type, created_at, updated_at },
id = $node_id
",
btmap! {
"node_id".to_owned() => DataValue::from(node_id.clone()),
},
)?;
let created_at = DateTime::from_timestamp_millis(
(node_result.rows[0][2].get_float().unwrap() * 1000.0) as i64,
)
.unwrap();
let updated_at = DateTime::from_timestamp_millis(
(node_result.rows[0][3].get_float().unwrap() * 1000.0) as i64,
)
.unwrap();
(created_at, updated_at)
}
};
if let Some(extra_data) = extra_data {
let node_id_field = self
.tantivy_field_map
.get_by_left("node_id")
.unwrap()
.clone();
if !extra_data.is_empty() {
let keys = extra_data.keys().map(|s| s.to_owned()).collect::<Vec<_>>();
let field_mapping =
self.get_rows_for_extra_keys(&tx, keys.as_slice())?;
// Group the keys by which relation they're in
let result_by_relation = field_mapping.iter().into_group_map_by(
|(_, FieldInfo { relation_name, .. })| relation_name,
);
for (relation, fields) in result_by_relation.iter() {
let mut doc = btmap! { node_id_field.clone() => OwnedValue::Str(node_id.to_owned()) };
let fields_mapping = fields
.into_iter()
.map(
|(
key,
FieldInfo {
relation_field,
r#type,
is_fts_enabled,
..
},
)| {
let new_value = extra_data.get(*key).unwrap();
// TODO: Make this more generic
let new_value = match r#type.as_str() {
"int" => DataValue::from(new_value.as_i64().unwrap()),
_ => DataValue::from(new_value.as_str().unwrap()),
};
if *is_fts_enabled {
if let Some(field) = self.tantivy_field_map.get_by_left(*key)
{
doc.insert(
field.clone(),
OwnedValue::Str(new_value.get_str().unwrap().to_owned()),
);
}
}
(relation_field.to_owned(), new_value)
},
)
.collect::<BTreeMap<_, _>>();
let mut writer =
self.tantivy_index.writer(15_000_000).into_diagnostic()?;
let delete_term =
Term::from_field_text(node_id_field.clone(), &node_id);
      CreateOrUpdate::Update { node_id } => todo!(),
    };

    if let Some(extra_data) = extra_data {
      if !extra_data.is_empty() {
        let node_id_str = node_id.to_string();
        let field_mapping = AppState::get_related_field_list_for_node_id(
          &mut **tx,
          &node_id_str,
        )
        .await?;

        // Group the keys by which relation they're in
        let fields_by_table = field_mapping.iter().into_group_map_by(
          |FieldMappingRow {
             app_id,
             app_table_name,
             ..
           }| (app_id, app_table_name),
        );

        AppState::write_extra_data(
          &mut **tx,
          &node_id_str,
          &fields_by_table,
          extra_data,
        )
        .await?;
      }
    }

    Ok(node_info)
    })
  })
  .await
  .map_err(|err| err.into())
  }

  async fn create_node_raw<'e, 'c: 'e, X>(
    x: X,
    r#type: &str,
  ) -> sqlx::Result<NodeInfo>
  where
    X: 'e + Executor<'c, Database = Sqlite>,
  {
    let node_id = Uuid::now_v7();
    let node_id_str = node_id.to_string();

    #[derive(FromRow)]
    struct Result {
      updated_at: i64,
    }

    let result = sqlx::query_as!(
      Result,
      r#"
        INSERT INTO node (node_id, node_type, extra_data)
        VALUES (?, ?, "{}")
        RETURNING updated_at
      "#,
      node_id_str,
      r#type,
    )
    .fetch_one(x)
    .await?;

    let updated_at =
      DateTime::from_timestamp_millis(result.updated_at * 1000).unwrap();
    let created_at = DateTime::from_timestamp_millis(
      node_id.get_timestamp().unwrap().to_unix().0 as i64 * 1000,
    )
    .unwrap();

    Ok(NodeInfo {
      node_id: NodeId(node_id),
      created_at,
      updated_at,
      fields: None,
    })
  }

  async fn write_extra_data<'e, 'c: 'e, X>(
    x: X,
    node_id: &str,
    fields_by_table: &FieldsByTable<'_>,
    extra_data: ExtraData,
  ) -> sqlx::Result<()>
  where
    X: 'e + Executor<'c, Database = Sqlite>,
  {
    // Update Tantivy indexes
    // for ((app_id, app_table_name), fields) in fields_by_table.iter() {
    //   let mut writer =
    //     self.tantivy_index.writer(15_000_000).into_diagnostic()?;
    //   let delete_term = Term::from_field_text(node_id_field.clone(), &node_id);
    //   writer.delete_term(delete_term);
    //   writer.add_document(doc).into_diagnostic()?;
    //   writer.commit().into_diagnostic()?;
    //   drop(writer);
    // }

    // Update database
    let mut node_has_keys = Vec::new();
    println!("Fields by table: {:?}", fields_by_table);
    for ((app_id, app_table_name), fields) in fields_by_table.iter() {
      for field_info in fields {
        node_has_keys.push(&field_info.full_key);
      }

      // let mut doc =
      //   btmap! { node_id_field.clone() => OwnedValue::Str(node_id.to_owned()) };
      // let fields_mapping = fields
      //   .into_iter()
      //   .map(
      //     |(
      //       key,
      //       FieldInfo {
      //         relation_field,
      //         r#type,
      //         is_fts_enabled,
      //         ..
      //       },
      //     )| {
      //       let new_value = extra_data.get(*key).unwrap();
      //       // TODO: Make this more generic
      //       let new_value = match r#type.as_str() {
      //         "int" => DataValue::from(new_value.as_i64().unwrap()),
      //         _ => DataValue::from(new_value.as_str().unwrap()),
      //       };
      //       if *is_fts_enabled {
      //         if let Some(field) = self.tantivy_field_map.get_by_left(*key) {
      //           doc.insert(
      //             field.clone(),
      //             OwnedValue::Str(new_value.get_str().unwrap().to_owned()),
      //           );
      //         }
      //       }
      //       (relation_field.to_owned(), new_value)
      //     },
      //   )
      //   .collect::<BTreeMap<_, _>>();
      // let keys = fields_mapping.keys().collect::<Vec<_>>();
      // let keys_joined = keys.iter().join(", ");
      // if !keys.is_empty() {
      //   let query = format!(
      //     "
      //     ?[ node_id, {keys_joined} ] <- [$input_data]
      //     :{action} {relation} {{ node_id, {keys_joined} }}
      //     "
      //   );
      //   let mut params = vec![];
      //   params.push(DataValue::from(node_id.clone()));
      //   for key in keys {
      //     params.push(fields_mapping[key].clone());
      //   }
      //   let result = tx.run_script(
      //     &query,
      //     btmap! {
      //       "input_data".to_owned() => DataValue::List(params),
      //     },
      //   );
      // }
    }

    if !node_has_keys.is_empty() {
      let mut query =
        QueryBuilder::new("INSERT INTO node_has_key (node_id, full_key) ");
      query.push_values(node_has_keys, |mut b, key| {
        b.push_bind(node_id).push_bind(key);
      });
      println!("Query: {:?}", query.sql());
      query.build().execute(x).await?;
    }

    Ok(())
  }
}
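Aside on the bulk insert above: sqlx's `QueryBuilder::push_values` expands each closure call into one tuple of a single multi-row `VALUES` clause, binding every value positionally. A minimal self-contained sketch of the same pattern — the `node_has_key` table comes from this changeset, the function name is invented for illustration:

use sqlx::{QueryBuilder, Sqlite, SqlitePool};

// Sketch only: demonstrates the push_values bulk-insert pattern used in
// write_extra_data, outside the AppState plumbing.
async fn insert_keys(
  pool: &SqlitePool,
  node_id: &str,
  keys: &[&str],
) -> sqlx::Result<()> {
  // Produces: INSERT INTO node_has_key (node_id, full_key)
  //           VALUES (?, ?), (?, ?), ... with one tuple per key.
  let mut qb: QueryBuilder<Sqlite> =
    QueryBuilder::new("INSERT INTO node_has_key (node_id, full_key) ");
  qb.push_values(keys, |mut b, key| {
    b.push_bind(node_id).push_bind(*key);
  });
  qb.build().execute(pool).await?;
  Ok(())
}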
// impl AppState {
// pub async fn update_node() {}
// pub async fn search_nodes(
// &self,
// query: impl AsRef<str>,
// ) -> Result<Vec<(NodeId, Value)>> {
// let query = query.as_ref();
// let reader = self.tantivy_index.reader().into_diagnostic()?;
// let searcher = reader.searcher();
// let node_id_field = self
// .tantivy_field_map
// .get_by_left("node_id")
// .unwrap()
// .clone();
// let journal_page_field = self
// .tantivy_field_map
// .get_by_left("panorama/journal/page/content")
// .unwrap()
// .clone();
// let mut query_parser =
// QueryParser::for_index(&self.tantivy_index, vec![journal_page_field]);
// query_parser.set_field_fuzzy(journal_page_field, true, 2, true);
// let query = query_parser.parse_query(query).into_diagnostic()?;
// let top_docs = searcher
// .search(&query, &TopDocs::with_limit(10))
// .into_diagnostic()?;
// Ok(
// top_docs
// .into_iter()
// .map(|(score, doc_address)| {
// let retrieved_doc =
// searcher.doc::<TantivyDocument>(doc_address).unwrap();
// let node_id = retrieved_doc
// .get_first(node_id_field.clone())
// .unwrap()
// .as_str()
// .unwrap();
// let all_fields = retrieved_doc.get_sorted_field_values();
// let node_id = NodeId(Uuid::from_str(node_id).unwrap());
// let fields = all_fields
// .into_iter()
// .map(|(field, values)| {
// (
// self.tantivy_field_map.get_by_right(&field).unwrap(),
// if values.len() == 1 {
// owned_value_to_json_value(values[0])
// } else {
// Value::Array(
// values
// .into_iter()
// .map(owned_value_to_json_value)
// .collect_vec(),
// )
// },
// )
// })
// .collect::<HashMap<_, _>>();
// (
// node_id,
// json!({
// "score": score,
// "fields": fields,
// }),
// )
// })
// .collect::<Vec<_>>(),
// )
// }
// fn get_rows_for_extra_keys(
// &self,
// tx: &MultiTransaction,
// keys: &[String],
// ) -> Result<FieldMapping> {
// let result = tx.run_script(
// "
// ?[key, relation, field_name, type, is_fts_enabled] :=
// *fqkey_to_dbkey{key, relation, field_name, type, is_fts_enabled},
// is_in(key, $keys)
// ",
// btmap! {
// "keys".to_owned() => DataValue::List(
// keys.into_iter()
// .map(|s| DataValue::from(s.as_str()))
// .collect::<Vec<_>>()
// ),
// },
// )?;
// AppState::rows_to_field_mapping(result)
// }
// fn rows_to_field_mapping(result: NamedRows) -> Result<FieldMapping> {
// let s = |s: &DataValue| s.get_str().unwrap().to_owned();
// Ok(
// result
// .rows
// .into_iter()
// .map(|row| {
// (
// s(&row[0]),
// FieldInfo {
// relation_name: s(&row[1]),
// relation_field: s(&row[2]),
// r#type: s(&row[3]),
// is_fts_enabled: row[4].get_bool().unwrap(),
// },
// )
// })
// .collect::<HashMap<_, _>>(),
// )
// }
// }


@ -0,0 +1,42 @@
use sqlx::{Executor, FromRow, Sqlite};
use crate::AppState;
#[derive(Debug, FromRow)]
pub struct FieldMappingRow {
pub full_key: String,
pub app_id: i64,
pub app_table_name: String,
pub app_table_field: String,
pub db_table_name: Option<String>,
}
impl AppState {
pub(crate) async fn get_related_field_list_for_node_id<'e, 'c: 'e, X>(
x: X,
node_id: &str,
) -> sqlx::Result<Vec<FieldMappingRow>>
where
X: 'e + Executor<'c, Database = Sqlite>,
{
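    // Resolve each of this node's fully-qualified keys to its backing app
    // table: node_has_key lists the keys present on the node, key_mapping
    // takes a full key to an (app_id, app_table_name, app_table_field)
    // triple, and app_table_mapping supplies the physical db_table_name.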
sqlx::query_as!(
FieldMappingRow,
"
SELECT
node_has_key.full_key, key_mapping.app_id,
key_mapping.app_table_name, app_table_field,
app_table_mapping.db_table_name
FROM node_has_key
INNER JOIN key_mapping
ON node_has_key.full_key = key_mapping.full_key
INNER JOIN app_table_mapping
ON key_mapping.app_id = app_table_mapping.app_id
AND key_mapping.app_table_name = app_table_mapping.app_table_name
WHERE node_id = $1
",
node_id
)
.fetch_all(x)
.await
}
}

View file

@ -1,4 +1,3 @@
use itertools::Itertools;
use serde_json::{Number, Value};
use tantivy::schema::OwnedValue;
@ -30,28 +29,31 @@ pub fn owned_value_to_json_value(data_value: &OwnedValue) -> Value {
  }
}

// pub fn data_value_to_json_value(data_value: &DataValue) -> Value {
//   match data_value {
//     DataValue::Null => Value::Null,
//     DataValue::Bool(b) => Value::Bool(*b),
//     DataValue::Num(n) => Value::Number(match n {
//       Num::Int(i) => Number::from(*i),
//       Num::Float(f) => Number::from_f64(*f).unwrap(),
//     }),
//     DataValue::Str(s) => Value::String(s.to_string()),
//     DataValue::List(v) => {
//       Value::Array(v.into_iter().map(data_value_to_json_value).collect_vec())
//     }
//     DataValue::Json(v) => v.0.clone(),
//     DataValue::Bytes(s) => {
//       Value::String(String::from_utf8_lossy(s).to_string())
//     }
//     _ => {
//       println!("Converting unknown {:?}", data_value);
//       serde_json::to_value(data_value).unwrap()
//     } // DataValue::Bytes(s) => todo!(),
//     // DataValue::Uuid(_) => todo!(),
//     // DataValue::Regex(_) => todo!(),
//     // DataValue::Set(_) => todo!(),
//     // DataValue::Vec(_) => todo!(),
//     // DataValue::Validity(_) => todo!(),
//     // DataValue::Bot => todo!(),
//   }
// }


@ -1,21 +1,26 @@
use anyhow::Result;
use sqlx::SqlitePool;
use tantivy::Index;

use crate::{
  migrations::MIGRATOR,
  state::{node::CreateOrUpdate, tantivy_schema},
  AppState,
};

pub async fn test_state() -> Result<AppState> {
  let db = SqlitePool::connect(":memory:").await?;
  let (schema, tantivy_field_map) = tantivy_schema();
  let tantivy_index = Index::create_in_ram(schema);

  MIGRATOR.run(&db).await?;

  let state = AppState {
    db,
    tantivy_index,
    tantivy_field_map,
    app_routes: Default::default(),
    app_wasm_modules: Default::default(),
  };

  Ok(state)
}
@ -26,8 +31,9 @@ pub async fn test_create_node() -> Result<()> {
  let node_info = state
    .create_or_update_node(
      CreateOrUpdate::Create {
        r#type: "panorama/journal/page".to_string(),
      },
      Some(btmap! {
        "panorama/journal/page/content".to_owned() => json!("helloge"),
      }),
@ -49,19 +55,33 @@ pub async fn test_full_text_search() -> Result<()> {
  let node_info = state
    .create_or_update_node(
      CreateOrUpdate::Create {
        r#type: "panorama/journal/page".to_string(),
      },
      Some(btmap! {
        "panorama/journal/page/content".to_owned() => json!("Hello, world!"),
      }),
    )
    .await?;

  todo!();

  // let results = state.search_nodes("world").await?;
  // assert!(results
  //   .into_iter()
  //   .map(|entry| entry.0)
  //   .contains(&node_info.node_id));

  Ok(())
}

#[tokio::test]
pub async fn test_install_apps() -> Result<()> {
  let state = test_state().await?;
  state.install_apps_from_search_paths().await?;

  todo!();

  Ok(())
}


@ -9,13 +9,14 @@ edition = "2021"
anyhow = "1.0.86"
axum = "0.7.5"
chrono = { version = "0.4.38", features = ["serde"] }
clap = { version = "4.5.7", features = ["derive"] }
# cozo = { version = "0.7.6", features = ["storage-rocksdb"] }
csv = "1.3.0"
dirs = "5.0.1"
futures = "0.3.30"
itertools = "0.13.0"
panorama-core = { path = "../panorama-core" }
schemars = "0.8.21"
serde = { version = "1.0.202", features = ["derive"] }
serde_json = "1.0.117"
sugars = "3.0.1"
@ -34,10 +35,6 @@ features = ["axum_extras", "time", "uuid", "chrono", "yaml"]
git = "https://github.com/juhaku/utoipa"
features = ["axum"]

[dependencies.async-imap]
version = "0.9.7"
default-features = false


@ -0,0 +1,15 @@
use axum::{
routing::{method_routing, MethodFilter},
Router,
};
use panorama_core::AppState;
use utoipa::OpenApi;
#[derive(OpenApi)]
#[openapi(paths(), components(schemas()))]
pub(super) struct AppsApi;
pub(super) fn router() -> Router<AppState> {
Router::new()
// .route("/app/:id/*path", method_routing::any(handler))
}


@ -7,7 +7,7 @@ pub type AppResult<T, E = AppError> = std::result::Result<T, E>;
// Make our own error that wraps `anyhow::Error`.
#[derive(Debug)]
pub struct AppError(anyhow::Error);

// Tell axum how to convert `AppError` into a response.
impl IntoResponse for AppError {
@ -26,7 +26,7 @@ impl IntoResponse for AppError {
// `Result<_, AppError>`. That way you don't need to do that manually.
impl<E> From<E> for AppError
where
  E: Into<anyhow::Error>,
{
  fn from(err: E) -> Self {
    Self(err.into())


@ -1,25 +0,0 @@
use std::{
fs::{self, File},
path::PathBuf,
};
use axum::{extract::State, Json};
use miette::IntoDiagnostic;
use serde_json::Value;
use crate::{error::AppResult, AppState};
// This code is really bad but gives me a quick way to look at all of the data
// in the data at once. Rip this out once there's any Real Security Mechanism.
pub async fn export(State(state): State<AppState>) -> AppResult<Json<Value>> {
let export = state.export().await?;
let base_dir = PathBuf::from("export");
fs::create_dir_all(&base_dir).into_diagnostic()?;
let file = File::create(base_dir.join("export.json")).into_diagnostic()?;
serde_json::to_writer_pretty(file, &export).into_diagnostic()?;
Ok(Json(export))
}


@ -1,34 +1,31 @@
use axum::Router;
use utoipa::OpenApi;

use crate::AppState;

/// Node API
#[derive(OpenApi)]
#[openapi(paths(), components(schemas()))]
pub(super) struct JournalApi;

pub(super) fn router() -> Router<AppState> {
  Router::new()
  // .route("/get_todays_journal_id", get(get_todays_journal_id))
}

// #[utoipa::path(
//   get,
//   path = "/get_todays_journal_id",
//   responses(
//     (status = 200),
//   ),
// )]
// pub async fn get_todays_journal_id(
//   State(state): State<AppState>,
// ) -> AppResult<Json<Value>> {
//   let node_id = state.get_todays_journal_id().await?;
//   Ok(Json(json!({
//     "node_id": node_id.to_string(),
//   })))
// }


@ -7,16 +7,16 @@ extern crate serde_json;
#[macro_use]
extern crate sugars;

pub mod apps;
mod error;
mod journal;
pub mod mail;
mod node;

use std::fs;

use anyhow::Result;
use axum::{http::Method, routing::get, Router};
use panorama_core::AppState;
use tokio::net::TcpListener;
use tower::ServiceBuilder;
@ -27,12 +27,6 @@ use tower_http::{
use utoipa::OpenApi;
use utoipa_scalar::{Scalar, Servable};

pub async fn run() -> Result<()> {
  #[derive(OpenApi)]
  #[openapi(
@ -46,10 +40,12 @@ pub async fn run() -> Result<()> {
  let data_dir = dirs::data_dir().unwrap();
  let panorama_dir = data_dir.join("panorama");
  fs::create_dir_all(&panorama_dir)?;

  let state = AppState::new(&panorama_dir).await?;
  state.install_apps_from_search_paths().await?;

  let cors_layer = CorsLayer::new()
    .allow_methods([Method::GET, Method::POST, Method::PUT])
    .allow_headers(cors::Any)
@ -61,18 +57,17 @@ pub async fn run() -> Result<()> {
  let app = Router::new()
    .merge(Scalar::with_url("/api/docs", ApiDoc::openapi()))
    .route("/", get(|| async { "Hello, World!" }))
    .nest("/node", node::router().with_state(state.clone()))
    .nest("/journal", journal::router().with_state(state.clone()))
    // .route("/mail/config", get(get_mail_config))
    // .route("/mail", get(get_mail))
    .layer(ServiceBuilder::new().layer(cors_layer))
    .layer(ServiceBuilder::new().layer(trace_layer))
    .with_state(state.clone());

  let listener = TcpListener::bind("0.0.0.0:5195").await?;
  println!("Listening... {:?}", listener);
  axum::serve(listener, app).await?;
  Ok(())
}


@ -1,54 +1,47 @@
// pub async fn get_mail_config(
//   State(state): State<AppState>,
// ) -> AppResult<Json<Value>> {
//   let configs = state.fetch_mail_configs()?;
//   Ok(Json(json!({ "configs": configs })))
// }

// pub async fn get_mail(State(state): State<AppState>) -> AppResult<Json<Value>> {
//   let mailboxes = state.db.run_script("
//     ?[node_id, account_node_id, mailbox_name] := *mailbox {node_id, account_node_id, mailbox_name}
//   ", Default::default(), ScriptMutability::Immutable)?;

//   let mailboxes = mailboxes
//     .rows
//     .iter()
//     .map(|mb| {
//       json!({
//         "node_id": mb[0].get_str().unwrap(),
//         "account_node_id": mb[1].get_str().unwrap(),
//         "mailbox_name": mb[2].get_str().unwrap(),
//       })
//     })
//     .collect::<Vec<_>>();

//   let messages = state.db.run_script("
//     ?[node_id, subject, body, internal_date] := *message {node_id, subject, body, internal_date}
//     :limit 10
//   ", Default::default(), ScriptMutability::Immutable)?;

//   let messages = messages
//     .rows
//     .iter()
//     .map(|m| {
//       json!({
//         "node_id": m[0].get_str().unwrap(),
//         "subject": m[1].get_str().unwrap(),
//         "body": m[2].get_str(),
//         "internal_date": m[3].get_str().unwrap(),
//       })
//     })
//     .collect::<Vec<_>>();

//   Ok(Json(json!({
//     "mailboxes": mailboxes,
//     "messages": messages,
//   })))
// }


@ -1,8 +1,32 @@
use anyhow::Result;
use clap::{Parser, Subcommand};
use panorama_core::state::appsv0::manifest::AppManifest;
use schemars::schema_for;

#[derive(Debug, Parser)]
struct Opt {
  #[clap(subcommand)]
  command: Option<Command>,
}

#[derive(Debug, Subcommand)]
enum Command {
  GenerateConfigSchema,
}

#[tokio::main]
async fn main() -> Result<()> {
  let opt = Opt::parse();

  tracing_subscriber::fmt::init();

  match opt.command {
    Some(Command::GenerateConfigSchema) => {
      let schema = schema_for!(AppManifest);
      println!("{}", serde_json::to_string_pretty(&schema).unwrap());
    }
    None => panorama_daemon::run().await?,
  }

  Ok(())
}
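With clap's default naming, the new subcommand would presumably be invoked as `generate-config-schema` (kebab-cased from `GenerateConfigSchema`); omitting it falls through to running the daemon as before.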


@ -1,196 +1,173 @@
use axum::Router;
use utoipa::OpenApi;

use crate::AppState;

/// Node API
#[derive(OpenApi)]
#[openapi(paths(), components(schemas()))]
pub(super) struct NodeApi;

pub(super) fn router() -> Router<AppState> {
  Router::new()
  // .route("/", put(create_node))
  // .route("/:id", get(get_node))
  // .route("/:id", post(update_node))
  // .route("/search", get(search_nodes))
}

// #[derive(Serialize, Deserialize, ToSchema, Clone)]
// struct GetNodeResult {
//   node_id: String,
//   fields: HashMap<String, Value>,
//   created_at: DateTime<Utc>,
//   updated_at: DateTime<Utc>,
// }

// /// Get node info
// ///
// /// This endpoint retrieves all the fields for a particular node
// #[utoipa::path(
//   get,
//   path = "/{id}",
//   responses(
//     (status = 200, body = [GetNodeResult]),
//     (status = 404, description = "the node ID provided was not found")
//   ),
//   params(
//     ("id" = String, Path, description = "Node ID"),
//   ),
// )]
// pub async fn get_node(
//   State(state): State<AppState>,
//   Path(node_id): Path<String>,
// ) -> AppResult<(StatusCode, Json<Value>)> {
//   let node_info = state.get_node(&node_id).await?;

//   Ok((
//     StatusCode::OK,
//     Json(json!({
//       "node_id": node_id,
//       "fields": node_info.fields,
//       "created_at": node_info.created_at,
//       "updated_at": node_info.updated_at,
//     })),
//   ))
// }

// #[derive(Deserialize, Debug)]
// pub struct UpdateData {
//   extra_data: Option<ExtraData>,
// }

// /// Update node info
// #[utoipa::path(
//   post,
//   path = "/{id}",
//   responses(
//     (status = 200)
//   ),
//   params(
//     ("id" = String, Path, description = "Node ID"),
//   )
// )]
// pub async fn update_node(
//   State(state): State<AppState>,
//   Path(node_id): Path<String>,
//   Json(opts): Json<UpdateData>,
// ) -> AppResult<Json<Value>> {
//   let node_id = NodeId(Uuid::from_str(&node_id).into_diagnostic()?);
//   let node_info = state
//     .create_or_update_node(CreateOrUpdate::Update { node_id }, opts.extra_data)
//     .await?;
//   Ok(Json(json!({
//     "node_id": node_info.node_id.to_string(),
//   })))
// }

// #[derive(Debug, Deserialize)]
// pub struct CreateNodeOpts {
//   // TODO: Allow submitting a string
//   // id: Option<String>,
//   #[serde(rename = "type")]
//   ty: String,
//   extra_data: Option<ExtraData>,
// }

// #[utoipa::path(
//   put,
//   path = "/",
//   responses((status = 200)),
// )]
// pub async fn create_node(
//   State(state): State<AppState>,
//   Json(opts): Json<CreateNodeOpts>,
// ) -> AppResult<Json<Value>> {
//   let node_info = state
//     .create_or_update_node(
//       CreateOrUpdate::Create { r#type: opts.ty },
//       opts.extra_data,
//     )
//     .await?;
//   Ok(Json(json!({
//     "node_id": node_info.node_id.to_string(),
//   })))
// }

// #[derive(Deserialize)]
// pub struct SearchQuery {
//   query: String,
// }

// #[utoipa::path(
//   get,
//   path = "/search",
//   responses((status = 200)),
// )]
// pub async fn search_nodes(
//   State(state): State<AppState>,
//   Query(query): Query<SearchQuery>,
// ) -> AppResult<Json<Value>> {
//   let search_result = state.search_nodes(query.query).await?;
//   let search_result = search_result
//     .into_iter()
//     .map(|(id, value)| value["fields"].clone())
//     .collect_vec();

//   Ok(Json(json!({
//     "results": search_result,
//   })))
// }

// fn get_rows_for_extra_keys(
//   tx: &MultiTransaction,
//   extra_data: &ExtraData,
// ) -> AppResult<HashMap<String, (String, String, String)>> {
//   let result = tx.run_script(
//     "
//     ?[key, relation, field_name, type] :=
//       *fqkey_to_dbkey{key, relation, field_name, type},
//       is_in(key, $keys)
//     ",
//     btmap! {
//       "keys".to_owned() => DataValue::List(
//         extra_data
//           .keys()
//           .map(|s| DataValue::from(s.as_str()))
//           .collect::<Vec<_>>()
//       ),
//     },
//   )?;

//   let s = |s: &DataValue| s.get_str().unwrap().to_owned();

//   Ok(
//     result
//       .rows
//       .into_iter()
//       .map(|row| (s(&row[0]), (s(&row[1]), s(&row[2]), s(&row[3]))))
//       .collect::<HashMap<_, _>>(),
//   )
// }


@ -0,0 +1,6 @@
[package]
name = "panorama-sync"
version = "0.1.0"
edition = "2021"
[dependencies]


@ -0,0 +1 @@

docs/.gitignore vendored

@ -1 +1,2 @@
book
src/generated


@ -3,4 +3,6 @@
- [Front](./front.md)
- [Nodes](./nodes.md)
- [Custom Apps](./custom_apps.md)
- [Sync](./sync.md)
- [Dream](./dream.md)
- [Comparison](./comparison.md)

docs/src/comparison.md Normal file

@ -0,0 +1,8 @@
# Comparison
From anytype:
- Knowledgeable about clients
- Custom apps by third parties
From logseq:


@ -16,7 +16,7 @@ After this rolls out, most of the built-in panorama apps will also be converted
To develop a custom app, you will need to provide:
-
App metadata in a `manifest.yml`. This contains:
- App display name.
- Version + License.
@ -35,13 +35,9 @@ To develop a custom app, you will need to provide:
- none: the app isn't allowed to write to the specified field
-
List of endpoints and triggers, along with their handlers (a hypothetical sketch follows below).
The handlers take the form `
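As a purely hypothetical sketch — the handler signature is not finalized in this draft, and the request/response types and registration mechanism below are invented for illustration — an endpoint handler might look like:

// Hypothetical illustration only: these types and the wiring to manifest.yml
// routes are placeholders, not the actual panorama API.
use serde::{Deserialize, Serialize};

#[derive(Deserialize)]
struct HeartbeatRequest {
  project: String,
}

#[derive(Serialize)]
struct HeartbeatResponse {
  accepted: bool,
}

// An endpoint handler: a pure function from a deserialized request to a
// serializable response, attached to a route declared in the manifest.
fn handle_heartbeat(req: HeartbeatRequest) -> HeartbeatResponse {
  HeartbeatResponse { accepted: !req.project.is_empty() }
}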
## App ownership of nodes
@ -65,6 +61,4 @@ Apps automatically own nodes they create.
### Mail

### Codetrack

docs/src/dream.md Normal file

@ -0,0 +1,50 @@
# Dream
## Custom Apps List
- File Backup
- Object storage
- Archivebox like system, bookmarking
- Journal
- Block-based editor
- Embed any node type into journal
- Food
- Recipe tracker
- Grocery list (adds to my todo list)
- Meal planner
- Food blogging
- Health+Fitness
- Running progress (incl. saving GPS waypoints)
- Workout log for various workouts
- Weight tracking
- Connect to smartwatch?
- Pictures
- Face recognition
- Map view
- Coding
- Code tracking like Wakatime
- Git forge???
- Calendar
- Calendly-like appointment booking system
- Social
- Store people into people app
- Email+matrix chat
- Video conferencing?
- Feed readers / RSS
- Media
- Music and video hosting / streaming i.e Navidrome
- Money tracking
- Education
- Anki flashcards
- Canvas???
- Dashboards
# Features
- Graph view
- Instantly publish anything
- Notifications
- Full text+OCR search
- IFTTT workflows
- Multiuser
- Google docs like interface for docs / typst


@ -1,6 +1,6 @@
# Panorama

Panorama is a personal information manager.

- [Repository](https://git.mzhang.io/michael/panorama)
- [Issues](https://git.mzhang.io/michael/panorama/issues)

flake.lock Normal file

@ -0,0 +1,98 @@
{
"nodes": {
"fenix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
],
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1719469637,
"narHash": "sha256-cOA40mIqjIIf+mCdtuglxdP/0to1LDL1Lkef7vqVykc=",
"owner": "nix-community",
"repo": "fenix",
"rev": "3374c72204714eb979719e77a1856009584ba4d7",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "fenix",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"id": "flake-utils",
"type": "indirect"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1719554759,
"narHash": "sha256-B64IsJMis4A9dePPOKi2T5EEs9AJWfsvkMKSh9/NANs=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "44677ecde6c8a7a7e32f9a2709c316975bf89a60",
"type": "github"
},
"original": {
"owner": "nixos",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"fenix": "fenix",
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1719378198,
"narHash": "sha256-c1jWpdPlZyL6/a0pWa30680ivP7nMLNBPuz5hMGoifg=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "b33a0cae335b85e11a700df2d9a7c0006a3b80ec",
"type": "github"
},
"original": {
"owner": "rust-lang",
"ref": "nightly",
"repo": "rust-analyzer",
"type": "github"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix Normal file

@ -0,0 +1,42 @@
{
inputs = {
nixpkgs.url = "github:nixos/nixpkgs";
fenix = {
url = "github:nix-community/fenix";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, flake-utils, fenix }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
overlays = [ fenix.overlays.default ];
};
toolchain = pkgs.fenix.stable;
flakePkgs = {
#markout = pkgs.callPackage ./. { inherit toolchain; };
};
in rec {
packages = flake-utils.lib.flattenTree flakePkgs;
devShell = pkgs.mkShell {
inputsFrom = with packages;
[
#markout
];
packages = (with pkgs; [
cargo-watch
cargo-deny
cargo-edit
corepack
nodejs_20
sqlx-cli
go
]) ++ (with toolchain; [ cargo rustc rustfmt clippy ]);
};
});
}


@ -142,6 +142,12 @@ importers:
        specifier: ^5.0.0
        version: 5.2.11(sass@1.77.2)

  apps/journal:
    devDependencies:
      assemblyscript:
        specifier: ^0.27.27
        version: 0.27.27

packages:

  '@ampproject/remapping@2.3.0':
@ -893,6 +899,11 @@ packages:
    resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==}
    engines: {node: '>= 8'}

  assemblyscript@0.27.27:
    resolution: {integrity: sha512-z4ijXsjjk3uespEeCWpO1K2GQySc6bn+LL5dL0tsC2VXNYKFnKDmAh3wefcKazxXHFVhYlxqNfyv96ajaQyINQ==}
    engines: {node: '>=16', npm: '>=7'}
    hasBin: true

  babel-plugin-macros@3.1.0:
    resolution: {integrity: sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==}
    engines: {node: '>=10', npm: '>=6'}
@ -907,6 +918,10 @@ packages:
    resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==}
    engines: {node: '>=8'}

  binaryen@116.0.0-nightly.20240114:
    resolution: {integrity: sha512-0GZrojJnuhoe+hiwji7QFaL3tBlJoA+KFUN7ouYSDGZLSo9CKM8swQX8n/UcbR0d1VuZKU+nhogNzv423JEu5A==}
    hasBin: true

  boolbase@1.0.0:
    resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==}
@ -1303,6 +1318,9 @@ packages:
  lodash@4.17.21:
    resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==}

  long@5.2.3:
    resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==}

  longest-streak@3.1.0:
    resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==}
@ -2585,6 +2603,11 @@ snapshots:
      normalize-path: 3.0.0
      picomatch: 2.3.1

  assemblyscript@0.27.27:
    dependencies:
      binaryen: 116.0.0-nightly.20240114
      long: 5.2.3

  babel-plugin-macros@3.1.0:
    dependencies:
      '@babel/runtime': 7.24.6
@ -2597,6 +2620,8 @@ snapshots:
  binary-extensions@2.3.0: {}

  binaryen@116.0.0-nightly.20240114: {}

  boolbase@1.0.0: {}

  braces@3.0.3:
@ -3081,6 +3106,8 @@ snapshots:
  lodash@4.17.21: {}

  long@5.2.3: {}

  longest-streak@3.1.0: {}

  loose-envify@1.4.0:


@ -1,5 +1,5 @@
{
  "name": "panorama",
  "version": "0.1.0",
  "type": "module",
  "scripts": {


@ -25,6 +25,7 @@ async fn main() {
    Some(Command::Daemon) => {
      panorama_daemon::run().await;
    }
    None => {
      if !opt.no_embedded_daemon {
        tokio::spawn(panorama_daemon::run());


@ -1,20 +1,18 @@
import styles from "./SearchBar.module.scss";
import {
  FloatingOverlay,
  FloatingPortal,
  autoUpdate,
  offset,
  useDismiss,
  useFloating,
  useFocus,
  useInteractions,
} from "@floating-ui/react";
import { useCallback, useEffect, useState } from "react";
import { atom, useAtom, useSetAtom } from "jotai";
import { useNodeControls } from "../App";
import { useDebounce, useDebouncedCallback } from "use-debounce";

const searchQueryAtom = atom("");
const showMenuAtom = atom(false);
@ -37,8 +35,7 @@ export default function SearchBar() {
    useDismiss(context),
  ]);

  const performSearch = useCallback(() => {
    const trimmed = searchQuery.trim();
    if (trimmed === "") return;
@ -63,7 +60,11 @@ export default function SearchBar() {
        onFocus={() => setShowMenu(true)}
        ref={refs.setReference}
        value={searchQuery}
        onChange={(evt) => {
          setSearchQuery(evt.target.value);
          if (evt.target.value) performSearch();
          else setSearchResults([]);
        }}
        {...getReferenceProps()}
      />
    </div>


@ -7,18 +7,15 @@ import remarkMath from "remark-math";
import rehypeKatex from "rehype-katex";
import { parse as parseDate, format as formatDate } from "date-fns";
import { useDebounce } from "use-debounce";
import {
  JOURNAL_PAGE_CONTENT_FIELD_NAME,
  JOURNAL_PAGE_TITLE_FIELD_NAME,
  NodeInfo,
} from "../../lib/data";

export interface JournalPageProps {
  id: string;
  data: NodeInfo;
}

export default function JournalPage({ id, data }: JournalPageProps) {

Some files were not shown because too many files have changed in this diff.