From 22203ba1d7fc6993c7c43a7f861395bae0c59363 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 29 Mar 2026 22:13:41 +0900 Subject: [PATCH 01/41] synmetrize *Client --- examples/adapters/db_client.rs | 8 +- examples/adapters/kvs_client.rs | 10 ++ examples/app/db/001_init.sql | 4 + examples/app/src/main.rs | 16 +- src/codec_value.rs | 289 ++++++++++++++++++++++++++++++++ src/core/codec.rs | 50 +----- src/core/manifest.rs | 38 +++-- src/lib.rs | 1 + src/load.rs | 122 ++++++++------ src/ports/required.rs | 16 +- src/state.rs | 63 ++++++- src/store.rs | 53 ++++-- 12 files changed, 526 insertions(+), 144 deletions(-) create mode 100644 src/codec_value.rs diff --git a/examples/adapters/db_client.rs b/examples/adapters/db_client.rs index 50d870d..b42f6ef 100644 --- a/examples/adapters/db_client.rs +++ b/examples/adapters/db_client.rs @@ -85,7 +85,7 @@ impl DbClient for DbAdapter { &self, connection: &Value, table: &str, - columns: &[(Vec, Vec)], + map: &[(Vec, Vec)], where_clause: Option<&[u8]>, ) -> Option> { let runtime = tokio::runtime::Runtime::new().ok()?; @@ -101,8 +101,8 @@ impl DbClient for DbAdapter { let client = pool_lock.get(&conn_name)?; - let col_names: Vec<&str> = columns.iter() - .filter_map(|(k, _)| std::str::from_utf8(k).ok()) + let col_names: Vec<&str> = map.iter() + .filter_map(|(_, v)| std::str::from_utf8(v).ok()) .collect(); let column_list = if col_names.is_empty() { "*".to_string() } else { col_names.join(", ") }; @@ -155,7 +155,7 @@ impl DbClient for DbAdapter { &self, _connection: &Value, _table: &str, - _columns: &[(Vec, Vec)], + _map: &[(Vec, Vec)], _where_clause: Option<&[u8]>, ) -> bool { false } diff --git a/examples/adapters/kvs_client.rs b/examples/adapters/kvs_client.rs index 2526277..9dfa21c 100644 --- a/examples/adapters/kvs_client.rs +++ b/examples/adapters/kvs_client.rs @@ -10,6 +10,16 @@ pub struct KVSAdapter { } impl KVSAdapter { + pub fn raw_get(&self, key: &str) -> Option> { + let client = self.client.lock().unwrap(); + let mut 
conn = client.get_connection().ok()?; + redis::cmd("GET") + .arg(key) + .query::>>(&mut conn) + .ok() + .flatten() + } + pub fn new() -> Result { let host = std::env::var("REDIS_HOST").unwrap_or_else(|_| "localhost".to_string()); let port = std::env::var("REDIS_PORT").unwrap_or_else(|_| "6379".to_string()); diff --git a/examples/app/db/001_init.sql b/examples/app/db/001_init.sql index e6eecd0..699f118 100644 --- a/examples/app/db/001_init.sql +++ b/examples/app/db/001_init.sql @@ -34,6 +34,10 @@ ON CONFLICT DO NOTHING; CREATE INDEX IF NOT EXISTS idx_users_sso_user_id ON users(sso_user_id); +GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO current_user; +GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO current_user; +GRANT SELECT ON ALL TABLES IN SCHEMA public TO PUBLIC; + -- tenant_db: per-tenant data diff --git a/examples/app/src/main.rs b/examples/app/src/main.rs index cc2c989..11356ca 100644 --- a/examples/app/src/main.rs +++ b/examples/app/src/main.rs @@ -130,9 +130,10 @@ fn run_tests() -> (usize, usize) { test!("set and get leaf key cache.user.org_id", { let im = Arc::new(InMemoryAdapter::new()); im.set("request-attributes-user-key", scalar("1")); + let kvs = Arc::new(KVSAdapter::new().unwrap()); let mut state = make_state( Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), + Arc::clone(&kvs), Arc::new(DbAdapter::new()), im, Arc::new(HttpAdapter), @@ -141,6 +142,19 @@ fn run_tests() -> (usize, usize) { assert!(state.set("cache.user.org_id", scalar("100"), None).unwrap()); let got = state.get("cache.user.org_id").unwrap(); assert_eq!(got, Some(scalar("100"))); + + // verify Redis directly: KVS key is "user:1" (session.sso_user_id=1) + // expected: encoded Mapping with org_id=100, not raw b"100" + let raw = kvs.raw_get("user:1"); + assert!(raw.is_some(), "user:1 should exist in Redis"); + let decoded = state_engine::codec_value::decode(raw.as_deref().unwrap()); + assert!(decoded.is_some(), "Redis value should decode as Value"); + 
let decoded = decoded.unwrap(); + assert_eq!( + mapping_get(&decoded, b"org_id"), + Some(&scalar("100")), + "org_id in Redis mapping should be 100" + ); }); test!("set and get leaf key cache.user.id", { diff --git a/src/codec_value.rs b/src/codec_value.rs new file mode 100644 index 0000000..6a6b45c --- /dev/null +++ b/src/codec_value.rs @@ -0,0 +1,289 @@ +use crate::ports::provided::Value; + +// Wire format: +// Null : 0x00 +// Scalar : 0x01 | len(u32le) | bytes +// Sequence : 0x02 | count(u32le) | item... +// Mapping : 0x03 | count(u32le) | (key_len(u32le) | key_bytes | item)... + +const TAG_NULL: u8 = 0x00; +const TAG_SCALAR: u8 = 0x01; +const TAG_SEQUENCE: u8 = 0x02; +const TAG_MAPPING: u8 = 0x03; + +pub fn encode(value: &Value) -> Vec { + let mut buf = Vec::new(); + write_value(value, &mut buf); + buf +} + +pub fn decode(bytes: &[u8]) -> Option { + let (value, _) = read_value(bytes)?; + Some(value) +} + +fn write_value(value: &Value, buf: &mut Vec) { + match value { + Value::Null => { + buf.push(TAG_NULL); + } + Value::Scalar(b) => { + buf.push(TAG_SCALAR); + buf.extend_from_slice(&(b.len() as u32).to_le_bytes()); + buf.extend_from_slice(b); + } + Value::Sequence(items) => { + buf.push(TAG_SEQUENCE); + buf.extend_from_slice(&(items.len() as u32).to_le_bytes()); + for item in items { + write_value(item, buf); + } + } + Value::Mapping(pairs) => { + buf.push(TAG_MAPPING); + buf.extend_from_slice(&(pairs.len() as u32).to_le_bytes()); + for (k, v) in pairs { + buf.extend_from_slice(&(k.len() as u32).to_le_bytes()); + buf.extend_from_slice(k); + write_value(v, buf); + } + } + } +} + +fn read_value(bytes: &[u8]) -> Option<(Value, &[u8])> { + let (&tag, rest) = bytes.split_first()?; + match tag { + TAG_NULL => Some((Value::Null, rest)), + TAG_SCALAR => { + let (len, rest) = read_u32(rest)?; + let (data, rest) = split_at(rest, len)?; + Some((Value::Scalar(data.to_vec()), rest)) + } + TAG_SEQUENCE => { + let (count, mut rest) = read_u32(rest)?; + let mut items = 
Vec::with_capacity(count); + for _ in 0..count { + let (item, next) = read_value(rest)?; + items.push(item); + rest = next; + } + Some((Value::Sequence(items), rest)) + } + TAG_MAPPING => { + let (count, mut rest) = read_u32(rest)?; + let mut pairs = Vec::with_capacity(count); + for _ in 0..count { + let (klen, next) = read_u32(rest)?; + let (kdata, next) = split_at(next, klen)?; + let (val, next) = read_value(next)?; + pairs.push((kdata.to_vec(), val)); + rest = next; + } + Some((Value::Mapping(pairs), rest)) + } + _ => None, + } +} + +fn read_u32(bytes: &[u8]) -> Option<(usize, &[u8])> { + let (b, rest) = split_at(bytes, 4)?; + let n = u32::from_le_bytes(b.try_into().ok()?) as usize; + Some((n, rest)) +} + +fn split_at(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])> { + if bytes.len() >= n { Some(bytes.split_at(n)) } else { None } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn rt(v: &Value) -> Value { + decode(&encode(v)).unwrap() + } + + #[test] + fn test_null() { + assert_eq!(rt(&Value::Null), Value::Null); + } + + #[test] + fn test_scalar() { + assert_eq!(rt(&Value::Scalar(b"hello".to_vec())), Value::Scalar(b"hello".to_vec())); + } + + #[test] + fn test_scalar_empty() { + assert_eq!(rt(&Value::Scalar(vec![])), Value::Scalar(vec![])); + } + + #[test] + fn test_sequence() { + let v = Value::Sequence(vec![ + Value::Scalar(b"a".to_vec()), + Value::Null, + Value::Scalar(b"b".to_vec()), + ]); + assert_eq!(rt(&v), v); + } + + #[test] + fn test_mapping() { + let v = Value::Mapping(vec![ + (b"id".to_vec(), Value::Scalar(b"1".to_vec())), + (b"name".to_vec(), Value::Scalar(b"alice".to_vec())), + ]); + assert_eq!(rt(&v), v); + } + + #[test] + fn test_nested() { + let v = Value::Mapping(vec![ + (b"user".to_vec(), Value::Mapping(vec![ + (b"id".to_vec(), Value::Scalar(b"1".to_vec())), + (b"tags".to_vec(), Value::Sequence(vec![ + Value::Scalar(b"admin".to_vec()), + Value::Scalar(b"staff".to_vec()), + ])), + (b"extra".to_vec(), Value::Null), + ])), + ]); + 
assert_eq!(rt(&v), v); + } + + #[test] + fn test_decode_invalid_returns_none() { + assert_eq!(decode(&[0xFF]), None); + assert_eq!(decode(&[TAG_SCALAR, 0x05, 0x00, 0x00, 0x00]), None); // truncated + } + + /// Simulate what State::get("cache.user") would return after Db load — + /// a Mapping built from yaml_to_parse_value output (String → Scalar bytes, + /// Number → Scalar bytes, Null → Null). Verify encode→decode roundtrip. + #[test] + fn test_roundtrip_cache_user_from_yaml() { + // Equivalent to serde_yaml_ng parsing: + // id: 1 + // org_id: 100 + // tenant_id: 10 + // yaml_to_parse_value converts Number → Scalar(n.to_string().into_bytes()) + let original = Value::Mapping(vec![ + (b"id".to_vec(), Value::Scalar(b"1".to_vec())), + (b"org_id".to_vec(), Value::Scalar(b"100".to_vec())), + (b"tenant_id".to_vec(), Value::Scalar(b"10".to_vec())), + ]); + + let bytes = encode(&original); + let decoded = decode(&bytes).unwrap(); + assert_eq!(decoded, original); + + // spot-check the wire bytes start with TAG_MAPPING + assert_eq!(bytes[0], TAG_MAPPING); + // 3 pairs + assert_eq!(&bytes[1..5], &3u32.to_le_bytes()); + } + + /// Simulate cache.tenant which has a nested Mapping and a Sequence. + #[test] + fn test_roundtrip_nested_from_yaml() { + // Equivalent to: + // name: "acme" + // health: + // status: "ok" + // tags: + // - "gold" + // - "active" + let original = Value::Mapping(vec![ + (b"name".to_vec(), Value::Scalar(b"acme".to_vec())), + (b"health".to_vec(), Value::Mapping(vec![ + (b"status".to_vec(), Value::Scalar(b"ok".to_vec())), + ])), + (b"tags".to_vec(), Value::Sequence(vec![ + Value::Scalar(b"gold".to_vec()), + Value::Scalar(b"active".to_vec()), + ])), + ]); + + let bytes = encode(&original); + let decoded = decode(&bytes).unwrap(); + assert_eq!(decoded, original); + } + + /// Null fields survive the roundtrip (yaml `~` or missing values). 
+ #[test] + fn test_roundtrip_with_null_field() { + let original = Value::Mapping(vec![ + (b"id".to_vec(), Value::Scalar(b"1".to_vec())), + (b"deleted_at".to_vec(), Value::Null), + ]); + assert_eq!(decode(&encode(&original)).unwrap(), original); + } + + fn from_yaml(v: serde_yaml_ng::Value) -> Value { + match v { + serde_yaml_ng::Value::Mapping(m) => Value::Mapping( + m.into_iter() + .filter_map(|(k, v)| { + let key = match k { + serde_yaml_ng::Value::String(s) => s.into_bytes(), + _ => return None, + }; + Some((key, from_yaml(v))) + }) + .collect(), + ), + serde_yaml_ng::Value::Sequence(s) => Value::Sequence( + s.into_iter().map(from_yaml).collect() + ), + serde_yaml_ng::Value::String(s) => Value::Scalar(s.into_bytes()), + serde_yaml_ng::Value::Number(n) => Value::Scalar(n.to_string().into_bytes()), + serde_yaml_ng::Value::Bool(b) => Value::Scalar(b.to_string().into_bytes()), + serde_yaml_ng::Value::Null => Value::Null, + _ => Value::Null, + } + } + + /// Parse a real YAML string with serde_yaml_ng, convert to Value, + /// then verify encode→decode roundtrip produces identical Value. 
+ #[test] + fn test_roundtrip_real_yaml_cache_user() { + let yaml = r#" +id: 1 +org_id: 100 +tenant_id: 10 +name: "alice" +active: true +score: 3.14 +deleted_at: ~ +"#; + let parsed: serde_yaml_ng::Value = serde_yaml_ng::from_str(yaml).unwrap(); + let original = from_yaml(parsed); + + let bytes = encode(&original); + let decoded = decode(&bytes).unwrap(); + assert_eq!(decoded, original); + } + + #[test] + fn test_roundtrip_real_yaml_nested() { + let yaml = r#" +user: + id: 1 + tags: + - admin + - staff + address: + city: Tokyo + zip: "100-0001" + note: ~ +"#; + let parsed: serde_yaml_ng::Value = serde_yaml_ng::from_str(yaml).unwrap(); + let original = from_yaml(parsed); + + let bytes = encode(&original); + let decoded = decode(&bytes).unwrap(); + assert_eq!(decoded, original); + } +} diff --git a/src/core/codec.rs b/src/core/codec.rs index 9fd3233..04e1c4f 100644 --- a/src/core/codec.rs +++ b/src/core/codec.rs @@ -13,11 +13,6 @@ pub fn root_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::ROOT_NULL) } -pub fn root_decode(v: u64) -> Option<&'static [u8]> { - ROOT_NAMES.iter() - .find(|(_, val)| *val == v) - .map(|(name, _)| *name) -} pub const CLIENT_NAMES: &[(&[u8], u64)] = &[ (b"State", fixed_bits::CLIENT_STATE), @@ -36,11 +31,6 @@ pub fn client_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::CLIENT_NULL) } -pub fn client_decode(v: u64) -> Option<&'static [u8]> { - CLIENT_NAMES.iter() - .find(|(_, val)| *val == v) - .map(|(name, _)| *name) -} pub const PROP_NAMES: &[(&[u8], u64)] = &[ (b"type", fixed_bits::PROP_TYPE), @@ -61,11 +51,6 @@ pub fn prop_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::PROP_NULL) } -pub fn prop_decode(v: u64) -> Option<&'static [u8]> { - PROP_NAMES.iter() - .find(|(_, val)| *val == v) - .map(|(name, _)| *name) -} pub const TYPE_NAMES: &[(&[u8], u64)] = &[ (b"integer", fixed_bits::TYPE_I64), @@ -82,61 +67,40 @@ pub fn type_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::TYPE_NULL) } -pub fn type_decode(v: u64) -> Option<&'static [u8]> 
{ - TYPE_NAMES.iter() - .find(|(_, val)| *val == v) - .map(|(name, _)| *name) -} #[cfg(test)] mod tests { use super::*; #[test] - fn test_client_codec() { + fn test_client_encode() { for &(name, val) in CLIENT_NAMES { assert_eq!(client_encode(name), val); - assert_eq!(client_decode(val), Some(name)); } + assert_eq!(client_encode(b"Unknown"), fixed_bits::CLIENT_NULL); } #[test] - fn test_root_codec() { + fn test_root_encode() { for &(name, val) in ROOT_NAMES { assert_eq!(root_encode(name), val); - assert_eq!(root_decode(val), Some(name)); } + assert_eq!(root_encode(b"_unknown"), fixed_bits::ROOT_NULL); } #[test] - fn test_prop_codec() { + fn test_prop_encode() { for &(name, val) in PROP_NAMES { assert_eq!(prop_encode(name), val); - assert_eq!(prop_decode(val), Some(name)); } + assert_eq!(prop_encode(b"unknown"), fixed_bits::PROP_NULL); } #[test] - fn test_type_codec() { + fn test_type_encode() { for &(name, val) in TYPE_NAMES { assert_eq!(type_encode(name), val); - assert_eq!(type_decode(val), Some(name)); } - } - - #[test] - fn test_unknown_encode() { - assert_eq!(client_encode(b"Unknown"), fixed_bits::CLIENT_NULL); - assert_eq!(root_encode(b"_unknown"), fixed_bits::ROOT_NULL); - assert_eq!(prop_encode(b"unknown"), fixed_bits::PROP_NULL); assert_eq!(type_encode(b"unknown"), fixed_bits::TYPE_NULL); } - - #[test] - fn test_null_decode() { - assert_eq!(client_decode(fixed_bits::CLIENT_NULL), None); - assert_eq!(root_decode(fixed_bits::ROOT_NULL), None); - assert_eq!(prop_decode(fixed_bits::PROP_NULL), None); - assert_eq!(type_decode(fixed_bits::TYPE_NULL), None); - } } diff --git a/src/core/manifest.rs b/src/core/manifest.rs index eeaf276..e600239 100644 --- a/src/core/manifest.rs +++ b/src/core/manifest.rs @@ -3,8 +3,8 @@ use alloc::string::String; use alloc::vec::Vec; use alloc::collections::BTreeMap; -use super::fixed_bits; use super::codec; +use super::fixed_bits; use super::pool::DynamicPool; use super::parser::ParsedManifest; @@ -123,7 +123,7 @@ impl Manifest { 
let segments: Vec<&str> = if path.is_empty() { alloc::vec![] } else { path.split('.').collect() }; let mut meta = MetaIndices::default(); - self.collect_meta(file_record, &mut meta); + self.collect_meta(file_record, file_idx, &mut meta); let mut candidates = self.children_of(file_record); for segment in &segments { @@ -138,7 +138,7 @@ impl Manifest { } let dyn_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; if self.dynamic.get(dyn_idx) == Some(segment.as_bytes()) { - self.collect_meta(record, &mut meta); + self.collect_meta(record, idx, &mut meta); found_idx = Some(idx); break; } @@ -154,7 +154,7 @@ impl Manifest { meta } - fn collect_meta(&self, record: u64, meta: &mut MetaIndices) { + fn collect_meta(&self, record: u64, node_idx: u16, meta: &mut MetaIndices) { for &idx in &self.children_of(record) { let child = match self.keys.get(idx as usize).copied() { Some(r) => r, @@ -162,9 +162,9 @@ impl Manifest { }; let root = fixed_bits::get(child, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT); match root { - fixed_bits::ROOT_LOAD => meta.load = Some(idx), - fixed_bits::ROOT_STORE => meta.store = Some(idx), - fixed_bits::ROOT_STATE => meta.state = Some(idx), + fixed_bits::ROOT_LOAD => { meta.load = Some(idx); meta.load_owner = node_idx; } + fixed_bits::ROOT_STORE => { meta.store = Some(idx); meta.store_owner = node_idx; } + fixed_bits::ROOT_STATE => { meta.state = Some(idx); } _ => {} } } @@ -223,16 +223,13 @@ impl Manifest { continue; } - let prop_name = match codec::prop_decode(prop as u64) { - Some(name) => name, - None => continue, - }; + if prop as u64 == fixed_bits::PROP_NULL { continue; } - if prop_name == b"map" { + if prop as u64 == fixed_bits::PROP_MAP { if let Some(pairs) = self.decode_map(child_idx) { entries.push(("map".into(), ConfigValue::Map(pairs))); } - } else if prop_name == b"connection" { + } else if prop as u64 == fixed_bits::PROP_CONNECTION { if value_idx != 0 { if let Some(cv) = 
self.decode_value(value_idx) { entries.push(("connection".into(), cv)); @@ -240,8 +237,10 @@ impl Manifest { } } else if value_idx != 0 { if let Some(cv) = self.decode_value(value_idx) { - let name = String::from_utf8_lossy(prop_name).into_owned(); - entries.push((name, cv)); + if let Some((name_bytes, _)) = codec::PROP_NAMES.iter().find(|(_, v)| *v == prop as u64) { + let name = String::from_utf8_lossy(name_bytes).into_owned(); + entries.push((name, cv)); + } } } } @@ -358,11 +357,14 @@ impl Default for Manifest { } /// Indices of meta records for a given node, collected from root to node (child overrides parent). +/// `load_owner` / `store_owner` are the key_idx of the node that directly defines `_load` / `_store`. #[derive(Debug, Default)] pub struct MetaIndices { - pub load: Option, - pub store: Option, - pub state: Option, + pub load: Option, + pub load_owner: u16, + pub store: Option, + pub store_owner: u16, + pub state: Option, } #[cfg(test)] diff --git a/src/lib.rs b/src/lib.rs index be655b2..2a6f673 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,5 @@ mod core; +pub mod codec_value; pub mod log_format; pub mod ports; pub mod load; diff --git a/src/load.rs b/src/load.rs index 1386596..0867692 100644 --- a/src/load.rs +++ b/src/load.rs @@ -84,21 +84,10 @@ impl Load { let env = self.env.as_deref() .ok_or(LoadError::ClientNotConfigured)?; - let map = match config.get("map") { - Some(Value::Mapping(m)) => m, - _ => return Err(LoadError::ConfigMissing("map".into())), - }; - - let mut result = Vec::new(); - for (config_key, env_key_value) in map { - if let Value::Scalar(env_key) = env_key_value { - let env_key_str = std::str::from_utf8(env_key).unwrap_or(""); - if let Some(value) = env.get(env_key_str) { - result.push((config_key.clone(), Value::Scalar(value))); - } - } - } - Ok(Value::Mapping(result)) + let (yaml_keys, ext_keys) = split_map(config)?; + let values = env.get(&ext_keys) + .ok_or(LoadError::ClientNotConfigured)?; + Ok(zip_to_mapping(yaml_keys, 
values)) } fn load_from_in_memory( @@ -125,7 +114,7 @@ impl Load { let bytes = kvs .get(key) .ok_or_else(|| LoadError::NotFound(key.into()))?; - Ok(Value::Scalar(bytes)) + Ok(crate::codec_value::decode(&bytes).unwrap_or(Value::Scalar(bytes))) } fn load_from_db( @@ -141,27 +130,16 @@ impl Load { let table = scalar_str(config, "table")?; - let columns = match config.get("columns") { - Some(Value::Mapping(m)) => m.iter() - .filter_map(|(k, v)| { - if let Value::Scalar(col) = v { Some((k.clone(), col.clone())) } else { None } - }) - .collect::>(), - _ => return Err(LoadError::ConfigMissing("columns".into())), - }; + let (yaml_keys, ext_keys) = split_map(config)?; let where_clause = config.get("where") .and_then(|v| if let Value::Scalar(b) = v { Some(b.as_slice()) } else { None }); - let rows = db - .get(connection, table, &columns, where_clause) + let values = db + .get(connection, table, &ext_keys, where_clause) .ok_or_else(|| LoadError::NotFound(table.into()))?; - if rows.is_empty() { - return Err(LoadError::NotFound(table.into())); - } - - Ok(Value::Sequence(rows)) + Ok(zip_to_mapping(yaml_keys, values)) } fn load_from_file( @@ -175,7 +153,7 @@ impl Load { let bytes = file .get(key) .ok_or_else(|| LoadError::NotFound(key.into()))?; - Ok(Value::Scalar(bytes)) + Ok(crate::codec_value::decode(&bytes).unwrap_or(Value::Scalar(bytes))) } fn load_from_http( @@ -187,6 +165,8 @@ impl Load { let url = scalar_str(config, "url")?; + let (yaml_keys, ext_keys) = split_map(config)?; + let headers = match config.get("headers") { Some(Value::Mapping(m)) => Some( m.iter() @@ -198,11 +178,35 @@ impl Load { _ => None, }; - http.get(url, headers.as_deref()) - .ok_or_else(|| LoadError::NotFound(url.into())) + let values = http.get(url, &ext_keys, headers.as_deref()) + .ok_or_else(|| LoadError::NotFound(url.into()))?; + + Ok(zip_to_mapping(yaml_keys, values)) + } +} + +/// Splits a `map` config entry into (yaml_keys, ext_keys). 
+/// yaml_keys: the left-hand side (state-engine field names) +/// ext_keys: the right-hand side (external source field names, passed to adapter) +fn split_map(config: &HashMap) -> Result<(Vec>, Vec>), LoadError> { + match config.get("map") { + Some(Value::Mapping(m)) => { + let (yaml_keys, ext_keys) = m.iter() + .filter_map(|(k, v)| { + if let Value::Scalar(ext) = v { Some((k.clone(), ext.clone())) } else { None } + }) + .unzip(); + Ok((yaml_keys, ext_keys)) + } + _ => Err(LoadError::ConfigMissing("map".into())), } } +/// Zips yaml_keys and values into a Value::Mapping. +fn zip_to_mapping(yaml_keys: Vec>, values: Vec) -> Value { + Value::Mapping(yaml_keys.into_iter().zip(values).collect()) +} + fn scalar_str<'a>(config: &'a HashMap, key: &str) -> Result<&'a str, LoadError> { match config.get(key) { Some(Value::Scalar(b)) => std::str::from_utf8(b) @@ -229,12 +233,13 @@ mod tests { struct MockEnvClient; impl EnvClient for MockEnvClient { - fn get(&self, key: &str) -> Option> { - match key { - "DB_HOST" => Some(b"localhost".to_vec()), - "DB_PORT" => Some(b"5432".to_vec()), - _ => None, - } + fn get(&self, keys: &[Vec]) -> Option> { + let vals = keys.iter().map(|k| match k.as_slice() { + b"DB_HOST" => Value::Scalar(b"localhost".to_vec()), + b"DB_PORT" => Value::Scalar(b"5432".to_vec()), + _ => Value::Null, + }).collect(); + Some(vals) } fn set(&self, _key: &str, _value: Vec) -> bool { false } fn delete(&self, _key: &str) -> bool { false } @@ -358,36 +363,34 @@ mod tests { fn new(rows: Vec) -> Self { Self { rows } } } impl DbClient for MockDb { - fn get(&self, _conn: &Value, _table: &str, _cols: &[(Vec, Vec)], _where: Option<&[u8]>) -> Option> { + fn get(&self, _conn: &Value, _table: &str, _keys: &[Vec], _where: Option<&[u8]>) -> Option> { if self.rows.is_empty() { None } else { Some(self.rows.clone()) } } - fn set(&self, _: &Value, _: &str, _: &[(Vec, Vec)], _: Option<&[u8]>) -> bool { false } + fn set(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> bool { 
false } fn delete(&self, _: &Value, _: &str, _: Option<&[u8]>) -> bool { false } } - fn db_config(table: &str, columns: &[(&str, &str)]) -> HashMap { + fn db_config(table: &str, map: &[(&str, &str)]) -> HashMap { let mut config = HashMap::new(); config.insert("client".to_string(), client_config(fixed_bits::CLIENT_DB)); config.insert("table".to_string(), Value::Scalar(table.as_bytes().to_vec())); config.insert("connection".to_string(), Value::Mapping(vec![])); - config.insert("columns".to_string(), Value::Mapping( - columns.iter().map(|(k, v)| (k.as_bytes().to_vec(), Value::Scalar(v.as_bytes().to_vec()))).collect() + config.insert("map".to_string(), Value::Mapping( + map.iter().map(|(k, v)| (k.as_bytes().to_vec(), Value::Scalar(v.as_bytes().to_vec()))).collect() )); config } #[test] fn test_load_from_db() { - let row = Value::Mapping(vec![(b"id".to_vec(), Value::Scalar(b"42".to_vec()))]); - let client = Arc::new(MockDb::new(vec![row.clone()])); + // adapter returns field values in ext_keys order + let client = Arc::new(MockDb::new(vec![Value::Scalar(b"42".to_vec())])); let load = Load::new().with_db(client); let config = db_config("users", &[("id", "id")]); let result = load.handle(&config).unwrap(); - if let Value::Sequence(rows) = result { - assert_eq!(rows[0], row); - } else { - panic!("expected Sequence"); - } + // zip_to_mapping: yaml_key "id" → Value::Scalar("42") + let expected = Value::Mapping(vec![(b"id".to_vec(), Value::Scalar(b"42".to_vec()))]); + assert_eq!(result, expected); } #[test] @@ -414,7 +417,17 @@ mod tests { fn new(response: Option) -> Self { Self { response } } } impl HttpClient for MockHttp { - fn get(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> Option { self.response.clone() } + fn get(&self, _: &str, keys: &[Vec], _: Option<&[(Vec, Vec)]>) -> Option> { + self.response.as_ref().map(|v| { + keys.iter().map(|k| match v { + Value::Mapping(m) => m.iter() + .find(|(mk, _)| mk == k) + .map(|(_, mv)| mv.clone()) + .unwrap_or(Value::Null), + _ => 
v.clone(), + }).collect() + }) + } fn set(&self, _: &str, _: Value, _: Option<&[(Vec, Vec)]>) -> bool { false } fn delete(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> bool { false } } @@ -423,6 +436,9 @@ mod tests { let mut c = HashMap::new(); c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); + c.insert("map".to_string(), Value::Mapping(vec![ + (b"status".to_vec(), Value::Scalar(b"status".to_vec())), + ])); c } diff --git a/src/ports/required.rs b/src/ports/required.rs index 4300373..9daf34f 100644 --- a/src/ports/required.rs +++ b/src/ports/required.rs @@ -17,9 +17,11 @@ pub trait KVSClient: Send + Sync { } /// Environment / config store. +/// `keys` is the list of external key names (map values from manifest). +/// Returns values in the same order as `keys`. /// Internal mutability is the implementor's responsibility. pub trait EnvClient: Send + Sync { - fn get(&self, key: &str) -> Option>; + fn get(&self, keys: &[Vec]) -> Option>; fn set(&self, key: &str, value: Vec) -> bool; fn delete(&self, key: &str) -> bool; } @@ -27,20 +29,21 @@ pub trait EnvClient: Send + Sync { /// Relational DB client. /// Do NOT call State inside DbClient — it would cause recursion. /// `connection` is a Value::Mapping resolved from the manifest. -/// `columns` is the raw manifest `map` object; adapter is responsible for column extraction. +/// `keys` is the list of db column names (map values from manifest). +/// Returns values in the same order as `keys`. pub trait DbClient: Send + Sync { fn get( &self, connection: &Value, table: &str, - columns: &[(Vec, Vec)], + keys: &[Vec], where_clause: Option<&[u8]>, ) -> Option>; fn set( &self, connection: &Value, table: &str, - columns: &[(Vec, Vec)], + keys: &[Vec], where_clause: Option<&[u8]>, ) -> bool; fn delete( @@ -52,13 +55,16 @@ pub trait DbClient: Send + Sync { } /// HTTP client. 
+/// `keys` is the list of response field names (map values from manifest). +/// Returns values in the same order as `keys`. /// `headers` is an optional list of (name, value) byte pairs. pub trait HttpClient: Send + Sync { fn get( &self, url: &str, + keys: &[Vec], headers: Option<&[(Vec, Vec)]>, - ) -> Option; + ) -> Option>; fn set( &self, url: &str, diff --git a/src/state.rs b/src/state.rs index 5357c99..bb29e47 100644 --- a/src/state.rs +++ b/src/state.rs @@ -390,16 +390,61 @@ impl State { let meta = self.manifest.get_meta(&file, &path); if let Some(store_idx) = meta.store { + let owner_idx = meta.store_owner; + let is_leaf = owner_idx != key_idx; + + // For leaf keys: build updated owner Mapping via read-modify-write + let store_value = if is_leaf { + let field = path.rsplit('.').next().unwrap_or(&path).as_bytes().to_vec(); + + // 1. state_vals から owner Mapping を取得、なければ store から read + let owner_mapping = match self.find_state_value(owner_idx) + .and_then(|i| self.state_vals.get(i).cloned()) + { + Some(v @ Value::Mapping(_)) => Some(v), + _ => { + match self.resolve_config(store_idx)? { + Some(ref config) => self.store.get(config), + None => None, + } + } + }; + + // 2. Mapping にフィールドを差し込む + let mut pairs = match owner_mapping { + Some(Value::Mapping(p)) => p, + _ => vec![], + }; + if let Some(entry) = pairs.iter_mut().find(|(k, _)| *k == field) { + entry.1 = value.clone(); + } else { + pairs.push((field, value.clone())); + } + Value::Mapping(pairs) + } else { + value.clone() + }; + match self.resolve_config(store_idx)? 
{ Some(config) => { - return match self.store.set(&config, value.clone(), ttl) { + return match self.store.set(&config, store_value.clone(), ttl) { Ok(ok) => { if ok { - if let Some(sv_idx) = self.find_state_value(key_idx) { - self.state_vals[sv_idx] = value; + // owner の state_vals を更新 + if let Some(sv_idx) = self.find_state_value(owner_idx) { + self.state_vals[sv_idx] = store_value; } else { - self.state_keys.push(key_idx); - self.state_vals.push(value); + self.state_keys.push(owner_idx); + self.state_vals.push(store_value); + } + // 葉キー自身も state_vals に記録 + if is_leaf { + if let Some(sv_idx) = self.find_state_value(key_idx) { + self.state_vals[sv_idx] = value; + } else { + self.state_keys.push(key_idx); + self.state_vals.push(value); + } } } Ok(ok) @@ -566,14 +611,14 @@ mod tests { struct StubDb; impl DbClient for StubDb { - fn get(&self, _: &Value, _: &str, _: &[(Vec, Vec)], _: Option<&[u8]>) -> Option> { None } - fn set(&self, _: &Value, _: &str, _: &[(Vec, Vec)], _: Option<&[u8]>) -> bool { false } + fn get(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> Option> { None } + fn set(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> bool { false } fn delete(&self, _: &Value, _: &str, _: Option<&[u8]>) -> bool { false } } struct StubEnv; impl EnvClient for StubEnv { - fn get(&self, _: &str) -> Option> { None } + fn get(&self, _: &[Vec]) -> Option> { None } fn set(&self, _: &str, _: Vec) -> bool { false } fn delete(&self, _: &str) -> bool { false } } @@ -587,7 +632,7 @@ mod tests { struct StubHttp; impl crate::ports::required::HttpClient for StubHttp { - fn get(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> Option { None } + fn get(&self, _: &str, _: &[Vec], _: Option<&[(Vec, Vec)]>) -> Option> { None } fn set(&self, _: &str, _: Value, _: Option<&[(Vec, Vec)]>) -> bool { false } fn delete(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> bool { false } } diff --git a/src/store.rs b/src/store.rs index fc36f2d..9cdbcc4 100644 --- a/src/store.rs +++ 
b/src/store.rs @@ -53,18 +53,21 @@ impl Store { fixed_bits::CLIENT_KVS => { let kvs = self.kvs.as_deref()?; let key = scalar_str(store_config, "key")?; - kvs.get(key).map(Value::Scalar) + kvs.get(key).map(|b| crate::codec_value::decode(&b).unwrap_or(Value::Scalar(b))) } fixed_bits::CLIENT_HTTP => { let http = self.http.as_deref()?; let url = scalar_str(store_config, "url")?; + let ext_keys = split_ext_keys(store_config)?; let headers = headers_list(store_config); - http.get(url, headers.as_deref()) + let values = http.get(url, &ext_keys, headers.as_deref())?; + let yaml_keys = split_yaml_keys(store_config)?; + Some(zip_to_mapping(yaml_keys, values)) } fixed_bits::CLIENT_FILE => { let file = self.file.as_deref()?; let key = scalar_str(store_config, "key")?; - file.get(key).map(Value::Scalar) + file.get(key).map(|b| crate::codec_value::decode(&b).unwrap_or(Value::Scalar(b))) } _ => None, } @@ -189,10 +192,30 @@ fn headers_list(config: &HashMap) -> Option, Vec } } +fn split_yaml_keys(config: &HashMap) -> Option>> { + match config.get("map") { + Some(Value::Mapping(m)) => Some(m.iter().map(|(k, _)| k.clone()).collect()), + _ => None, + } +} + +fn split_ext_keys(config: &HashMap) -> Option>> { + match config.get("map") { + Some(Value::Mapping(m)) => Some( + m.iter().filter_map(|(_, v)| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect() + ), + _ => None, + } +} + +fn zip_to_mapping(yaml_keys: Vec>, values: Vec) -> Value { + Value::Mapping(yaml_keys.into_iter().zip(values).collect()) +} + fn value_to_bytes(value: Value) -> Vec { match value { Value::Scalar(b) => b, - _ => Vec::new(), + other => crate::codec_value::encode(&other), } } @@ -383,8 +406,12 @@ mod tests { fn new() -> Self { Self { store: std::sync::Mutex::new(std::collections::HashMap::new()) } } } impl HttpClient for MockHttp { - fn get(&self, url: &str, _: Option<&[(Vec, Vec)]>) -> Option { - self.store.lock().unwrap().get(url).cloned() + fn get(&self, url: &str, keys: &[Vec], _: 
Option<&[(Vec, Vec)]>) -> Option> { + let stored = self.store.lock().unwrap().get(url).cloned()?; + Some(keys.iter().map(|k| match &stored { + Value::Mapping(m) => m.iter().find(|(mk, _)| mk == k).map(|(_, v)| v.clone()).unwrap_or(Value::Null), + _ => stored.clone(), + }).collect()) } fn set(&self, url: &str, value: Value, _: Option<&[(Vec, Vec)]>) -> bool { self.store.lock().unwrap().insert(url.to_string(), value); true @@ -398,6 +425,9 @@ mod tests { let mut c = HashMap::new(); c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); + c.insert("map".to_string(), Value::Mapping(vec![ + (b"status".to_vec(), Value::Scalar(b"status".to_vec())), + ])); c } @@ -406,9 +436,11 @@ mod tests { let client = Arc::new(MockHttp::new()); let store = Store::new().with_http(client); let config = http_config("http://example.com/data"); - let data = Value::Scalar(b"payload".to_vec()); - assert!(store.set(&config, data.clone(), None).unwrap()); - assert_eq!(store.get(&config).unwrap(), data); + let data = Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]); + assert!(store.set(&config, data, None).unwrap()); + let result = store.get(&config).unwrap(); + let expected = Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]); + assert_eq!(result, expected); } #[test] @@ -416,9 +448,8 @@ mod tests { let client = Arc::new(MockHttp::new()); let store = Store::new().with_http(client); let config = http_config("http://example.com/data"); - store.set(&config, Value::Scalar(b"x".to_vec()), None).unwrap(); + store.set(&config, Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]), None).unwrap(); assert!(store.delete(&config).unwrap()); - assert!(store.get(&config).is_none()); } #[test] From 9d003d8438596dc7cd197ab8132466459aa3dda7 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 29 Mar 2026 23:44:24 +0900 Subject: [PATCH 02/41] underway --- 
src/load.rs | 56 ++++++++++++++++++++++++++-------------------------- src/state.rs | 9 ++++++++- src/store.rs | 32 ++++++++++++------------------ 3 files changed, 49 insertions(+), 48 deletions(-) diff --git a/src/load.rs b/src/load.rs index 0867692..4e32ec4 100644 --- a/src/load.rs +++ b/src/load.rs @@ -84,7 +84,7 @@ impl Load { let env = self.env.as_deref() .ok_or(LoadError::ClientNotConfigured)?; - let (yaml_keys, ext_keys) = split_map(config)?; + let (yaml_keys, ext_keys) = get_map_keys(config)?; let values = env.get(&ext_keys) .ok_or(LoadError::ClientNotConfigured)?; Ok(zip_to_mapping(yaml_keys, values)) @@ -130,7 +130,7 @@ impl Load { let table = scalar_str(config, "table")?; - let (yaml_keys, ext_keys) = split_map(config)?; + let (yaml_keys, ext_keys) = get_map_keys(config)?; let where_clause = config.get("where") .and_then(|v| if let Value::Scalar(b) = v { Some(b.as_slice()) } else { None }); @@ -165,7 +165,7 @@ impl Load { let url = scalar_str(config, "url")?; - let (yaml_keys, ext_keys) = split_map(config)?; + let (yaml_keys, ext_keys) = get_map_keys(config)?; let headers = match config.get("headers") { Some(Value::Mapping(m)) => Some( @@ -185,21 +185,16 @@ impl Load { } } -/// Splits a `map` config entry into (yaml_keys, ext_keys). 
-/// yaml_keys: the left-hand side (state-engine field names) -/// ext_keys: the right-hand side (external source field names, passed to adapter) -fn split_map(config: &HashMap) -> Result<(Vec>, Vec>), LoadError> { - match config.get("map") { - Some(Value::Mapping(m)) => { - let (yaml_keys, ext_keys) = m.iter() - .filter_map(|(k, v)| { - if let Value::Scalar(ext) = v { Some((k.clone(), ext.clone())) } else { None } - }) - .unzip(); - Ok((yaml_keys, ext_keys)) - } - _ => Err(LoadError::ConfigMissing("map".into())), - } +fn get_map_keys(config: &HashMap) -> Result<(Vec>, Vec>), LoadError> { + let yaml_keys = match config.get("yaml_keys") { + Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), + _ => return Err(LoadError::ConfigMissing("yaml_keys".into())), + }; + let ext_keys = match config.get("ext_keys") { + Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), + _ => return Err(LoadError::ConfigMissing("ext_keys".into())), + }; + Ok((yaml_keys, ext_keys)) } /// Zips yaml_keys and values into a Value::Mapping. 
@@ -229,6 +224,12 @@ mod tests { Value::Scalar(client_id.to_le_bytes().to_vec()) } + fn map_config(pairs: &[(&str, &str)]) -> (Value, Value) { + let yaml_keys = Value::Sequence(pairs.iter().map(|(k, _)| Value::Scalar(k.as_bytes().to_vec())).collect()); + let ext_keys = Value::Sequence(pairs.iter().map(|(_, v)| Value::Scalar(v.as_bytes().to_vec())).collect()); + (yaml_keys, ext_keys) + } + // --- Env --- struct MockEnvClient; @@ -250,10 +251,9 @@ mod tests { let load = Load::new().with_env(Arc::new(MockEnvClient)); let mut config = HashMap::new(); config.insert("client".to_string(), client_config(fixed_bits::CLIENT_ENV)); - config.insert("map".to_string(), Value::Mapping(vec![ - (b"host".to_vec(), Value::Scalar(b"DB_HOST".to_vec())), - (b"port".to_vec(), Value::Scalar(b"DB_PORT".to_vec())), - ])); + let (yaml_keys, ext_keys) = map_config(&[("host", "DB_HOST"), ("port", "DB_PORT")]); + config.insert("yaml_keys".to_string(), yaml_keys); + config.insert("ext_keys".to_string(), ext_keys); let result = load.handle(&config).unwrap(); if let Value::Mapping(m) = result { let host = m.iter().find(|(k, _)| k == b"host").map(|(_, v)| v.clone()); @@ -375,9 +375,9 @@ mod tests { config.insert("client".to_string(), client_config(fixed_bits::CLIENT_DB)); config.insert("table".to_string(), Value::Scalar(table.as_bytes().to_vec())); config.insert("connection".to_string(), Value::Mapping(vec![])); - config.insert("map".to_string(), Value::Mapping( - map.iter().map(|(k, v)| (k.as_bytes().to_vec(), Value::Scalar(v.as_bytes().to_vec()))).collect() - )); + let (yaml_keys, ext_keys) = map_config(map); + config.insert("yaml_keys".to_string(), yaml_keys); + config.insert("ext_keys".to_string(), ext_keys); config } @@ -436,9 +436,9 @@ mod tests { let mut c = HashMap::new(); c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); - c.insert("map".to_string(), Value::Mapping(vec![ - (b"status".to_vec(), 
Value::Scalar(b"status".to_vec())), - ])); + let (yaml_keys, ext_keys) = map_config(&[("status", "status")]); + c.insert("yaml_keys".to_string(), yaml_keys); + c.insert("ext_keys".to_string(), ext_keys); c } diff --git a/src/state.rs b/src/state.rs index bb29e47..7d389ce 100644 --- a/src/state.rs +++ b/src/state.rs @@ -188,7 +188,14 @@ impl State { let mut config = HashMap::new(); for (key, cv) in entries { - if let Some(v) = self.resolve_config_value(cv)? { + if key == "map" { + if let ConfigValue::Map(pairs) = cv { + let yaml_keys = Value::Sequence(pairs.iter().map(|(k, _)| Value::Scalar(k.as_bytes().to_vec())).collect()); + let ext_keys = Value::Sequence(pairs.into_iter().map(|(_, v)| Value::Scalar(v.into_bytes())).collect()); + config.insert("yaml_keys".into(), yaml_keys); + config.insert("ext_keys".into(), ext_keys); + } + } else if let Some(v) = self.resolve_config_value(cv)? { config.insert(key, v); } } diff --git a/src/store.rs b/src/store.rs index 9cdbcc4..4686fca 100644 --- a/src/store.rs +++ b/src/store.rs @@ -58,10 +58,9 @@ impl Store { fixed_bits::CLIENT_HTTP => { let http = self.http.as_deref()?; let url = scalar_str(store_config, "url")?; - let ext_keys = split_ext_keys(store_config)?; + let (yaml_keys, ext_keys) = get_map_keys(store_config)?; let headers = headers_list(store_config); let values = http.get(url, &ext_keys, headers.as_deref())?; - let yaml_keys = split_yaml_keys(store_config)?; Some(zip_to_mapping(yaml_keys, values)) } fixed_bits::CLIENT_FILE => { @@ -192,20 +191,16 @@ fn headers_list(config: &HashMap) -> Option, Vec } } -fn split_yaml_keys(config: &HashMap) -> Option>> { - match config.get("map") { - Some(Value::Mapping(m)) => Some(m.iter().map(|(k, _)| k.clone()).collect()), - _ => None, - } -} - -fn split_ext_keys(config: &HashMap) -> Option>> { - match config.get("map") { - Some(Value::Mapping(m)) => Some( - m.iter().filter_map(|(_, v)| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect() - ), - _ => None, - } 
+fn get_map_keys(config: &HashMap) -> Option<(Vec>, Vec>)> { + let yaml_keys = match config.get("yaml_keys") { + Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), + _ => return None, + }; + let ext_keys = match config.get("ext_keys") { + Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), + _ => return None, + }; + Some((yaml_keys, ext_keys)) } fn zip_to_mapping(yaml_keys: Vec>, values: Vec) -> Value { @@ -425,9 +420,8 @@ mod tests { let mut c = HashMap::new(); c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); - c.insert("map".to_string(), Value::Mapping(vec![ - (b"status".to_vec(), Value::Scalar(b"status".to_vec())), - ])); + c.insert("yaml_keys".to_string(), Value::Sequence(vec![Value::Scalar(b"status".to_vec())])); + c.insert("ext_keys".to_string(), Value::Sequence(vec![Value::Scalar(b"status".to_vec())])); c } From bd830d9d2b915df96543188512364f3529a366d7 Mon Sep 17 00:00:00 2001 From: Andyou Date: Tue, 31 Mar 2026 08:03:26 +0900 Subject: [PATCH 03/41] update --- src/core/fixed_bits.rs | 16 +++- src/core/manifest.rs | 175 +++++++++++++++++++++++++++-------------- src/core/pool.rs | 5 ++ src/state.rs | 60 +++++++------- 4 files changed, 165 insertions(+), 91 deletions(-) diff --git a/src/core/fixed_bits.rs b/src/core/fixed_bits.rs index d937605..4ef3bf4 100644 --- a/src/core/fixed_bits.rs +++ b/src/core/fixed_bits.rs @@ -1,8 +1,17 @@ -// fixed bits record for intern pools +// - keyword: a string used as a key in DSL +// - key: fixed bits of flags, reserved value or keyword index +// - path: keyword indices +// - value: indices composed of +// - arg +// // note: // - Value 0 means null in each field. 
// key record (64 bits) +// note: +// - is_path: is child keys of "_load.map" or has "{}" (placeholder) +// - has_children: has multiple child keys +// - is_leaf: has value(= has no child keys) // // | category | field | bits | offset | // |-------------|---------------|------|--------| @@ -17,7 +26,10 @@ // | child index | child index | 16 | 14 | // | padding | - | 14 | 0 | -// value record (128 bits, [u64; 2]) +// value record (128 bits) +// note: +// - is_template: contains both static and placeholder (handled as string concat) +// - is_path: is a segment of placeholder (Value parser removes "{}") // // | category | field | bits | offset | // |----------|---------------|------|--------| diff --git a/src/core/manifest.rs b/src/core/manifest.rs index e600239..65703eb 100644 --- a/src/core/manifest.rs +++ b/src/core/manifest.rs @@ -8,15 +8,25 @@ use super::fixed_bits; use super::pool::DynamicPool; use super::parser::ParsedManifest; +/// A single token in a template value. +#[derive(Debug)] +pub enum TemplateToken { + /// Literal byte sequence. + Literal(Vec), + /// A path placeholder — intern-index sequence (file segment + field segments). + Path(Vec), +} + /// A resolved or unresolved config value produced by `build_config`. -/// State layer is responsible for resolving `Placeholder` variants via `State::get()`. +/// State layer is responsible for resolving `Path` and `Template` variants. #[derive(Debug)] pub enum ConfigValue { /// Static string value (no placeholder resolution needed). Str(String), - /// A placeholder path that must be resolved via State::get(). - /// Used for both scalar placeholders and object-valued placeholders (e.g. connection). - Placeholder(String), + /// A single path reference — intern-index sequence to resolve via State. + Path(Vec), + /// A mixed literal+path template — tokens to resolve and concatenate. + Template(Vec), /// A map of (yaml_key → db_column) pairs. Map(Vec<(String, String)>), /// Numeric client id. 
@@ -290,63 +300,103 @@ impl Manifest { } /// Decodes a value record into a ConfigValue. - /// If the value is a placeholder path, returns Placeholder(path_string). - /// If it's a template or static string, returns Str(string) or Placeholder for single-path tokens. + /// - Single path token (non-template) → `Path(Vec)` (intern-index segments) + /// - Mixed literal+path (template) → `Template(Vec)` + /// - Pure literal (no paths) → `Str(String)` pub fn decode_value(&self, value_idx: u16) -> Option { + const TOKEN_OFFSETS: [(u32, u32); 6] = [ + (fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_OFFSET_T0_DYNAMIC), + (fixed_bits::V_OFFSET_T1_IS_PATH, fixed_bits::V_OFFSET_T1_DYNAMIC), + (fixed_bits::V_OFFSET_T2_IS_PATH, fixed_bits::V_OFFSET_T2_DYNAMIC), + (fixed_bits::V_OFFSET_T3_IS_PATH, fixed_bits::V_OFFSET_T3_DYNAMIC), + (fixed_bits::V_OFFSET_T4_IS_PATH, fixed_bits::V_OFFSET_T4_DYNAMIC), + (fixed_bits::V_OFFSET_T5_IS_PATH, fixed_bits::V_OFFSET_T5_DYNAMIC), + ]; + let vo = self.values.get(value_idx as usize).copied()?; let is_template = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_IS_TEMPLATE, fixed_bits::V_MASK_IS_TEMPLATE) == 1; let is_path0 = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_MASK_IS_PATH) == 1; let dyn_idx0 = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_T0_DYNAMIC, fixed_bits::V_MASK_DYNAMIC) as u16; - // single pure placeholder (non-template, is_path) → Placeholder + // single pure path reference (non-template) → Path if is_path0 && dyn_idx0 != 0 && !is_template { - let path = self.resolve_path(dyn_idx0)?; - return Some(ConfigValue::Placeholder(path)); + let segs = self.path_map.get(dyn_idx0 as usize)?.clone(); + return Some(ConfigValue::Path(segs)); + } + + // template: collect tokens as TemplateToken list + if is_template { + let mut tokens = Vec::new(); + for (i, (off_is_path, off_dynamic)) in TOKEN_OFFSETS.iter().enumerate() { + let word = if i < 3 { 0 } else { 1 }; + let is_path = fixed_bits::get(vo[word], *off_is_path, 
fixed_bits::V_MASK_IS_PATH) == 1; + let dyn_idx = fixed_bits::get(vo[word], *off_dynamic, fixed_bits::V_MASK_DYNAMIC) as u16; + if dyn_idx == 0 { break; } + if is_path { + let segs = self.path_map.get(dyn_idx as usize)?.clone(); + tokens.push(TemplateToken::Path(segs)); + } else { + let b = self.dynamic.get(dyn_idx)?.to_vec(); + tokens.push(TemplateToken::Literal(b)); + } + } + return Some(ConfigValue::Template(tokens)); } - // template or static: collect all tokens - Some(ConfigValue::Str(self.decode_value_tokens(vo)?)) + // pure literal + let b = self.dynamic.get(dyn_idx0)?.to_vec(); + Some(ConfigValue::Str(String::from_utf8_lossy(&b).into_owned())) } - /// Decodes all tokens of a value record into a raw string, - /// embedding placeholder paths as `${path}` so the caller can resolve them. - pub fn decode_value_tokens(&self, vo: [u64; 2]) -> Option { - const TOKEN_OFFSETS: [(u32, u32); 6] = [ - (fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_OFFSET_T0_DYNAMIC), - (fixed_bits::V_OFFSET_T1_IS_PATH, fixed_bits::V_OFFSET_T1_DYNAMIC), - (fixed_bits::V_OFFSET_T2_IS_PATH, fixed_bits::V_OFFSET_T2_DYNAMIC), - (fixed_bits::V_OFFSET_T3_IS_PATH, fixed_bits::V_OFFSET_T3_DYNAMIC), - (fixed_bits::V_OFFSET_T4_IS_PATH, fixed_bits::V_OFFSET_T4_DYNAMIC), - (fixed_bits::V_OFFSET_T5_IS_PATH, fixed_bits::V_OFFSET_T5_DYNAMIC), - ]; + /// Reconstructs a dot-joined key string from intern-index segments. + /// Returns an error string if any segment index is invalid. 
+ pub fn segs_to_key(&self, segs: &[u16]) -> Result { + let mut parts = Vec::with_capacity(segs.len()); + for &s in segs { + let b = self.dynamic.get(s) + .ok_or_else(|| crate::ports::provided::StateError::KeyNotFound(format!("invalid segment index {}", s)))?; + parts.push(String::from_utf8_lossy(b).into_owned()); + } + Ok(parts.join(".")) + } - let mut result = String::new(); - for (i, (off_is_path, off_dynamic)) in TOKEN_OFFSETS.iter().enumerate() { - let word = if i < 3 { 0 } else { 1 }; - let is_path = fixed_bits::get(vo[word], *off_is_path, fixed_bits::V_MASK_IS_PATH) == 1; - let dyn_idx = fixed_bits::get(vo[word], *off_dynamic, fixed_bits::V_MASK_DYNAMIC) as u16; - if dyn_idx == 0 { break; } - - if is_path { - let path = self.resolve_path(dyn_idx)?; - result.push_str("${"); - result.push_str(&path); - result.push('}'); - } else { - let b = self.dynamic.get(dyn_idx)?; - result.push_str(&String::from_utf8_lossy(b)); - } + /// Finds a field-key record by intern-index segment list. + /// `segs[0]` must be the file name segment, `segs[1..]` are the field path. + pub fn find_by_segs(&self, segs: &[u16]) -> Option { + if segs.is_empty() { return None; } + let file_name = self.dynamic.get(segs[0])?; + let file_str = core::str::from_utf8(file_name).ok()?; + let file_idx = self.files.get(file_str)?.file_key_idx; + if segs.len() == 1 { + return Some(file_idx); } - Some(result) + let file_record = self.keys.get(file_idx as usize).copied()?; + let top_level = self.children_of(file_record); + self.find_in_by_segs(&segs[1..], &top_level) } - /// Resolves a path_map index to a dot-joined path string. 
- fn resolve_path(&self, path_map_idx: u16) -> Option { - let segs = self.path_map.get(path_map_idx as usize)?; - let parts: Vec<&str> = segs.iter() - .filter_map(|&s| self.dynamic.get(s).and_then(|b| core::str::from_utf8(b).ok())) - .collect(); - Some(parts.join(".")) + fn find_in_by_segs(&self, segs: &[u16], candidates: &[u16]) -> Option { + let target_idx = segs[0]; + let rest = &segs[1..]; + for &idx in candidates { + let record = self.keys.get(idx as usize).copied()?; + if fixed_bits::get(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT) != fixed_bits::ROOT_NULL { + continue; + } + let dyn_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; + if dyn_idx != target_idx { + continue; + } + if rest.is_empty() { + return Some(idx); + } + let next = self.children_of(record); + if next.is_empty() { + return None; + } + return self.find_in_by_segs(rest, &next); + } + None } } @@ -555,12 +605,12 @@ mod tests { } #[test] - fn test_build_config_connection_is_placeholder() { + fn test_build_config_connection_is_path() { let m = cache_manifest(); let meta = m.get_meta("cache", "user"); let entries = m.build_config(meta.load.unwrap()).unwrap(); let conn = entries.iter().find(|(k, _)| k == "connection"); - assert!(matches!(conn, Some((_, ConfigValue::Placeholder(_))))); + assert!(matches!(conn, Some((_, ConfigValue::Path(_))))); } #[test] @@ -576,40 +626,43 @@ mod tests { } #[test] - fn test_build_config_key_with_template_is_str() { + fn test_build_config_key_with_template_is_template() { let m = cache_manifest(); let meta = m.get_meta("cache", "user"); let entries = m.build_config(meta.store.unwrap()).unwrap(); let key = entries.iter().find(|(k, _)| k == "key"); - assert!(matches!(key, Some((_, ConfigValue::Str(_))))); - if let Some((_, ConfigValue::Str(s))) = key { - assert!(s.contains("${")); - } + assert!(matches!(key, Some((_, ConfigValue::Template(_))))); } // --- decode_value --- #[test] - fn 
test_decode_value_single_placeholder() { - // connection: ${connection.tenant} → Placeholder + fn test_decode_value_single_path() { + // connection: ${connection.tenant} → Path with segments ["connection", "tenant"] let m = cache_manifest(); let meta = m.get_meta("cache", "user"); let entries = m.build_config(meta.load.unwrap()).unwrap(); let conn = entries.iter().find(|(k, _)| k == "connection"); - assert!(matches!(conn, Some((_, ConfigValue::Placeholder(p))) if p == "connection.tenant")); + if let Some((_, ConfigValue::Path(segs))) = conn { + let key = m.segs_to_key(segs).unwrap(); + assert_eq!(key, "connection.tenant"); + } else { + panic!("expected Path"); + } } #[test] - fn test_decode_value_template_embeds_placeholder() { - // key: "user:${session.sso_user_id}" → Str containing ${...} + fn test_decode_value_template_is_template_variant() { + // key: "user:${session.sso_user_id}" → Template with Literal + Path tokens let m = cache_manifest(); let meta = m.get_meta("cache", "user"); let entries = m.build_config(meta.store.unwrap()).unwrap(); let key = entries.iter().find(|(k, _)| k == "key"); - if let Some((_, ConfigValue::Str(s))) = key { - assert!(s.contains("${session.sso_user_id}")); + if let Some((_, ConfigValue::Template(tokens))) = key { + assert!(tokens.iter().any(|t| matches!(t, TemplateToken::Literal(_)))); + assert!(tokens.iter().any(|t| matches!(t, TemplateToken::Path(_)))); } else { - panic!("expected Str"); + panic!("expected Template"); } } } diff --git a/src/core/pool.rs b/src/core/pool.rs index 23a9b42..2f8939b 100644 --- a/src/core/pool.rs +++ b/src/core/pool.rs @@ -26,6 +26,11 @@ impl DynamicPool { pub fn get(&self, index: u16) -> Option<&[u8]> { self.slots.get(index as usize).map(|s| s.as_slice()) } + + /// Returns the index of an already-interned byte slice, or None if not present. 
+ pub fn find(&self, s: &[u8]) -> Option { + self.slots.iter().position(|x| x.as_slice() == s).map(|i| i as u16) + } } impl Default for DynamicPool { diff --git a/src/state.rs b/src/state.rs index 7d389ce..7e9bb06 100644 --- a/src/state.rs +++ b/src/state.rs @@ -1,7 +1,7 @@ use std::collections::{HashMap, HashSet}; use std::path::PathBuf; use crate::core::fixed_bits; -use crate::core::manifest::{Manifest, ConfigValue}; +use crate::core::manifest::{Manifest, ConfigValue, TemplateToken}; use crate::core::parser::{Value as ParseValue, parse}; use crate::ports::provided::{ManifestError, StateError, Value}; use crate::ports::required::FileClient; @@ -140,34 +140,39 @@ impl State { self.state_keys.iter().skip(1).position(|&k| k == key_idx).map(|p| p + 1) } - fn resolve_template(&mut self, template: &str) -> Result, StateError> { - let mut result = String::new(); - let mut remaining = template; - while let Some(start) = remaining.find("${") { - result.push_str(&remaining[..start]); - remaining = &remaining[start + 2..]; - let end = match remaining.find('}') { - Some(e) => e, - None => return Ok(None), - }; - let path = &remaining[..end]; - remaining = &remaining[end + 1..]; - let resolved = match self.get(path)? 
{ - Some(Value::Scalar(b)) => String::from_utf8_lossy(&b).into_owned(), - _ => return Ok(None), - }; - result.push_str(&resolved); + fn get_by_path(&mut self, segs: &[u16]) -> Result, StateError> { + let key = self.manifest.segs_to_key(segs)?; + if self.called_keys.len() >= self.max_recursion || self.called_keys.contains(&key) { + return Err(StateError::RecursionLimitExceeded); } - result.push_str(remaining); - Ok(Some(result)) + let key_idx = match self.manifest.find_by_segs(segs) { + Some(idx) => idx, + None => return Err(StateError::KeyNotFound(key)), + }; + self.called_keys.insert(key.clone()); + let result = self.get_core(key_idx, &key); + self.called_keys.remove(&key); + result } fn resolve_config_value(&mut self, cv: ConfigValue) -> Result, StateError> { match cv { ConfigValue::Client(c) => Ok(Some(Value::Scalar(c.to_le_bytes().to_vec()))), - ConfigValue::Placeholder(path) => self.get(&path), - ConfigValue::Str(s) if s.contains("${") => { - Ok(self.resolve_template(&s)?.map(|s| Value::Scalar(s.into_bytes()))) + ConfigValue::Path(segs) => self.get_by_path(&segs), + ConfigValue::Template(tokens) => { + let mut result = Vec::new(); + for token in tokens { + match token { + TemplateToken::Literal(b) => result.extend_from_slice(&b), + TemplateToken::Path(segs) => { + match self.get_by_path(&segs)? 
{ + Some(Value::Scalar(b)) => result.extend_from_slice(&b), + _ => return Ok(None), + } + } + } + } + Ok(Some(Value::Scalar(result))) } ConfigValue::Str(s) => Ok(Some(Value::Scalar(s.into_bytes()))), ConfigValue::Map(pairs) => { @@ -289,15 +294,14 @@ impl State { // CLIENT_STATE: extract key path directly from build_config without resolving if has_state_client { if let Some(load_idx) = meta.load { - let state_key = self.manifest.build_config(load_idx) + let state_key_segs = self.manifest.build_config(load_idx) .and_then(|entries| entries.into_iter().find(|(k, _)| k == "key")) .and_then(|(_, cv)| match cv { - ConfigValue::Placeholder(p) => Some(p), - ConfigValue::Str(s) => Some(s), + ConfigValue::Path(segs) => Some(segs), _ => None, }); - let result = match state_key { - Some(k) => self.get(&k), + let result = match state_key_segs { + Some(segs) => self.get_by_path(&segs), None => Ok(None), }; self.called_keys.remove(key); From 948aa469c612c6e7f0666b482fa977e6853e599b Mon Sep 17 00:00:00 2001 From: Andyou Date: Sat, 4 Apr 2026 10:20:15 +0900 Subject: [PATCH 04/41] update license to apache-2.0 --- LICENSE | 202 +++++++++++++++++++++++++++++++++++++++++----- src/core/codec.rs | 4 - 2 files changed, 181 insertions(+), 25 deletions(-) diff --git a/LICENSE b/LICENSE index d31ab9b..e815075 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,181 @@ -MIT License - -Copyright (c) 2026 Andyou - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +--- +Copyright 2026 Andyou +Licensed under the Apache License, Version 2.0 (the "License"); \ No newline at end of file diff --git a/src/core/codec.rs b/src/core/codec.rs index 04e1c4f..f255479 100644 --- a/src/core/codec.rs +++ b/src/core/codec.rs @@ -13,7 +13,6 @@ pub fn root_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::ROOT_NULL) } - pub const CLIENT_NAMES: &[(&[u8], u64)] = &[ (b"State", fixed_bits::CLIENT_STATE), (b"InMemory", fixed_bits::CLIENT_IN_MEMORY), @@ -31,7 +30,6 @@ pub fn client_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::CLIENT_NULL) } - pub const PROP_NAMES: &[(&[u8], u64)] = &[ (b"type", fixed_bits::PROP_TYPE), (b"key", fixed_bits::PROP_KEY), @@ -51,7 +49,6 @@ pub fn prop_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::PROP_NULL) } - pub const TYPE_NAMES: &[(&[u8], u64)] = &[ (b"integer", fixed_bits::TYPE_I64), (b"string", fixed_bits::TYPE_UTF8), @@ -67,7 +64,6 @@ pub fn type_encode(s: &[u8]) -> u64 { .unwrap_or(fixed_bits::TYPE_NULL) } - #[cfg(test)] mod tests { use super::*; From 828cda35c43beece2eac4babd27645876d7bf664 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sat, 4 Apr 2026 18:15:22 +0900 Subject: [PATCH 05/41] update README --- README.md | 61 +++++++++++---------------- docs/ja/README.md | 28 ++++-------- examples/manifest/tenant.yml | 82 ++++++++++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+), 56 deletions(-) create mode 100644 examples/manifest/tenant.yml diff --git a/README.md b/README.md index 19d5e2c..5764a4b 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,6 @@ # state-engine -Declarative state data management system for a process. -Structures state data on process and keeps it syncable using your store clients. -It behaves as described in YAML DSL. - -- Automates complex state lifecycles through developer-defined YAML manifests. -- Enables multi-tenant DB apps without junction tables. -- Built on a reimagined web architecture (see [## Background](#Background)). 
+Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). state-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. This OSS is positioned as the foundational technology for the reconstructed web system architecture described in ## background. - [also README(patch translation for ja-JP )](./docs/ja/README.md) @@ -29,23 +23,19 @@ It behaves as described in YAML DSL. **Before:** ```Rust // Manual cache management -let cache_key = format!("user:{}", id); -let user = redis.get(&cache_key).or_else(|| { +let session_key = format!("user:{}", id); +let user = redis.get(&session_key).or_else(|| { let user = db.query("SELECT * FROM users WHERE id=?", id)?; - redis.set(&cache_key, &user, 3600); + redis.set(&session_key, &user, 3600); Some(user) })?; ``` **After:** ```Rust -let user = state.get("cache.user")?; +let user = state.get("session.user")?; ``` -- ✅ Multi-tenant DB without junction tables -- ✅ Automatic KVS/DB synchronization -- ✅ No manual cache invalidation - ## Installation ```toml @@ -71,7 +61,6 @@ session: client: InMemory key: "request-header-user-key" - user: _store: client: KVS @@ -98,11 +87,11 @@ user: | Interface | expected store | fn | sample | |-----------|----------------|-----|--------| | `InMemoryClient` | Local Process Memory | `get()` / `set()` / `delete()` | [InMemoryAdapter](./examples/adapters/in_memory.rs) | +| `FileClient` | File I/O | as above | [DefaultFileClient](./src/ports/default.rs) | | `EnvClient` | Environment Variables | as above | 
[EnvAdapter](./examples/adapters/env_client.rs) | | `KVSClient` | Key-Vlue Store | as above | [KVSAdapter](./examples/adapters/kvs_client.rs) | | `DbClient` | SQL Database | as above | [DbAdapter](./examples/adapters/db_client.rs) | | `HttpClient` | Http Request | as above | [HttpAdapter](./examples/adapters/http_client.rs) | -| `FileClient` | File I/O | as above | [DefaultFileClient](./src/ports/default.rs) | - FileClient.get is always used by State to read manifest YAMLs. - It's not essential to implement all *Client. @@ -144,7 +133,7 @@ Full working example: [examples/app/src/main.rs](./examples/app/src/main.rs) ┌─────────────────────────────────────┐ │ Required Ports (App Adapters) │ ├─────────────────────────────────────┤ -│ InMemory / KVS / DB / HTTP / File │ +│ InMemory / File / KVS / DB / HTTP │ └─────────────────────────────────────┘ ▲ │ implement @@ -156,10 +145,9 @@ see for details [Architecture.md](./docs/en/Architecture.md) ## tree ``` -./ - README.md +state-egnine/ + README.md # this Cargo.toml - docs/ # guides en/ Architecture.md @@ -168,13 +156,12 @@ see for details [Architecture.md](./docs/en/Architecture.md) README.md Architecture.md YAML-guide.md - src/ examples/ manifest/ # manifest YAML examples - connection.yml # sample 1 - cache.yml # sample 2 - session.yml # sample 3 + connection.yml + cache.yml + session.yml adapters/ app/ docker-compose.yml @@ -182,17 +169,17 @@ see for details [Architecture.md](./docs/en/Architecture.md) Dockerfile db/ src/ - main.rs - adapters.rs ``` -## tests +## test -unit tests, intergeration tests on example app (docker compose) passed +Unit tests and integration tests on docker compose ```bash +# unit test cargo test --features=logging -- --nocapture +# integration tests cd examples/app && ./run.sh ``` @@ -200,15 +187,17 @@ cd examples/app && ./run.sh **reimagined web architecture** +By substituting a portion of human activities with data processing on network-capable computers, we gain benefits such as assurance of 
verifiability and reduction of physical constraints. The mechanism that realizes this — receiving input as electrical signals through hardware, processing it, and outputting to designated hardware — is called a web system. To realize a web system, it is first necessary to define the conceptual framework it requires in both human language and the language of computers.
+
+```yaml
+# computers structure of web system
+computer: "Network-capable nodes in the system."
+  server: "Computers that serve human users."
+  fixture: "Servers that provide continuous network."
+  terminal: "Servers that provide human interfaces."
+  orchestrator: "Computers responsible for maintenance of servers. 
(optional)" ``` ## License -MIT \ No newline at end of file +Apache-2.0 \ No newline at end of file diff --git a/docs/ja/README.md b/docs/ja/README.md index ced71d9..cb6436a 100644 --- a/docs/ja/README.md +++ b/docs/ja/README.md @@ -1,32 +1,20 @@ # state-engine -プロセスのための宣言的なステートデータ管理システムです。 -プロセス上でステートデータを構造化し、開発者が定義するストアAPIを使って同期可能な状態を保ちます。 -YAML DSLで記述された定義に従って振る舞います。 - -- YAML manifestによる複雑なステートライフサイクルの自動化 -- 中間表を必要としないマルチテナントDbアプリケーション -- [## background](#background)記載の再定義されたwebアーキテクチャに基づいて構築 +webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。state-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このOSSは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 ## background **webシステムの構成再定義** -- computer: (ネットワーク通信機能を要する)コンピューター。 -- server: 人間(ユーザー)に奉仕するcomputer -- orchestrator: webシステムを構成するcomputerのうち、システム内部の維持を管理するもの(optional) -- database: 明示的に削除されるまでデータを維持し、terminalやconductorにCRUDを受け付けるserver -- terminal: 人間が直接触るインターフェースを提供するserver. 
「端末」 -- conductor: databaseとterminalに対してそれぞれ通信し、二者の同期状態を維持するserver(optional) +人々の営みの動作の一部を、ネットワーク機能を持ったコンピューターのデータ処理で代替えすることで、その間の検証可能性の保証と、物理的制約の緩和などの恩恵を受けることができる。これを実現する、ハードウェアを通して電気信号として入力を受け取り、処理後、所定のハードウェア群に出力する仕組みのことを、webシステムと呼ぶ。webシステムの実現には、第一に、システムに必要な概念体系を、人間言語とコンピューターのビット列それぞれで定義することが必要である。 ```yaml -# terms relationship -computer: - orchestrator: - server: - database: - terminal: - conductor: +# computers structure of web system +computer: "(ネットワーク通信機能を要する)コンピューター" + server: "人間(ユーザー・開発者)に処理能力を提供する" + fixture: "継続的な待機により、ネットワーク機能を提供する" + terminal: "人間とのインターフェースを提供する。端末。" + orchestrator: "サーバー群の維持を管理する(optional)" ``` ## Architecture diff --git a/examples/manifest/tenant.yml b/examples/manifest/tenant.yml new file mode 100644 index 0000000..7b4a6f7 --- /dev/null +++ b/examples/manifest/tenant.yml @@ -0,0 +1,82 @@ +session: + user: + _load: + client: tenant_db + key: users.id.${session.user.id} + id: + _load: + client: http_request + key: authorization.user + password_hash: + _load: + client: tenant_db + key: users.id.${session.user.id} + secondary_key: password_hash + tenant: + _load: + client: common_db + key: tenants.id.${session.user.tenant.id} + id: + _load: + client: http_request + key: authorization.tenant + code: + _load: + secondary_key: code + _store: + client: redis + name: + _load: + secondary_key: name + email: + _load: + secondary_key: email + locale: + _load: + secondary_key: locale + allowed_ips: + _load: + secondary_key: allowed_ips + is_manager: + _load: + secondary_key: is_manager + name: + _load: + secondary_key: name + email: + _load: + secondary_key: email + preference: + color-mode: + _load: + secondary_key: preference.color-mode + +common_db: + _load: + client: env + host: + _load: + key: host + port: + _load: + key: port + driver: postgres + charset: UTF8 + +tenant_db: + _load: + client:common_db + key: tenants.id.${session.user.tenant.id} + + host: + _load: + secondary_key: host + port: + _load: + secondary_key: port + username: + 
_load: + secondary_key: username + password: + _load: + secondary_key: password \ No newline at end of file From 5977c12e3429c4243495dadc9d63e25cc3985d2e Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 00:46:36 +0900 Subject: [PATCH 06/41] underway update structure --- Cargo.toml | 3 +- README.md | 177 ++++++------- docs/{ja => }/Architecture.md | 318 ++++++++++++++++++++++++ docs/{en/YAML-guide.md => DSL_guide.md} | 170 ++++++++++++- docs/en/Architecture.md | 315 ----------------------- docs/ja/README.md | 26 -- docs/ja/YAML-guide.md | 165 ------------ examples/implements.rs | 0 examples/{manifest => }/tenant.yml | 0 9 files changed, 566 insertions(+), 608 deletions(-) rename docs/{ja => }/Architecture.md (54%) rename docs/{en/YAML-guide.md => DSL_guide.md} (51%) delete mode 100644 docs/en/Architecture.md delete mode 100644 docs/ja/README.md delete mode 100644 docs/ja/YAML-guide.md create mode 100644 examples/implements.rs rename examples/{manifest => }/tenant.yml (100%) diff --git a/Cargo.toml b/Cargo.toml index 0f560f2..a385779 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,8 +14,8 @@ categories = ["caching", "data-structures", "web-programming"] exclude = ["examples/*"] [dependencies] -serde_yaml_ng = "0.10" log = { version = "0.4", optional = true } +serde_yaml_ng = { version = "0.10", optional = true } [dev-dependencies] env_logger = "0.11" @@ -24,3 +24,4 @@ ctor = "0.2" [features] default = [] logging = ["log"] +builder = ["serde_yaml_ng"] diff --git a/README.md b/README.md index 5764a4b..c2edc14 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,16 @@ # state-engine -Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). 
state-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. This OSS is positioned as the foundational technology for the reconstructed web system architecture described in ## background. +Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). state-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. This OSS is positioned as the foundational technology for the reconstructed web system architecture described in [## background](#background). -- [also README(patch translation for ja-JP )](./docs/ja/README.md) +- [original text(ja)](#original-text-ja) ## Version -| Version | Status | Date | description | -|---------|---------|------|-------------| -| 0.1 | Released | 2026-2-12 | initial | -| 0.1.5 | Current | 2026-3-21 | improve #43 | -| 0.1.6 | Scheduled | 2026-4-5 | improve #49 #50 | +| Version | Status | Date | Description | +|---------|-----------|-----------|-------------| +| 0.1 | Released | 2026-2-12 | initial | +| 0.1.5 | Current | 2026-3-21 | improve #43 | +| 0.1.6 | Scheduled | 2026-4-5 | improve #49 #50 | ## Provided Functions @@ -49,95 +49,64 @@ state-engine = "0.1" 1. Write a yaml file. 
```yaml -# manifest/example.yml session: - user-key: - _state: - type: integer - _store: - client: InMemory - key: "request-attributes-user-key" - _load: - client: InMemory - key: "request-header-user-key" - -user: - _store: - client: KVS - key: "user:${example.session.user-key}" - _load: - client: Db - table: "users" - where: "id=${example.session.user-key}" - map: - name: "name" - name: - _state: - type: string + user: + id: + _load: + client: Memory + key: "request.authorization.user.id" + name: + _load: + client: Db + key: "users.${session.user.id}.name" ``` -| case | sample | -|------|--------| -| cache in KVS | [cache.yml](./examples/manifest/cache.yml) | -| database connection config | [connection.yml](./examples/manifest/connection.yml) | -| request scope | [session.yml](./examples/manifest/session.yml) | +| case | example | +|-------------------|--------| +| multi-tenant app | [tenant.yml](./examples/manifest.yml) | -2. Implement some Required Ports for your stores. +2. Implement some required ports for your stores. -| Interface | expected store | fn | sample | -|-----------|----------------|-----|--------| -| `InMemoryClient` | Local Process Memory | `get()` / `set()` / `delete()` | [InMemoryAdapter](./examples/adapters/in_memory.rs) | -| `FileClient` | File I/O | as above | [DefaultFileClient](./src/ports/default.rs) | -| `EnvClient` | Environment Variables | as above | [EnvAdapter](./examples/adapters/env_client.rs) | -| `KVSClient` | Key-Vlue Store | as above | [KVSAdapter](./examples/adapters/kvs_client.rs) | -| `DbClient` | SQL Database | as above | [DbAdapter](./examples/adapters/db_client.rs) | -| `HttpClient` | Http Request | as above | [HttpAdapter](./examples/adapters/http_client.rs) | +| Trait | fn | example | +|---------------|----------------------------|---------| +| `StoreClient` | `get()` `set()` `delete()` | [implements.rs](./examples/implements.rs) | -- FileClient.get is always used by State to read manifest YAMLs. 
-- It's not essential to implement all *Client. - -3. Initialize State with your adapters and use it. +3. Initialize Manifest, Store Clients and State. ```rust +use state_engine::Manifest; use state_engine::State; use std::sync::Arc; -// Create adapter instances -let in_memory = Arc::new(InMemoryAdapter::new()); -let kvs = Arc::new(KVSAdapter::new()?); -let db = Arc::new(DbAdapter::new()?); +let memory = Arc::new(MemoryImpl::new()); +let db = Arc::new(DbImpl::new()?); + +let manifest = Manifest::new() -// Build State with adapters -let mut state = State::new("./manifest") - .with_in_memory(in_memory) - .with_kvs(kvs) +let mut state = State::new() + .with_memory(memory) .with_db(db); // Use state-engine -let user = state.get("example.user.name")?; +let user_name = state.get("session.user.name")?; ``` -Full working example: [examples/app/src/main.rs](./examples/app/src/main.rs) - ## Architecture ``` - manifestDir/*.yml - │ read via FileClient - ▼ -┌─────────────────────────────────────┐ -│ State (Public API) │ -└───────┬─────────────────────────────┘ - │ depends on - ▼ -┌─────────────────────────────────────┐ -│ Required Ports (App Adapters) │ -├─────────────────────────────────────┤ -│ InMemory / File / KVS / DB / HTTP │ -└─────────────────────────────────────┘ - ▲ - │ implement - Application +┌─────────────┐ ┌────────────────────────────────┐ +│ DSL YAMLs │------>│ Manifest (app global instance) │ +└─────────────┘compile└───────────┬────────────────────┘ + │ + ▼ +┌─────────────┐ ┌────────────────────────────────┐ +│ Application │<------│ State (request scope instance) │ +└─────────────┘provide└────────────────────────────────┘ + ▲ + │ +┌─────────────┐ ┌───────────┴────────────────────┐ +│ Implements │------>│ Store Clients (Required Ports) │ +└─────────────┘ impl └────────────────────────────────┘ ``` see for details [Architecture.md](./docs/en/Architecture.md) @@ -148,39 +117,26 @@ see for details [Architecture.md](./docs/en/Architecture.md) state-egnine/ 
README.md # this Cargo.toml - docs/ # guides - en/ - Architecture.md - YAML-guide.md - ja/ - README.md - Architecture.md - YAML-guide.md + docs/ + DSL_guide.md + Architecture.md + src/ + ports/ + examples/ - manifest/ # manifest YAML examples - connection.yml - cache.yml - session.yml - adapters/ + manifest.yml + implements.rs app/ - docker-compose.yml - Cargo.toml - Dockerfile - db/ - src/ ``` -## test +## Test -Unit tests and integration tests on docker compose +Passed unit and integration tests ```bash # unit test cargo test --features=logging -- --nocapture - -# integration tests -cd examples/app && ./run.sh ``` ## Background @@ -200,4 +156,25 @@ computer: "Network-capable nodes in the system." ## License -Apache-2.0 \ No newline at end of file +Apache-2.0 + +--- + +## Original Text (ja) + +webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。state-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このOSSは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 + +### 背景 + +**webシステムの構成再定義** + +人々の営みの動作の一部を、ネットワーク機能を持ったコンピューターのデータ処理で代替えすることで、その間の検証可能性の保証と、物理的制約の緩和などの恩恵を受けることができる。これを実現する、ハードウェアを通して電気信号として入力を受け取り、処理後、所定のハードウェア群に出力する仕組みのことを、webシステムと呼ぶ。webシステムの実現には、第一に、システムに必要な概念体系を、人間言語とコンピューターのビット列それぞれで定義することが必要である。 + +```yaml +# computers structure of web system +computer: "(ネットワーク通信機能を要する)コンピューター" + server: "人間(ユーザー・開発者)に処理能力を提供する" + fixture: "継続的な待機により、ネットワーク機能を提供する" + terminal: "人間とのインターフェースを提供する。端末。" + orchestrator: "サーバー群の維持を管理する(optional)" +``` \ No newline at end of file diff --git a/docs/ja/Architecture.md b/docs/Architecture.md similarity index 54% rename from docs/ja/Architecture.md rename to docs/Architecture.md index 876da21..a38809d 100644 --- a/docs/ja/Architecture.md +++ b/docs/Architecture.md @@ -2,6 +2,324 @@ ## 
index +- provided modules (library provided) + 1. State + +- required modules (library required*) + 1. InMemoryClient + 2. KVSClient + 3. DbClient + 4. EnvClient + 5. HttpClient + 6. FileClient + +- internal modules + 1. core::Manifest + 2. Store + 3. Load + 4. u64(fixed_bits.rs) + 5. Pools & Maps(pool.rs) + 6. parser.rs + 7. LogFormat + +*: *_client impl are not essential, optional modules. + +--- + +## provided modules + +**State** is the sole public API of the library. + +A module performing `get()`/`set()`/`delete()`/`exists()` operations on state data following the `_store`/`_load` blocks defined in manifest YAMLs. `get()` automatically attempts loading on key miss. `set()` does not trigger loading. `delete()` removes the specified key from both store and cache. `exists()` checks key existence without triggering auto-load. It maintains an instance-level cache (`state_values`) separate from persistent stores. + +State owns YAML I/O: it reads manifest files via `FileClient` and parses them into `core::Manifest` on first access. `core::Manifest` is an internal no_std struct that owns all bit-record data and provides decode/find/build_config queries. Relative placeholders in values are qualified to absolute paths at parse time. Metadata (`_store`/`_load`/`_state`) is inherited from parent nodes; child overrides parent. + +## State + +### State::get("filename.node") + +Reference the state represented by the specified node, returning value or collections. + +Returns: `Result, StateError>` + +**Operation flow:** +1. Check `called_keys` (recursion / limit detection) +2. Load manifest file via `FileClient` (first access only) +3. `core::Manifest::find()` → get key_idx +4. **Check `state_values` (by key_idx)** ← Highest priority +5. `core::Manifest::get_meta()` → get MetaIndices +6. If `_load.client == State`: skip store. Otherwise: retrieve from store (KVS/InMemoryClient) +7. On miss, auto-load via `Load::handle()` +8. 
Return `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` + +**Auto-load:** +- If the state key misses, attempt auto-retrieval via `Load::handle()` +- On error, return `Err(StateError::LoadFailed(LoadError))` + +**Note on _state.type:** +```yaml +tenant_id: + _state: + type: integer # Metadata only - validation/casting not yet implemented +``` + +The `_state.type` field is currently metadata-only and not enforced by State operations. + +--- + +### State::set("filename.node", value, ttl) + +Set a value to the state represented by the specified node. + +Returns: `Result` + +**Behavior:** +- Save to persistent store (KVS/InMemoryClient) +- Also save to `state_values` (instance cache) +- If store is KVS, TTL can be set + +**TTL behavior:** +- `ttl` argument specified → Use specified value +- No `ttl` argument, `_store.ttl` in YAML → Use YAML default +- No `ttl` argument, no `_store.ttl` in YAML → Maintain current value + +--- + +### State::delete("filename.node") + +Delete the {key:value} record represented by the specified node. + +Returns: `Result` + +**Behavior:** +- Delete from persistent store (KVS/InMemoryClient) +- Also delete from `state_values` (instance cache) +- After deletion, the node shows miss + +--- + +### State::exists("filename.node") + +Check if a key exists without triggering auto-load. + +Returns: `Result` + +**Behavior:** +- Check `state_values` (instance cache) first +- Then check persistent store (KVS/InMemoryClient) +- **Does NOT trigger auto-load** (unlike `get()`) +- Returns `Ok(true)` if exists, `Ok(false)` otherwise + +**Comparison with get():** +- `get()`: Returns value, triggers auto-load on miss +- `exists()`: Returns boolean, never triggers auto-load + +--- + +## required modules + +Application must implement the following traits to handle data stores: + +1. 
**InMemoryClient** + - expected operations: `get()`/`set()`/`delete()` + - arguments: `"key":...` from `_{store,load}.key:...` in Manifest + - expected target: Local process memory + +2. **KVSClient** + - expected operations: `get()`/`set()`/`delete()` + - trait signature: + - `fn get(&self, key: &str) -> Option` + - `fn set(&self, key: &str, value: String, ttl: Option) -> bool` + - `fn delete(&self, key: &str) -> bool` + - arguments: `"key":...` from `_{store,load}.key:...`, `ttl:...` from `_{store,load}.ttl:...`(optional) in Manifest + - expected target: Key-Value Store (Redis, etc.) + - **Important**: KVSClient handles String only (primitive type). State layer performs serialize/deserialize: + - **serialize**: All values → JSON string (preserves type: Number/String/Bool/Null/Array/Object) + - **deserialize**: JSON string → Value (accurately restores type) + +3. **DbClient** + - expected operations: `get()`/`set()`/`delete()` + - trait signature: + - `fn get(&self, connection: &Value, table: &str, columns: &[&str], where_clause: Option<&str>) -> Option>>` + - `fn set(&self, connection: &Value, table: &str, values: &HashMap, where_clause: Option<&str>) -> bool` + - `fn delete(&self, connection: &Value, table: &str, where_clause: Option<&str>) -> bool` + - arguments: `"connection":...`, `"table":...`, `"columns":...` from `_{load}.map.*:...`, `"where_clause":...`(optional) + - only for `_load.client` + +4. **EnvClient** + - expected operations: `get()`/`set()`/`delete()` + - arguments: `"key":...` from `_{load}.map.*:...` in Manifest + - expected target: environment variables + - only for `_load.client` + +5. 
**HttpClient** + - expected operations: `get()`/`set()`/`delete()` + - trait signature: + - `fn get(&self, url: &str, headers: Option<&HashMap>) -> Option` + - `fn set(&self, url: &str, body: Value, headers: Option<&HashMap>) -> bool` + - `fn delete(&self, url: &str, headers: Option<&HashMap>) -> bool` + - arguments: `"url":...` from `_{store,load}.url:...`, `"headers":...` from `_{store,load}.headers:...` + - expected target: HTTP endpoints + - for both `_store.client` and `_load.client` + +6. **FileClient** + - expected operations: `get()`/`set()`/`delete()` + - trait signature: + - `fn get(&self, key: &str) -> Option` + - `fn set(&self, key: &str, value: String) -> bool` + - `fn delete(&self, key: &str) -> bool` + - arguments: `"key":...` from `_{store,load}.key:...` in Manifest + - expected target: File I/O + - default impl `DefaultFileClient` is built-in (std::fs based) + - for both `_store.client` and `_load.client` + - **always used by State to read manifest YAMLs** + +--- + +## Load::handle() + +When `State::get()` misses a value, retrieve data according to `_load` settings. + +**Client types:** +- `Env` - Load from environment variables +- `Db` - Load from database +- `KVS` - Load from KVS +- `InMemory` - Load from process memory +- `Http` - Load from HTTP endpoint +- `File` - Load from file +- `State` - Reference another State key directly (does not call Load::handle()) + +**Special behavior for State client:** +```yaml +tenant_id: + _load: + client: State + key: ${org_id} # Directly returns State::get("cache.user.org_id") +``` + +When `_load.client: State`, `Load::handle()` is not called; the value of `_load.key` (placeholder already resolved) is returned directly. 
+ +**Design rules:** +- No `_load` → No auto-load, return `Ok(None)` +- No `_load.client` → No auto-load, return `Ok(None)` +- `_load.client: State` → Use `_load.key` value directly +- Other clients → Auto-load via `Load::handle()` + +**Recursion depth limit:** +- `max_recursion = 20` +- `called_keys: HashSet` tracks keys currently being processed +- On limit exceeded or circular key detected: `Err(StateError::RecursionLimitExceeded)` + +--- + +## State::get() Detailed Flow + +``` +1. called_keys check (recursion / limit detection) + ↓ +2. Load manifest file via FileClient (first access only) + ↓ +3. core::Manifest::find() → get key_idx + ↓ +4. ★ Check state_values (by key_idx) ← Highest priority + if find_state_value(key_idx).is_some() { return Ok(Some(value)); } + ↓ +5. core::Manifest::get_meta() → get MetaIndices + ↓ +6. _load.client == State → skip store + otherwise: retrieve from store (KVS/InMemoryClient) + ↓ +7. On miss, auto-load + ├─→ build_config() resolves placeholders + ├─→ Load::handle(config) + │ ├─→ client: Db → DbClient::get() + │ ├─→ client: KVS → KVSClient::get() + │ ├─→ client: Env → EnvClient::get() + │ ├─→ client: InMemory → InMemoryClient::get() + │ ├─→ client: Http → HttpClient::get() + │ └─→ client: File → FileClient::get() + ├─→ Save to persistent store + └─→ Save to state_values + ↓ +8. Return Ok(Some(value)) / Ok(None) / Err(StateError) +``` + +--- + +## state_values (Instance Memory Cache) + +The State struct maintains an instance-level cache (`state_values: StateValueList`) separate from persistent stores (KVS/InMemoryClient). + +**Important:** This is NOT InMemoryClient. It is a variable of the State instance itself. + +**Purpose:** +1. **Speed up duplicate `State::get()` calls within the same request** +2. **Reduce access count to KVS/InMemoryClient** +3. 
**Avoid duplicate loads** (don't load the same key multiple times) + +**Index:** +- Keyed by `key_idx: u16` — globally unique index in KeyList +- Not keyed by store key string + +**Save timing:** +- On successful retrieval from store or load in `State::get()` +- On `State::set()` + +**Delete timing:** +- On `State::delete()` + +**Lifecycle:** +- State instance created: empty +- During State lifetime: accumulates +- State instance dropped: destroyed (memory released) + +--- + +## Placeholder Resolution Rules + +`${}` paths are **qualified to absolute paths at parse time** — no conversion happens at State runtime. + +**Qualify rule at parse time (`qualify_path()`):** +- Path contains `.` → treated as absolute, used as-is +- No `.` → converted to `filename.ancestors.path` + +**Example (`${tenant_id}` in `cache.yml` under `user._load.where`):** +``` +qualify_path("tenant_id", "cache", ["user"]) +→ "cache.user.tenant_id" +``` + +**Placeholder resolution at State runtime (`resolve_value_to_string()`):** +- Retrieve qualified path from path_map +- Call `State::get(qualified_path)` to get the value + +--- + +## error case + +**ManifestError:** +- `FileNotFound` — manifest file not found in manifest dir +- `AmbiguousFile` — two files with the same name but different extensions (`.yml` and `.yaml`) exist in manifestDir. Manifest ignores extensions (dot-separated paths represent hierarchy), so it cannot distinguish the two. Same-extension duplicates are assumed to be prevented at the OS level. 
+- `ParseError` — YAML parse failed + +**LoadError:** +- `ClientNotConfigured` — required client (Env/KVS/DB/HTTP/File) is not set on State +- `ConfigMissing(String)` — a required config key (key/url/table/map/connection) is missing in the manifest +- `NotFound(String)` — the client call succeeded but returned no data +- `ParseError(String)` — JSON parse error from client response + +**StoreError:** +- `ClientNotConfigured` — required client (KVS/InMemory/HTTP/File) is not set on State +- `ConfigMissing(String)` — a required config key (key/url/client) is missing in the manifest +- `SerializeError(String)` — JSON serialize error +- `UnsupportedClient(u64)` — unsupported client id in config + +--- + +## Original Text (ja) + +### index + - provided modules (ライブラリ提供モジュール) 1. State diff --git a/docs/en/YAML-guide.md b/docs/DSL_guide.md similarity index 51% rename from docs/en/YAML-guide.md rename to docs/DSL_guide.md index ca8e667..dbc9e9c 100644 --- a/docs/en/YAML-guide.md +++ b/docs/DSL_guide.md @@ -1,4 +1,4 @@ -# YAML Extended DSL guide +# DSL guide ## terms @@ -182,3 +182,171 @@ _load: - Checks if key exists without triggering auto-load - Returns `Ok(true/false)` - Lightweight existence check for conditional logic + +--- + +## Original Text (ja) + +### 用語 + +- `meta keys`: `_`で始まるkey及び、それ以下のkey群 +- `field keys`: `meta keys`では無いkey群 +- `leaf keys`: 子keyを持たず値を持つkey群 +- `value`: leaf keysの値。YAML内で省略された場合はnullが入る +- `path`: 出発keyから対象keyまで、`.`区切りでkey名を並べたパス表現 +- `qualified path`: 出発keyを対象keyの記述された`filename.`とした、一意な完全修飾パス +- `placeholder`: ${path}の形で、指定keyのState.get()の結果を参照する記述形式 +- `template`: "user${user_id}"の様に、placeholderを文字列に埋め込む記述形式 + +### rule + +- `---`によるYAML区切りは使用不可 +- `placeholder`, `template`はvalue内のみで使用可能 + +### 基本構造 + +```yaml +field_key: + _state: # ステートのメタデータ(オプション) + _store: # 保存先メタデータ (ファイルルートキーで必須, 子孫キーへ継承) + _load: # 自動ロード元メタデータ (オプション) +``` + +### コアコンセプト + +### 1. 
meta key 継承
+
+Each field key inherits parent's meta keys, and can override:
+
+```yaml
+_store:
+  client: KVS
+  key: "root:${id}"
+
+user:
+  _store:
+    key: "user:${sso_user_id}" # キーが上書きされる, client: KVSは継承
+
+  tenant_id:
+    # client: KVS, key: user:${sso_user_id}を継承
+```
+
+#### 2. placeholder 解決
+
+State engineは`${...}`を`State::get()`呼び出しで解決します:
+
+```yaml
+tenant:
+  _load:
+    table: "tenants"
+    where: "id=${user.tenant_id}" # → State::get("user.tenant_id")
+```
+
+**placeholderの省略記法:**
+
+Manifestは`${tenant_id}`を`${cache.user.tenant_id}`(絶対パス)に変換します。
+
+`${path}` のパスは、`.` を含むかどうかで絶対/相対が決まります:
+
+- `.` を含まない → 相対パス。parse時に `filename.ancestors.path` へ自動修飾
+- `.` を含む → 絶対パスとみなし、そのまま使用
+
+```yaml
+# cache.yml の user.tenant_id 内
+key: "${org_id}" # → cache.user.org_id(相対)
+key: "${cache.user.org_id}" # → cache.user.org_id(絶対、同じ結果)
+key: "${session.sso_user_id}" # → session.sso_user_id(別ファイル参照)
+```
+
+**制約:** 省略記法(相対パス)では `.` を使えないため、兄弟ノードの子を参照する場合は完全修飾パスで記述してください。
+
+```yaml
+# NG: user.id と書くと絶対パスとみなされ、意図しない参照になる
+key: "${user.id}" # → State::get("user.id") ← ファイル名なし、KeyNotFound
+
+# OK: 完全修飾パスで記述する
+key: "${cache.user.id}" # → State::get("cache.user.id")
+```
+
+#### 3. 
クライアント種別 + +**_store用(保存先):** +```yaml +_store: + client: InMemory # プロセスメモリ + client: KVS # Redis, Memcached等 + client: HTTP # HTTPエンドポイント +``` + +**_load用(読込元):** +```yaml +_load: + client: State # 別のStateキーを参照 + client: InMemory # プロセスメモリ + client: Env # 環境変数 + client: KVS # Redis, Memcached等 + client: Db # データベース + client: HTTP # HTTPエンドポイント +``` + +使用する各クライアントのアダプターを実装する必要があります(Required Ports参照)。 + +##### クライアント固有のパラメータ + +**_store.client: InMemory** +```yaml +_store: + client: InMemory + key: "session:${token}" # (string) ストレージキー(プレースホルダー可) +``` + +**_load.client: Env** +```yaml +_load: + client: Env + map: # (object, required) 環境変数マッピング + yaml_key: "ENV_VAR_NAME" +``` + +**_load.client: State** +```yaml +_load: + client: State + key: "${org_id}" # (string) 別のStateキーへの参照 +``` + +**_store.client: KVS** +```yaml +_store: + client: KVS + key: "user:${id}" # (string) ストレージキー(プレースホルダー可) + ttl: 3600 # (integer, optional) TTL(秒) +``` + +**_load.client: Db** +```yaml +_load: + client: Db + connection: ${connection.tenant} # (Value) 接続設定オブジェクトまたは参照 + table: "users" # (string) テーブル名 + where: "id=${user.id}" # (string, optional) WHERE句 + map: # (object, required) カラムマッピング + yaml_key: "db_column" +``` + +**_store.client: HTTP / _load.client: HTTP** +```yaml +_store: + client: HTTP + url: "https://api.example.com/state/${id}" # (string) エンドポイントURL + headers: # (object, optional) リクエストヘッダー + Authorization: "Bearer ${token}" + +_load: + client: HTTP + url: "https://api.example.com/data/${id}" # (string) エンドポイントURL + headers: # (object, optional) リクエストヘッダー + Authorization: "Bearer ${token}" + map: # (object, optional) レスポンスからのフィールド抽出 + yaml_key: "response_field" +``` \ No newline at end of file diff --git a/docs/en/Architecture.md b/docs/en/Architecture.md deleted file mode 100644 index b040b9c..0000000 --- a/docs/en/Architecture.md +++ /dev/null @@ -1,315 +0,0 @@ -# Architecture - -## index - -- provided modules (library provided) - 1. 
State - -- required modules (library required*) - 1. InMemoryClient - 2. KVSClient - 3. DbClient - 4. EnvClient - 5. HttpClient - 6. FileClient - -- internal modules - 1. core::Manifest - 2. Store - 3. Load - 4. u64(fixed_bits.rs) - 5. Pools & Maps(pool.rs) - 6. parser.rs - 7. LogFormat - -*: *_client impl are not essential, optional modules. - ---- - -## provided modules - -**State** is the sole public API of the library. - -A module performing `get()`/`set()`/`delete()`/`exists()` operations on state data following the `_store`/`_load` blocks defined in manifest YAMLs. `get()` automatically attempts loading on key miss. `set()` does not trigger loading. `delete()` removes the specified key from both store and cache. `exists()` checks key existence without triggering auto-load. It maintains an instance-level cache (`state_values`) separate from persistent stores. - -State owns YAML I/O: it reads manifest files via `FileClient` and parses them into `core::Manifest` on first access. `core::Manifest` is an internal no_std struct that owns all bit-record data and provides decode/find/build_config queries. Relative placeholders in values are qualified to absolute paths at parse time. Metadata (`_store`/`_load`/`_state`) is inherited from parent nodes; child overrides parent. - -## State - -### State::get("filename.node") - -Reference the state represented by the specified node, returning value or collections. - -Returns: `Result, StateError>` - -**Operation flow:** -1. Check `called_keys` (recursion / limit detection) -2. Load manifest file via `FileClient` (first access only) -3. `core::Manifest::find()` → get key_idx -4. **Check `state_values` (by key_idx)** ← Highest priority -5. `core::Manifest::get_meta()` → get MetaIndices -6. If `_load.client == State`: skip store. Otherwise: retrieve from store (KVS/InMemoryClient) -7. On miss, auto-load via `Load::handle()` -8. 
Return `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` - -**Auto-load:** -- If the state key misses, attempt auto-retrieval via `Load::handle()` -- On error, return `Err(StateError::LoadFailed(LoadError))` - -**Note on _state.type:** -```yaml -tenant_id: - _state: - type: integer # Metadata only - validation/casting not yet implemented -``` - -The `_state.type` field is currently metadata-only and not enforced by State operations. - ---- - -### State::set("filename.node", value, ttl) - -Set a value to the state represented by the specified node. - -Returns: `Result` - -**Behavior:** -- Save to persistent store (KVS/InMemoryClient) -- Also save to `state_values` (instance cache) -- If store is KVS, TTL can be set - -**TTL behavior:** -- `ttl` argument specified → Use specified value -- No `ttl` argument, `_store.ttl` in YAML → Use YAML default -- No `ttl` argument, no `_store.ttl` in YAML → Maintain current value - ---- - -### State::delete("filename.node") - -Delete the {key:value} record represented by the specified node. - -Returns: `Result` - -**Behavior:** -- Delete from persistent store (KVS/InMemoryClient) -- Also delete from `state_values` (instance cache) -- After deletion, the node shows miss - ---- - -### State::exists("filename.node") - -Check if a key exists without triggering auto-load. - -Returns: `Result` - -**Behavior:** -- Check `state_values` (instance cache) first -- Then check persistent store (KVS/InMemoryClient) -- **Does NOT trigger auto-load** (unlike `get()`) -- Returns `Ok(true)` if exists, `Ok(false)` otherwise - -**Comparison with get():** -- `get()`: Returns value, triggers auto-load on miss -- `exists()`: Returns boolean, never triggers auto-load - ---- - -## required modules - -Application must implement the following traits to handle data stores: - -1. 
**InMemoryClient** - - expected operations: `get()`/`set()`/`delete()` - - arguments: `"key":...` from `_{store,load}.key:...` in Manifest - - expected target: Local process memory - -2. **KVSClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String, ttl: Option) -> bool` - - `fn delete(&self, key: &str) -> bool` - - arguments: `"key":...` from `_{store,load}.key:...`, `ttl:...` from `_{store,load}.ttl:...`(optional) in Manifest - - expected target: Key-Value Store (Redis, etc.) - - **Important**: KVSClient handles String only (primitive type). State layer performs serialize/deserialize: - - **serialize**: All values → JSON string (preserves type: Number/String/Bool/Null/Array/Object) - - **deserialize**: JSON string → Value (accurately restores type) - -3. **DbClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, connection: &Value, table: &str, columns: &[&str], where_clause: Option<&str>) -> Option>>` - - `fn set(&self, connection: &Value, table: &str, values: &HashMap, where_clause: Option<&str>) -> bool` - - `fn delete(&self, connection: &Value, table: &str, where_clause: Option<&str>) -> bool` - - arguments: `"connection":...`, `"table":...`, `"columns":...` from `_{load}.map.*:...`, `"where_clause":...`(optional) - - only for `_load.client` - -4. **EnvClient** - - expected operations: `get()`/`set()`/`delete()` - - arguments: `"key":...` from `_{load}.map.*:...` in Manifest - - expected target: environment variables - - only for `_load.client` - -5. 
**HttpClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, url: &str, headers: Option<&HashMap>) -> Option` - - `fn set(&self, url: &str, body: Value, headers: Option<&HashMap>) -> bool` - - `fn delete(&self, url: &str, headers: Option<&HashMap>) -> bool` - - arguments: `"url":...` from `_{store,load}.url:...`, `"headers":...` from `_{store,load}.headers:...` - - expected target: HTTP endpoints - - for both `_store.client` and `_load.client` - -6. **FileClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String) -> bool` - - `fn delete(&self, key: &str) -> bool` - - arguments: `"key":...` from `_{store,load}.key:...` in Manifest - - expected target: File I/O - - default impl `DefaultFileClient` is built-in (std::fs based) - - for both `_store.client` and `_load.client` - - **always used by State to read manifest YAMLs** - ---- - -## Load::handle() - -When `State::get()` misses a value, retrieve data according to `_load` settings. - -**Client types:** -- `Env` - Load from environment variables -- `Db` - Load from database -- `KVS` - Load from KVS -- `InMemory` - Load from process memory -- `Http` - Load from HTTP endpoint -- `File` - Load from file -- `State` - Reference another State key directly (does not call Load::handle()) - -**Special behavior for State client:** -```yaml -tenant_id: - _load: - client: State - key: ${org_id} # Directly returns State::get("cache.user.org_id") -``` - -When `_load.client: State`, `Load::handle()` is not called; the value of `_load.key` (placeholder already resolved) is returned directly. 
- -**Design rules:** -- No `_load` → No auto-load, return `Ok(None)` -- No `_load.client` → No auto-load, return `Ok(None)` -- `_load.client: State` → Use `_load.key` value directly -- Other clients → Auto-load via `Load::handle()` - -**Recursion depth limit:** -- `max_recursion = 20` -- `called_keys: HashSet` tracks keys currently being processed -- On limit exceeded or circular key detected: `Err(StateError::RecursionLimitExceeded)` - ---- - -## State::get() Detailed Flow - -``` -1. called_keys check (recursion / limit detection) - ↓ -2. Load manifest file via FileClient (first access only) - ↓ -3. core::Manifest::find() → get key_idx - ↓ -4. ★ Check state_values (by key_idx) ← Highest priority - if find_state_value(key_idx).is_some() { return Ok(Some(value)); } - ↓ -5. core::Manifest::get_meta() → get MetaIndices - ↓ -6. _load.client == State → skip store - otherwise: retrieve from store (KVS/InMemoryClient) - ↓ -7. On miss, auto-load - ├─→ build_config() resolves placeholders - ├─→ Load::handle(config) - │ ├─→ client: Db → DbClient::get() - │ ├─→ client: KVS → KVSClient::get() - │ ├─→ client: Env → EnvClient::get() - │ ├─→ client: InMemory → InMemoryClient::get() - │ ├─→ client: Http → HttpClient::get() - │ └─→ client: File → FileClient::get() - ├─→ Save to persistent store - └─→ Save to state_values - ↓ -8. Return Ok(Some(value)) / Ok(None) / Err(StateError) -``` - ---- - -## state_values (Instance Memory Cache) - -The State struct maintains an instance-level cache (`state_values: StateValueList`) separate from persistent stores (KVS/InMemoryClient). - -**Important:** This is NOT InMemoryClient. It is a variable of the State instance itself. - -**Purpose:** -1. **Speed up duplicate `State::get()` calls within the same request** -2. **Reduce access count to KVS/InMemoryClient** -3. 
**Avoid duplicate loads** (don't load the same key multiple times) - -**Index:** -- Keyed by `key_idx: u16` — globally unique index in KeyList -- Not keyed by store key string - -**Save timing:** -- On successful retrieval from store or load in `State::get()` -- On `State::set()` - -**Delete timing:** -- On `State::delete()` - -**Lifecycle:** -- State instance created: empty -- During State lifetime: accumulates -- State instance dropped: destroyed (memory released) - ---- - -## Placeholder Resolution Rules - -`${}` paths are **qualified to absolute paths at parse time** — no conversion happens at State runtime. - -**Qualify rule at parse time (`qualify_path()`):** -- Path contains `.` → treated as absolute, used as-is -- No `.` → converted to `filename.ancestors.path` - -**Example (`${tenant_id}` in `cache.yml` under `user._load.where`):** -``` -qualify_path("tenant_id", "cache", ["user"]) -→ "cache.user.tenant_id" -``` - -**Placeholder resolution at State runtime (`resolve_value_to_string()`):** -- Retrieve qualified path from path_map -- Call `State::get(qualified_path)` to get the value - ---- - -## error case - -**ManifestError:** -- `FileNotFound` — manifest file not found in manifest dir -- `AmbiguousFile` — two files with the same name but different extensions (`.yml` and `.yaml`) exist in manifestDir. Manifest ignores extensions (dot-separated paths represent hierarchy), so it cannot distinguish the two. Same-extension duplicates are assumed to be prevented at the OS level. 
-- `ParseError` — YAML parse failed - -**LoadError:** -- `ClientNotConfigured` — required client (Env/KVS/DB/HTTP/File) is not set on State -- `ConfigMissing(String)` — a required config key (key/url/table/map/connection) is missing in the manifest -- `NotFound(String)` — the client call succeeded but returned no data -- `ParseError(String)` — JSON parse error from client response - -**StoreError:** -- `ClientNotConfigured` — required client (KVS/InMemory/HTTP/File) is not set on State -- `ConfigMissing(String)` — a required config key (key/url/client) is missing in the manifest -- `SerializeError(String)` — JSON serialize error -- `UnsupportedClient(u64)` — unsupported client id in config diff --git a/docs/ja/README.md b/docs/ja/README.md deleted file mode 100644 index cb6436a..0000000 --- a/docs/ja/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# state-engine - -webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。state-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このOSSは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 - -## background - -**webシステムの構成再定義** - -人々の営みの動作の一部を、ネットワーク機能を持ったコンピューターのデータ処理で代替えすることで、その間の検証可能性の保証と、物理的制約の緩和などの恩恵を受けることができる。これを実現する、ハードウェアを通して電気信号として入力を受け取り、処理後、所定のハードウェア群に出力する仕組みのことを、webシステムと呼ぶ。webシステムの実現には、第一に、システムに必要な概念体系を、人間言語とコンピューターのビット列それぞれで定義することが必要である。 - -```yaml -# computers structure of web system -computer: "(ネットワーク通信機能を要する)コンピューター" - server: "人間(ユーザー・開発者)に処理能力を提供する" - fixture: "継続的な待機により、ネットワーク機能を提供する" - terminal: "人間とのインターフェースを提供する。端末。" - orchestrator: "サーバー群の維持を管理する(optional)" -``` - -## Architecture - -[Architecture.md](./Architecture.md) を参照のこと - -## License - -MIT diff --git a/docs/ja/YAML-guide.md b/docs/ja/YAML-guide.md deleted file mode 100644 index 458a9e1..0000000 --- 
a/docs/ja/YAML-guide.md +++ /dev/null @@ -1,165 +0,0 @@ -# YAML Extended DSL guide - -## 用語 - -- `meta keys`: `_`で始まるkey及び、それ以下のkey群 -- `field keys`: `meta keys`では無いkey群 -- `leaf keys`: 子keyを持たず値を持つkey群 -- `value`: leaf keysの値。YAML内で省略された場合はnullが入る -- `path`: 出発keyから対象keyまで、`.`区切りでkey名を並べたパス表現 -- `qualified path`: 出発keyを対象keyの記述された`filename.`とした、一意な完全修飾パス -- `placeholder`: ${path}の形で、指定keyのState.get()の結果を参照する記述形式 -- `template`: "user${user_id}"の様に、placeholderを文字列に埋め込む記述形式 - -## rule - -- `---`によるYAML区切りは使用不可 -- `placeholder`, `template`はvalue内のみで使用可能 - -## 基本構造 - -```yaml -field_key: - _state: # ステートのメタデータ(オプション) - _store: # 保存先メタデータ (ファイルルートキーで必須, 子孫キーへ継承) - _load: # 自動ロード元メタデータ (オプション) -``` - -## コアコンセプト - -### 1. meta key 継承 - -Each field key inherit parent's meta keys, and can override: - -```yaml -_store: - client: KVS - key: "root:${id}" - -user: - _store: - key: "user:${sso_user_id}" # キーが上書きされる, client: KVSは継承 - - tenant_id: - # client: KVS, key: user:${sso_user_id}を継承 -``` - -### 2. placeholder 解決 - -State engineは`${...}`を`State::get()`呼び出しで解決します: - -```yaml -tenant: - _load: - table: "tenants" - where: "id=${user.tenant_id}" # → State::get("user.tenant_id") -``` - -**placeholderの省略記法:** - -Manifestは`${tenant_id}`を`${cache.user.tenant_id}`(絶対パス)に変換します。 - -`${path}` のパスは、`.` を含むかどうかで絶対/相対が決まります: - -- `.` を含まない → 相対パス。parse時に `filename.ancestors.path` へ自動修飾 -- `.` を含む → 絶対パスとみなし、そのまま使用 - -```yaml -# cache.yml の user.tenant_id 内 -key: "${org_id}" # → cache.user.org_id(相対) -key: "${cache.user.org_id}" # → cache.user.org_id(絶対、同じ結果) -key: "${session.sso_user_id}" # → session.sso_user_id(別ファイル参照) -``` - -**制約:** 省略記法(相対パス)では `.` を使えないため、兄弟ノードの子を参照する場合は完全修飾パスで記述してください。 - -```yaml -# NG: user.id と書くと絶対パスとみなされ、意図しない参照になる -key: "${user.id}" # → State::get("user.id") ← ファイル名なし、KeyNotFound - -# OK: 完全修飾パスで記述する -key: "${cache.user.id}" # → State::get("cache.user.id") -``` - -### 3. 
クライアント種別 - -**_store用(保存先):** -```yaml -_store: - client: InMemory # プロセスメモリ - client: KVS # Redis, Memcached等 - client: HTTP # HTTPエンドポイント -``` - -**_load用(読込元):** -```yaml -_load: - client: State # 別のStateキーを参照 - client: InMemory # プロセスメモリ - client: Env # 環境変数 - client: KVS # Redis, Memcached等 - client: Db # データベース - client: HTTP # HTTPエンドポイント -``` - -使用する各クライアントのアダプターを実装する必要があります(Required Ports参照)。 - -#### クライアント固有のパラメータ - -**_store.client: InMemory** -```yaml -_store: - client: InMemory - key: "session:${token}" # (string) ストレージキー(プレースホルダー可) -``` - -**_load.client: Env** -```yaml -_load: - client: Env - map: # (object, required) 環境変数マッピング - yaml_key: "ENV_VAR_NAME" -``` - -**_load.client: State** -```yaml -_load: - client: State - key: "${org_id}" # (string) 別のStateキーへの参照 -``` - -**_store.client: KVS** -```yaml -_store: - client: KVS - key: "user:${id}" # (string) ストレージキー(プレースホルダー可) - ttl: 3600 # (integer, optional) TTL(秒) -``` - -**_load.client: Db** -```yaml -_load: - client: Db - connection: ${connection.tenant} # (Value) 接続設定オブジェクトまたは参照 - table: "users" # (string) テーブル名 - where: "id=${user.id}" # (string, optional) WHERE句 - map: # (object, required) カラムマッピング - yaml_key: "db_column" -``` - -**_store.client: HTTP / _load.client: HTTP** -```yaml -_store: - client: HTTP - url: "https://api.example.com/state/${id}" # (string) エンドポイントURL - headers: # (object, optional) リクエストヘッダー - Authorization: "Bearer ${token}" - -_load: - client: HTTP - url: "https://api.example.com/data/${id}" # (string) エンドポイントURL - headers: # (object, optional) リクエストヘッダー - Authorization: "Bearer ${token}" - map: # (object, optional) レスポンスからのフィールド抽出 - yaml_key: "response_field" -``` \ No newline at end of file diff --git a/examples/implements.rs b/examples/implements.rs new file mode 100644 index 0000000..e69de29 diff --git a/examples/manifest/tenant.yml b/examples/tenant.yml similarity index 100% rename from examples/manifest/tenant.yml rename to examples/tenant.yml From 
08c496e6a52fad46fb62644bab109f4f092d8b14 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 07:51:05 +0900 Subject: [PATCH 07/41] rename library --- README.md | 22 +++++++++++----------- docs/Architecture.md | 25 +++++++++++++++++++++++-- docs/{DSL_guide.md => Dsl_guide.md} | 0 3 files changed, 34 insertions(+), 13 deletions(-) rename docs/{DSL_guide.md => Dsl_guide.md} (100%) diff --git a/README.md b/README.md index c2edc14..733f5cf 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# state-engine +# context-engine -Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). state-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. This OSS is positioned as the foundational technology for the reconstructed web system architecture described in [## background](#background). +Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). context-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. 
This library is a foundational technology for the reconstructed web system architecture(see [## background](#background)). - [original text(ja)](#original-text-ja) @@ -16,9 +16,9 @@ Data labels used by a web system's runtime within a single processing cycle shou | mod | description | fn | |-------|------|---------| -| **State** | operates state data following manifest YAMLs | `get()`, `set()`, `delete()`, `exists()` | +| State | operates state data following manifest YAMLs | `get()`, `set()`, `delete()`, `exists()` | -## Why state-engine? +## Why context-engine? **Before:** ```Rust @@ -41,7 +41,7 @@ let user = state.get("session.user")?; ```toml # Cargo.toml [dependencies] -state-engine = "0.1" +context-engine = "0.1" ``` ## Quick Start @@ -74,8 +74,8 @@ session: 3. Initialize Manifest, Store Clients and State. ```rust -use state_engine::Manifest; -use state_engine::State; +use context_engine::Manifest; +use context_engine::State; use std::sync::Arc; let memory = Arc::new(MemoryImpl::new()); @@ -87,7 +87,7 @@ let mut state = State::new() .with_memory(memory) .with_db(db); -// Use state-engine +// Use context-engine let user_name = state.get("session.user.name")?; ``` @@ -114,11 +114,11 @@ see for details [Architecture.md](./docs/en/Architecture.md) ## tree ``` -state-egnine/ +./ README.md # this Cargo.toml docs/ - DSL_guide.md + Dsl_guide.md Architecture.md src/ @@ -162,7 +162,7 @@ Apache-2.0 ## Original Text (ja) -webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。state-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このOSSは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 +webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: 
users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。
 
 ### 背景
 
diff --git a/docs/Architecture.md b/docs/Architecture.md
index a38809d..6b51249 100644
--- a/docs/Architecture.md
+++ b/docs/Architecture.md
@@ -1,6 +1,27 @@
-# Architecture
+# アーキテクチャ
 
-## index
+## ライブラリ要件
+
+- README 3行目参照
+- システムが認識するべき概念を階層構造の名前空間で表現できたとする。この時、名前空間から導かれる全通りの(部分含む)パスが、ランタイムの単一処理スコープで操作する可能性のある値のキーを網羅している。このキー群の値全てを、DSLにて漏れなく取得方法の定義を宣言する。
+
+## 機能構成
+
+- parse & compile: DSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する
+- traversal: 上記データ群を保持し、トラバーサルによってメモリ位置群を取得する
+- addressing & operation: Manifestに対応した1層mapを保持し、アプリケーションからの呼び出しに応じて値の操作を行う。リクエスト処理スコープインスタンス。
+
+## モジュール構成
+
+- Dsl:
+- Manifest: fn
+- State
+
+| mod | description | ports |
+|-------|------|---------|
+| Dsl | DSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する | new(Vec<(u64, u32)>),compile(&[&Path]) |
+| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | `traverse()` |
+| Context | operates state data following manifest YAMLs | `traverse()` |
 
 - provided modules (library provided)
   1. 
State diff --git a/docs/DSL_guide.md b/docs/Dsl_guide.md similarity index 100% rename from docs/DSL_guide.md rename to docs/Dsl_guide.md From af1489d4c0122fde1bd2ded32e329f10060b0c09 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 07:56:14 +0900 Subject: [PATCH 08/41] update Cargo.toml --- Cargo.toml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a385779..89decaa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,16 +1,16 @@ [package] -name = "state-engine" -version = "0.1.5" +name = "context-engine" +version = "0.1.6-alpha.1" authors = ["Andyou "] description = "Declarative state data management system for process" edition = "2024" license = "MIT" -repository = "https://github.com/animagram-jp/state-engine" +repository = "https://github.com/animagram-jp/context-engine" homepage = "https://github.com/animagram-jp/" readme = "README.md" -keywords = ["web", "yaml", "state-management", "declarative", "dsl"] -categories = ["caching", "data-structures", "web-programming"] +keywords = ["web", "addressing-management", "declarative", "dsl"] +categories = ["web-system", "core-library", "addressing"] exclude = ["examples/*"] [dependencies] @@ -24,4 +24,4 @@ ctor = "0.2" [features] default = [] logging = ["log"] -builder = ["serde_yaml_ng"] +builder = ["serde_yaml_ng"] \ No newline at end of file From 928108ffd94f2ae6af511602efe93557853f37cc Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 07:59:48 +0900 Subject: [PATCH 09/41] update --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 733f5cf..db381aa 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Data labels used by a web system's runtime within a single processing cycle shou |---------|-----------|-----------|-------------| | 0.1 | Released | 2026-2-12 | initial | | 0.1.5 | Current | 2026-3-21 | improve #43 | -| 0.1.6 | Scheduled | 2026-4-5 | improve #49 #50 | +| 0.1.6 | 
Scheduled | 2026-4 | rename crate | ## Provided Functions From e01d7cede16b9f11d50bc8b61c495b4fd1b348fe Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 08:02:58 +0900 Subject: [PATCH 10/41] update md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index db381aa..96fd383 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Data labels used by a web system's runtime within a single processing cycle shou |---------|-----------|-----------|-------------| | 0.1 | Released | 2026-2-12 | initial | | 0.1.5 | Current | 2026-3-21 | improve #43 | -| 0.1.6 | Scheduled | 2026-4 | rename crate | +| 0.1.6-alpha.1 | Alpha Release | 2026-4-5 | rename crate | ## Provided Functions From 570f5b7642f503ebe048d10f899293792c907cd9 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 08:16:48 +0900 Subject: [PATCH 11/41] update Cargo.toml --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 89decaa..dfa2b34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,8 +9,8 @@ license = "MIT" repository = "https://github.com/animagram-jp/context-engine" homepage = "https://github.com/animagram-jp/" readme = "README.md" -keywords = ["web", "addressing-management", "declarative", "dsl"] -categories = ["web-system", "core-library", "addressing"] +keywords = ["context", "namespace", "yaml", "addressing"] +categories = ["config", "data-structures", "template-engine", "parser-implementations", "web-programming"] exclude = ["examples/*"] [dependencies] From cb271e01aa91d0a6349223fdd282925f4d79b6ac Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 08:44:07 +0900 Subject: [PATCH 12/41] update --- README.md | 28 +-- docs/Architecture.md | 479 ++++++++++--------------------------------- 2 files changed, 119 insertions(+), 388 deletions(-) diff --git a/README.md b/README.md index 96fd383..dfa210a 100644 --- a/README.md +++ b/README.md @@ -65,27 +65,21 @@ session: 
|-------------------|--------| | multi-tenant app | [tenant.yml](./examples/manifest.yml) | -2. Implement some required ports for your stores. +2. Implement `StoreClient` and `StoreRegistry` for your stores. -| Trait | fn | example | -|---------------|----------------------------|---------| -| `StoreClient` | `get()` `set()` `delete()` | [implements.rs](./examples/implements.rs) | +| Trait | description | example | +|-----------------|------------------------------------------|---------| +| `StoreClient` | `get()` `set()` `delete()` per store | [implements.rs](./examples/implements.rs) | +| `StoreRegistry` | maps YAML client names to `StoreClient`s | [implements.rs](./examples/implements.rs) | -3. Initialize Manifest, Store Clients and State. +3. Initialize State with your registry. ```rust -use context_engine::Manifest; use context_engine::State; -use std::sync::Arc; -let memory = Arc::new(MemoryImpl::new()); -let db = Arc::new(DbImpl::new()?); +let stores = MyStores::new()?; -let manifest = Manifest::new() - -let mut state = State::new() - .with_memory(memory) - .with_db(db); +let mut state = State::new(stores); // Use context-engine let user_name = state.get("session.user.name")?; @@ -105,11 +99,11 @@ let user_name = state.get("session.user.name")?; ▲ │ ┌─────────────┐ ┌───────────┴────────────────────┐ -│ Implements │------>│ Store Clients (Required Ports) │ +│ Implements │------>│ StoreRegistry (Required Port) │ └─────────────┘ impl └────────────────────────────────┘ ``` -see for details [Architecture.md](./docs/en/Architecture.md) +see for details [Architecture.md](./docs/Architecture.md) ## tree @@ -177,4 +171,4 @@ computer: "(ネットワーク通信機能を要する)コンピュータ fixture: "継続的な待機により、ネットワーク機能を提供する" terminal: "人間とのインターフェースを提供する。端末。" orchestrator: "サーバー群の維持を管理する(optional)" -``` \ No newline at end of file +``` diff --git a/docs/Architecture.md b/docs/Architecture.md index 6b51249..7dfd424 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -8,31 +8,23 @@ ## 機能構成 - 
parse & compile: DSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する -- toraversal: 上記データ群を保持し、トラバーサルによってメモリ位置群を取得する -- adressing & operation: Manifestに対応した1層mapを保持し、アプリケーションからの呼び出しに応じて値の操作を行う。リクエスト処理スコープインスタンス。 +- traversal: 上記データ群を保持し、トラバーサルによってメモリ位置群を取得する +- addressing & operation: Manifestに対応した1層mapを保持し、アプリケーションからの呼び出しに応じて値の操作を行う。リクエスト処理スコープインスタンス。 ## モジュール構成 -- Dsl: -- Manifest: fn -- State - | mod | description | ports | |-------|------|---------| | Dsl | DSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する | new(Vec<(u64, u32)>),compile(&[&Path]) | -| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | `toraverse()` | -| Context | operates state data following manifest YAMLs | `toraverse()` | +| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | `traverse()` | +| Context | operates state data following manifest YAMLs | `traverse()` | - provided modules (library provided) 1. State -- required modules (library required*) - 1. InMemoryClient - 2. KVSClient - 3. DbClient - 4. EnvClient - 5. HttpClient - 6. FileClient +- required modules (library required*) + 1. StoreClient + 2. StoreRegistry - internal modules 1. core::Manifest @@ -43,7 +35,7 @@ 6. parser.rs 7. LogFormat -*: *_client impl are not essential, optional modules. +*: optional。FileClientのデフォルト実装のみ内蔵。 --- @@ -53,7 +45,7 @@ A module performing `get()`/`set()`/`delete()`/`exists()` operations on state data following the `_store`/`_load` blocks defined in manifest YAMLs. `get()` automatically attempts loading on key miss. `set()` does not trigger loading. `delete()` removes the specified key from both store and cache. `exists()` checks key existence without triggering auto-load. It maintains an instance-level cache (`state_values`) separate from persistent stores. -State owns YAML I/O: it reads manifest files via `FileClient` and parses them into `core::Manifest` on first access. 
`core::Manifest` is an internal no_std struct that owns all bit-record data and provides decode/find/build_config queries. Relative placeholders in values are qualified to absolute paths at parse time. Metadata (`_store`/`_load`/`_state`) is inherited from parent nodes; child overrides parent. +State owns YAML I/O: it reads manifest files via the built-in `DefaultFileClient` and parses them into `core::Manifest` on first access. `core::Manifest` is an internal no_std struct that owns all bit-record data and provides decode/find/build_config queries. Relative placeholders in values are qualified to absolute paths at parse time. Metadata (`_store`/`_load`/`_state`) is inherited from parent nodes; child overrides parent. ## State @@ -65,11 +57,11 @@ Returns: `Result, StateError>` **Operation flow:** 1. Check `called_keys` (recursion / limit detection) -2. Load manifest file via `FileClient` (first access only) -3. `core::Manifest::find()` → get key_idx -4. **Check `state_values` (by key_idx)** ← Highest priority +2. Load manifest file via `DefaultFileClient` (first access only) +3. Traverse intern list with path string → locate key +4. **Check `state_values` (by path)** ← Highest priority 5. `core::Manifest::get_meta()` → get MetaIndices -6. If `_load.client == State`: skip store. Otherwise: retrieve from store (KVS/InMemoryClient) +6. If `_load.client == State`: skip store. Otherwise: `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` 7. On miss, auto-load via `Load::handle()` 8. Return `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` @@ -95,14 +87,9 @@ Set a value to the state represented by the specified node. 
Returns: `Result` **Behavior:** -- Save to persistent store (KVS/InMemoryClient) +- Save via `StoreRegistry::client_for(yaml_name)` → `StoreClient::set()` - Also save to `state_values` (instance cache) -- If store is KVS, TTL can be set - -**TTL behavior:** -- `ttl` argument specified → Use specified value -- No `ttl` argument, `_store.ttl` in YAML → Use YAML default -- No `ttl` argument, no `_store.ttl` in YAML → Maintain current value +- TTL and other store-specific args are handled by the `StoreClient` impl --- @@ -113,7 +100,7 @@ Delete the {key:value} record represented by the specified node. Returns: `Result` **Behavior:** -- Delete from persistent store (KVS/InMemoryClient) +- Delete via `StoreRegistry::client_for(yaml_name)` → `StoreClient::delete()` - Also delete from `state_values` (instance cache) - After deletion, the node shows miss @@ -127,73 +114,62 @@ Returns: `Result` **Behavior:** - Check `state_values` (instance cache) first -- Then check persistent store (KVS/InMemoryClient) +- Then check via `StoreClient::get()` - **Does NOT trigger auto-load** (unlike `get()`) - Returns `Ok(true)` if exists, `Ok(false)` otherwise -**Comparison with get():** -- `get()`: Returns value, triggers auto-load on miss -- `exists()`: Returns boolean, never triggers auto-load - --- ## required modules -Application must implement the following traits to handle data stores: - -1. **InMemoryClient** - - expected operations: `get()`/`set()`/`delete()` - - arguments: `"key":...` from `_{store,load}.key:...` in Manifest - - expected target: Local process memory - -2. **KVSClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String, ttl: Option) -> bool` - - `fn delete(&self, key: &str) -> bool` - - arguments: `"key":...` from `_{store,load}.key:...`, `ttl:...` from `_{store,load}.ttl:...`(optional) in Manifest - - expected target: Key-Value Store (Redis, etc.) 
- - **Important**: KVSClient handles String only (primitive type). State layer performs serialize/deserialize: - - **serialize**: All values → JSON string (preserves type: Number/String/Bool/Null/Array/Object) - - **deserialize**: JSON string → Value (accurately restores type) - -3. **DbClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, connection: &Value, table: &str, columns: &[&str], where_clause: Option<&str>) -> Option>>` - - `fn set(&self, connection: &Value, table: &str, values: &HashMap, where_clause: Option<&str>) -> bool` - - `fn delete(&self, connection: &Value, table: &str, where_clause: Option<&str>) -> bool` - - arguments: `"connection":...`, `"table":...`, `"columns":...` from `_{load}.map.*:...`, `"where_clause":...`(optional) - - only for `_load.client` - -4. **EnvClient** - - expected operations: `get()`/`set()`/`delete()` - - arguments: `"key":...` from `_{load}.map.*:...` in Manifest - - expected target: environment variables - - only for `_load.client` - -5. **HttpClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, url: &str, headers: Option<&HashMap>) -> Option` - - `fn set(&self, url: &str, body: Value, headers: Option<&HashMap>) -> bool` - - `fn delete(&self, url: &str, headers: Option<&HashMap>) -> bool` - - arguments: `"url":...` from `_{store,load}.url:...`, `"headers":...` from `_{store,load}.headers:...` - - expected target: HTTP endpoints - - for both `_store.client` and `_load.client` - -6. 
**FileClient** - - expected operations: `get()`/`set()`/`delete()` - - trait signature: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String) -> bool` - - `fn delete(&self, key: &str) -> bool` - - arguments: `"key":...` from `_{store,load}.key:...` in Manifest - - expected target: File I/O - - default impl `DefaultFileClient` is built-in (std::fs based) - - for both `_store.client` and `_load.client` - - **always used by State to read manifest YAMLs** +### StoreClient + +単一ストアの操作を提供するtrait。`key`は予約引数として明示し、追加の任意引数は`args`のflatなHashMapで渡す。 + +```rust +pub trait StoreClient: Send + Sync { + fn get(&self, key: &str, args: &HashMap<&str, Value>) -> Option; + fn set(&self, key: &str, args: &HashMap<&str, Value>) -> bool; + fn delete(&self, key: &str, args: &HashMap<&str, Value>) -> bool; +} +``` + +- `key`: manifest の `_{store,load}.key` の値。予約引数。 +- `args`: ttl・connection・headers 等、ストア種別ごとの任意引数。利用者がimpl内で定義・参照する。 +- 内部可変性・スレッド安全性はimplementor側の責任。 + +### StoreRegistry + +YAMLの`client:`名称とStoreClientの対応を管理するtrait。利用者がimplし、Stateに渡す。 + +```rust +pub trait StoreRegistry { + fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient>; +} +``` + +- ライブラリはYAML名称の文字列をそのまま`client_for()`に渡してmatchを回す。 +- YAML上の名義(`"Memory"`, `"KVS"`, `"Db"`等)は利用者が自由に定義する。 + +**実装例:** +```rust +struct MyStores { + memory: Arc, + kvs: Arc, + db: Arc, +} + +impl StoreRegistry for MyStores { + fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient> { + match yaml_name { + "Memory" => Some(self.memory.as_ref()), + "KVS" => Some(self.kvs.as_ref()), + "Db" => Some(self.db.as_ref()), + _ => None, + } + } +} +``` --- @@ -201,14 +177,7 @@ Application must implement the following traits to handle data stores: When `State::get()` misses a value, retrieve data according to `_load` settings. 
-**Client types:** -- `Env` - Load from environment variables -- `Db` - Load from database -- `KVS` - Load from KVS -- `InMemory` - Load from process memory -- `Http` - Load from HTTP endpoint -- `File` - Load from file -- `State` - Reference another State key directly (does not call Load::handle()) +`_load.client` の値を `StoreRegistry::client_for()` に渡し、対応する `StoreClient::get()` を呼ぶ。 **Special behavior for State client:** ```yaml @@ -224,7 +193,7 @@ When `_load.client: State`, `Load::handle()` is not called; the value of `_load. - No `_load` → No auto-load, return `Ok(None)` - No `_load.client` → No auto-load, return `Ok(None)` - `_load.client: State` → Use `_load.key` value directly -- Other clients → Auto-load via `Load::handle()` +- Other clients → `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` **Recursion depth limit:** - `max_recursion = 20` @@ -233,61 +202,16 @@ When `_load.client: State`, `Load::handle()` is not called; the value of `_load. --- -## State::get() Detailed Flow - -``` -1. called_keys check (recursion / limit detection) - ↓ -2. Load manifest file via FileClient (first access only) - ↓ -3. core::Manifest::find() → get key_idx - ↓ -4. ★ Check state_values (by key_idx) ← Highest priority - if find_state_value(key_idx).is_some() { return Ok(Some(value)); } - ↓ -5. core::Manifest::get_meta() → get MetaIndices - ↓ -6. _load.client == State → skip store - otherwise: retrieve from store (KVS/InMemoryClient) - ↓ -7. On miss, auto-load - ├─→ build_config() resolves placeholders - ├─→ Load::handle(config) - │ ├─→ client: Db → DbClient::get() - │ ├─→ client: KVS → KVSClient::get() - │ ├─→ client: Env → EnvClient::get() - │ ├─→ client: InMemory → InMemoryClient::get() - │ ├─→ client: Http → HttpClient::get() - │ └─→ client: File → FileClient::get() - ├─→ Save to persistent store - └─→ Save to state_values - ↓ -8. 
Return Ok(Some(value)) / Ok(None) / Err(StateError) -``` - ---- - ## state_values (Instance Memory Cache) -The State struct maintains an instance-level cache (`state_values: StateValueList`) separate from persistent stores (KVS/InMemoryClient). +The State struct maintains an instance-level cache (`state_values`) separate from persistent stores. -**Important:** This is NOT InMemoryClient. It is a variable of the State instance itself. +**Important:** This is NOT a StoreClient. It is a variable of the State instance itself. **Purpose:** -1. **Speed up duplicate `State::get()` calls within the same request** -2. **Reduce access count to KVS/InMemoryClient** -3. **Avoid duplicate loads** (don't load the same key multiple times) - -**Index:** -- Keyed by `key_idx: u16` — globally unique index in KeyList -- Not keyed by store key string - -**Save timing:** -- On successful retrieval from store or load in `State::get()` -- On `State::set()` - -**Delete timing:** -- On `State::delete()` +1. Speed up duplicate `State::get()` calls within the same request +2. Reduce access count to stores +3. Avoid duplicate loads **Lifecycle:** - State instance created: empty @@ -311,7 +235,6 @@ qualify_path("tenant_id", "cache", ["user"]) ``` **Placeholder resolution at State runtime (`resolve_value_to_string()`):** -- Retrieve qualified path from path_map - Call `State::get(qualified_path)` to get the value --- @@ -320,20 +243,19 @@ qualify_path("tenant_id", "cache", ["user"]) **ManifestError:** - `FileNotFound` — manifest file not found in manifest dir -- `AmbiguousFile` — two files with the same name but different extensions (`.yml` and `.yaml`) exist in manifestDir. Manifest ignores extensions (dot-separated paths represent hierarchy), so it cannot distinguish the two. Same-extension duplicates are assumed to be prevented at the OS level. 
+- `AmbiguousFile` — two files with the same name but different extensions (`.yml` and `.yaml`) exist in manifestDir - `ParseError` — YAML parse failed **LoadError:** -- `ClientNotConfigured` — required client (Env/KVS/DB/HTTP/File) is not set on State -- `ConfigMissing(String)` — a required config key (key/url/table/map/connection) is missing in the manifest +- `ClientNotFound(String)` — `StoreRegistry::client_for()` returned `None` for the given yaml_name +- `ConfigMissing(String)` — a required config key is missing in the manifest - `NotFound(String)` — the client call succeeded but returned no data -- `ParseError(String)` — JSON parse error from client response +- `ParseError(String)` — parse error from client response **StoreError:** -- `ClientNotConfigured` — required client (KVS/InMemory/HTTP/File) is not set on State -- `ConfigMissing(String)` — a required config key (key/url/client) is missing in the manifest -- `SerializeError(String)` — JSON serialize error -- `UnsupportedClient(u64)` — unsupported client id in config +- `ClientNotFound(String)` — `StoreRegistry::client_for()` returned `None` for the given yaml_name +- `ConfigMissing(String)` — a required config key is missing in the manifest +- `SerializeError(String)` — serialize error --- @@ -344,13 +266,9 @@ qualify_path("tenant_id", "cache", ["user"]) - provided modules (ライブラリ提供モジュール) 1. State -- required modules (ライブラリ要求モジュール*) - 1. InMemoryClient - 2. KVSClient - 3. DbClient - 4. EnvClient - 5. HttpClient - 6. FileClient +- required modules (ライブラリ要求モジュール*) + 1. StoreClient + 2. StoreRegistry - internal modules (内部モジュール) 1. core::Manifest @@ -361,7 +279,7 @@ qualify_path("tenant_id", "cache", ["user"]) 6. parser.rs 7. 
LogFormat -*: いずれもoptional(必須ではない) +*: いずれもoptional(必須ではない)。FileClientのデフォルト実装のみ内蔵。 --- @@ -375,95 +293,36 @@ qualify_path("tenant_id", "cache", ["user"]) manifest YAMLの`_store`/`_load`定義に従い、`get()` / `set()` / `delete()` / `exists()`操作を提供するmoduleです。`get()`はkey missをトリガーに`_load`定義に基づいて自動ロードを試みます。`set()`は自動ロードを引き起こしません。`delete()`はストアとインスタンスキャッシュ両方から削除します。`exists()`は自動ロードを引き起こさずにkey存在確認を行います。 -StateはYAML I/Oを担います。`FileClient`経由でmanifestファイルを読み込み、初回アクセス時に`core::Manifest`へparseします。`core::Manifest`はno_stdの内部structで、全bitレコードデータを所有しdecode/find/build_configクエリを提供します。`_store`/`_load`/`_state`メタデータは親から子へ継承され、子が上書きできます。 - 2. Required Ports -ライブラリ動作時にimpl実装が必要なmoduleのtraits - - 1. **InMemoryClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - 渡される引数: `"key": Manifestの_{store,load}.key:の値` - - 想定対象ストア: ローカルプロセスメモリ - - インスタンスメモリのState::cacheにて、_store.clientの値に依らず、キャッシュが常にされている点に留意して下さい。 - 2. **KVSClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - traitシグネチャ: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String, ttl: Option) -> bool` - - `fn delete(&self, key: &str) -> bool` - - 渡される引数: `"key": Manifestの_{store,load}.key:の値`, `ttl: Manifestの_{store,load}.ttl:の値(オプション)` - - 想定対象ストア: Key-Valueストア(Redis等) - - **重要**: KVSClientはString型のみを扱う(プリミティブ型)。State層がserialize/deserializeを実行: - - **serialize**: 全ての値 → JSON文字列(型情報を保持: Number/String/Bool/Null/Array/Object) - - **deserialize**: JSON文字列 → Value(型を正確に復元) - - KVSにはJSON文字列としてデータを保存。JSON形式でKVSネイティブ型に依存せず型情報を保持。 - 3. 
**DbClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - traitシグネチャ: - - `fn get(&self, connection: &Value, table: &str, columns: &[&str], where_clause: Option<&str>) -> Option>>` - - `fn set(&self, connection: &Value, table: &str, values: &HashMap, where_clause: Option<&str>) -> bool` - - `fn delete(&self, connection: &Value, table: &str, where_clause: Option<&str>) -> bool` - - 渡される引数: `"connection": YAML記載の_{load}.connection:の値`, `"table": YAML記載の_{load}.table:の値`, `"columns": YAML記載の_{load}.map.*:の値`, `"where_clause": YAML記載の_{load}.where:の値` - - 想定対象ストア: SQLデータベース - - _load.client: のみに使用対応 - 4. **EnvClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - 渡される引数: `"key": Manifestの_{load}.map.*:の値` - - 想定対象ストア: 環境変数 - - _load.client: のみに使用対応 - 5. **HttpClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - traitシグネチャ: - - `fn get(&self, url: &str, headers: Option<&HashMap>) -> Option` - - `fn set(&self, url: &str, body: Value, headers: Option<&HashMap>) -> bool` - - `fn delete(&self, url: &str, headers: Option<&HashMap>) -> bool` - - 渡される引数: `"url": YAML記載の_{store,load}.url:の値`, `"headers": YAML記載の_{store,load}.headers:の値` - - 想定対象ストア: HTTPエンドポイント - - _store/_load両方に使用対応 - 6. 
**FileClient** - - 必要なメソッド: `get()`/`set()`/`delete()` - - traitシグネチャ: - - `fn get(&self, key: &str) -> Option` - - `fn set(&self, key: &str, value: String) -> bool` - - `fn delete(&self, key: &str) -> bool` - - 渡される引数: `"key": Manifestの_{store,load}.key:の値` - - 想定対象ストア: ファイルI/O - - デフォルト実装 `DefaultFileClient` を内蔵(std::fsベース) - - _store/_load両方に使用対応 - - **StateがmanifestのYAML読み込みに常時使用する** +ライブラリ動作時にimpl実装が必要なtraits + +**StoreClient** + +単一ストアのget/set/deleteを提供するtrait。`key`は予約引数。`args`にttl等の任意引数をflatなHashMapで渡す。内部可変性はimplementor側の責任。 + +**StoreRegistry** + +YAMLの`client:`文字列と`StoreClient`の対応を管理するtrait。利用者がimplしてStateに渡す。ライブラリ側はYAML名を`client_for()`に渡してdispatchする。YAML上の名義は利用者が自由に定義できる。 ## State ### State::get("filename.node") -指定されたノードが表すステート(state obj)を参照し、値またはcollectionを返却する。 +指定されたノードが表すステートを参照し、値またはcollectionを返却する。 戻り値: `Result, StateError>` **動作フロー:** 1. `called_keys` チェック(再帰・上限検出) -2. `FileClient`経由でmanifestファイルをロード(未ロード時のみ) -3. `core::Manifest::find()` → key_idx 取得 +2. `DefaultFileClient`経由でmanifestファイルをロード(未ロード時のみ) +3. intern listをパス文字列で検索・トラバース → key位置を特定 4. **state_values (インスタンスキャッシュ) をチェック** ← 最優先 5. `core::Manifest::get_meta()` → MetaIndices 取得 -6. `_load.client == State` の場合はストアをスキップ。それ以外: ストア (KVS/InMemoryClient) から取得 +6. `_load.client == State` の場合はストアをスキップ。それ以外: `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` 7. **miss時、`Load::handle()` で自動ロード** 8. 
`Ok(Some(value))` / `Ok(None)` / `Err(StateError)` を返却 -**自動ロード:** -- 指定されたノードのステートキーがmissした場合、`Load::handle()` で自動取得を試みる -- `Load::handle()` がエラーの場合、`Err(StateError::LoadFailed(LoadError))` を返す - -**_state.typeについての注意:** -```yaml -tenant_id: - _state: - type: integer # メタデータのみ - 検証/キャストは未実装 -``` - -`_state.type`フィールドは現在メタデータのみで、State操作では強制されません。 - --- ### State::set("filename.node", value, ttl) @@ -473,14 +332,9 @@ tenant_id: 戻り値: `Result` **動作:** -- 永続ストア (KVS/InMemoryClient) に保存 +- `StoreRegistry::client_for(yaml_name)` → `StoreClient::set()` で保存 - state_values (インスタンスキャッシュ) にも保存 -- ストアがKVSの場合、TTLを設定可能 - -**TTL動作:** -- `ttl` 引数が指定された → 指定値を使用 -- `ttl` 引数なし、YAMLに `_store.ttl` あり → YAMLのデフォルト値を使用 -- `ttl` 引数なし、YAMLに `_store.ttl` なし → 現在の値を維持 +- ttl等のストア固有引数はStoreClient impl側で管理 --- @@ -490,11 +344,6 @@ tenant_id: 戻り値: `Result` -**動作:** -- 永続ストア (KVS/InMemoryClient) から削除 -- state_values (インスタンスキャッシュ) からも削除 -- 削除後、そのノードは miss を示す - --- ### State::exists("filename.node") @@ -503,152 +352,40 @@ tenant_id: 戻り値: `Result` -**動作:** -- 最初に state_values (インスタンスキャッシュ) をチェック -- 次に永続ストア (KVS/InMemoryClient) をチェック -- **自動ロードをトリガーしない** (`get()` とは異なる) -- 存在する場合 `Ok(true)`、それ以外 `Ok(false)` を返す - -**get() との比較:** -- `get()`: 値を返す、miss時に自動ロードをトリガー -- `exists()`: 真偽値を返す、自動ロードは決してトリガーしない - --- ## Load::handle() `State::get()` が値をmissした際、`_load` 設定に従ってデータを取得する。 -**クライアント種別:** -- `Env` - 環境変数からロード -- `Db` - データベースからロード -- `KVS` - KVSからロード -- `InMemory` - プロセスメモリからロード -- `Http` - HTTPエンドポイントからロード -- `File` - ファイルからロード -- `State` - 別のStateキーを参照(Load::handle()を呼ばない) - -**State clientの特殊動作:** -```yaml -tenant_id: - _load: - client: State - key: ${org_id} # State::get("cache.user.org_id")を直接返す -``` - -`_load.client: State` の場合、`Load::handle()` は呼ばれず、`_load.key` の値(プレースホルダー解決済み)が直接返される。 +`_load.client` の値を `StoreRegistry::client_for()` に渡し、対応する `StoreClient::get()` を呼ぶ。 **設計ルール:** - `_load` なし → 自動ロードなし、`Ok(None)` を返す - `_load.client` なし → 自動ロードなし、`Ok(None)` を返す -- `_load.client: State` → 
`_load.key` の値を直接使用(Load::handle()を呼ばない) -- その他のclient → `Load::handle()` で自動ロード +- `_load.client: State` → `_load.key` の値を直接使用 +- その他のclient → `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` **再帰深度制限:** - `max_recursion = 20` -- `called_keys: HashSet` で処理中のキーを管理 - 上限超過または同一キーの再帰検出時に `Err(StateError::RecursionLimitExceeded)` を返す --- -## State::get() 詳細フロー - -``` -1. called_keys チェック(再帰・上限検出) - ↓ -2. FileClient経由でmanifestファイルをロード(未ロード時のみ) - ↓ -3. core::Manifest::find() → key_idx 取得 - ↓ -4. ★ state_values をチェック (key_idx) ← 最優先 - if find_state_value(key_idx).is_some() { return Ok(Some(value)); } - ↓ -5. core::Manifest::get_meta() → MetaIndices 取得 - ↓ -6. _load.client == State の場合はストアをスキップ - それ以外: ストア (KVS/InMemoryClient) から取得 - ↓ -7. miss時、自動ロード - ├─→ build_config() でプレースホルダーを解決 - ├─→ Load::handle(config) - │ ├─→ client: Db → DbClient::get() - │ ├─→ client: KVS → KVSClient::get() - │ ├─→ client: Env → EnvClient::get() - │ ├─→ client: InMemory → InMemoryClient::get() - │ ├─→ client: Http → HttpClient::get() - │ └─→ client: File → FileClient::get() - ├─→ 永続ストアに保存 - └─→ state_values に保存 - ↓ -8. Ok(Some(value)) / Ok(None) / Err(StateError) を返却 -``` - ---- - -## state_values (インスタンスメモリキャッシュ) - -State構造体は、永続ストア(KVS/InMemoryClient)とは別に、インスタンスレベルのキャッシュ(`state_values: StateValueList`)を保持します。 - -**重要:** これはInMemoryClientではありません。Stateインスタンス自体の変数です。 - -**目的:** -1. **同一リクエスト内での重複`State::get()`呼び出しを高速化** -2. **KVS/InMemoryClientへのアクセス回数を削減** -3. 
**重複ロードを回避する設計**(同じキーを複数回ロードしない) - -**インデックス:** -- `key_idx: u16` — KeyList上のグローバルユニークなindex をキーとして保存 -- 永続ストアのキー文字列ではなく、key_idxで引く設計 - -**保存タイミング:** -- `State::get()`でストアまたはロードから取得成功時 -- `State::set()`時 - -**削除タイミング:** -- `State::delete()`時 - -**ライフサイクル:** -- Stateインスタンス生成: 空 -- State稼働中: 蓄積 -- Stateインスタンス破棄: 破棄(メモリ解放) - ---- - -## プレースホルダー解決ルール - -`${}` 内のパスは **parse時に qualified path へ変換済み**。State実行時に変換処理は行わない。 - -**parse時の qualify ルール(`qualify_path()`):** -- パスに `.` を含む場合 → 絶対パスとみなしそのまま使用 -- `.` を含まない場合 → `filename.ancestors.path` に変換 - -**例(`cache.yml` の `user._load.where` 内 `${tenant_id}`):** -``` -qualify_path("tenant_id", "cache", ["user"]) -→ "cache.user.tenant_id" -``` - -**State実行時のプレースホルダー解決(`resolve_value_to_string()`):** -- path_map から qualified path を取り出し -- `State::get(qualified_path)` を呼んで値を取得 - ---- - ## error case **ManifestError:** - `FileNotFound` — manifestディレクトリにファイルが見つからない -- `AmbiguousFile` — manifestDir内に拡張子違いの同名ファイルが2つ存在する(`.yml`と`.yaml`)。ドット区切りを階層表現とするため拡張子を無視し、区別できない。同拡張子の同名ファイルはOSレベルでの非許容を想定。 +- `AmbiguousFile` — manifestDir内に拡張子違いの同名ファイルが2つ存在する - `ParseError` — YAMLのパース失敗 **LoadError:** -- `ClientNotConfigured` — 必要なclient(Env/KVS/DB/HTTP/File)がStateに未設定 -- `ConfigMissing(String)` — manifest内に必須のconfigキー(key/url/table/map/connection)が欠落 +- `ClientNotFound(String)` — `StoreRegistry::client_for()` が None を返した +- `ConfigMissing(String)` — manifest内に必須のconfigキーが欠落 - `NotFound(String)` — clientの呼び出しは成功したがデータが存在しなかった -- `ParseError(String)` — clientレスポンスのJSONパースエラー +- `ParseError(String)` — clientレスポンスのパースエラー **StoreError:** -- `ClientNotConfigured` — 必要なclient(KVS/InMemory/HTTP/File)がStateに未設定 -- `ConfigMissing(String)` — manifest内に必須のconfigキー(key/url/client)が欠落 -- `SerializeError(String)` — JSONシリアライズエラー -- `UnsupportedClient(u64)` — configに未対応のclient idが指定された +- `ClientNotFound(String)` — `StoreRegistry::client_for()` が None を返した +- `ConfigMissing(String)` — manifest内に必須のconfigキーが欠落 +- `SerializeError(String)` — シリアライズエラー From 
c8a0485238182c19f4127dd8156e07e3c5546813 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 19:19:01 +0900 Subject: [PATCH 13/41] underway refactoring --- README.md | 24 ++-- docs/Architecture.md | 263 +++++++----------------------------------- examples/tenant.yml | 7 +- src/context.rs | 0 src/dsl.rs | 66 +++++++++++ src/index.rs | 0 src/ports/provided.rs | 6 +- src/tree.rs | 0 8 files changed, 128 insertions(+), 238 deletions(-) create mode 100644 src/context.rs create mode 100644 src/dsl.rs create mode 100644 src/index.rs create mode 100644 src/tree.rs diff --git a/README.md b/README.md index dfa210a..791af79 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Data labels used by a web system's runtime within a single processing cycle shou | mod | description | fn | |-------|------|---------| -| State | operates state data following manifest YAMLs | `get()`, `set()`, `delete()`, `exists()` | +| `Context` | operates context | `get`, `set`, `delete`, `exists` | ## Why context-engine? @@ -25,7 +25,7 @@ Data labels used by a web system's runtime within a single processing cycle shou // Manual cache management let session_key = format!("user:{}", id); let user = redis.get(&session_key).or_else(|| { - let user = db.query("SELECT * FROM users WHERE id=?", id)?; + let user = db.query("SELECT id, email, name FROM users WHERE id=?", id)?; redis.set(&session_key, &user, 3600); Some(user) })?; @@ -33,7 +33,7 @@ let user = redis.get(&session_key).or_else(|| { **After:** ```Rust -let user = state.get("session.user")?; +let user = state.get("session.user.name")?; ``` ## Installation @@ -61,15 +61,15 @@ session: key: "users.${session.user.id}.name" ``` -| case | example | -|-------------------|--------| -| multi-tenant app | [tenant.yml](./examples/manifest.yml) | +| Case | Example | +|-------------------|---------| +| multi-tenant app | [tenant.yml](./examples/tenant.yml) | 2. Implement `StoreClient` and `StoreRegistry` for your stores. 
-| Trait | description | example | +| Trait | Description | Example | |-----------------|------------------------------------------|---------| -| `StoreClient` | `get()` `set()` `delete()` per store | [implements.rs](./examples/implements.rs) | +| `StoreClient` | `get()` `set()` `delete()` | [implements.rs](./examples/implements.rs) | | `StoreRegistry` | maps YAML client names to `StoreClient`s | [implements.rs](./examples/implements.rs) | 3. Initialize State with your registry. @@ -133,6 +133,10 @@ Passed unit and integration tests cargo test --features=logging -- --nocapture ``` +## License + +Apache-2.0 + ## Background **reimagined web architecture** @@ -148,10 +152,6 @@ computer: "Network-capable nodes in the system." orchestrator: "Computers responsible for maintenance of servers. (optional)" ``` -## License - -Apache-2.0 - --- ## Original Text (ja) diff --git a/docs/Architecture.md b/docs/Architecture.md index 7dfd424..cba3200 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -1,8 +1,10 @@ -# アーキテクチャ +# Architecture + +*warning*: Temporarily, Japanese and English combind in this file. ## ライブラリ要件 -- README 3行目参照 +- README 3行目 参照 - システムが認識するべき概念を階層構造の名前空間で表現できたとする。この時、名前空間から導かれる全通りの(部分含む)パスが、ランタイムの単一処理スコープで操作する可能性のある値のキーを網羅している。このキー群の値全てを、DSLにて漏れなく取得方法の定義を宣言する。 ## 機能構成 @@ -13,114 +15,53 @@ ## モジュール構成 -| mod | description | ports | -|-------|------|---------| -| Dsl | DSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する | new(Vec<(u64, u32)>),compile(&[&Path]) | -| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | `traverse()` | -| Context | operates state data following manifest YAMLs | `traverse()` | - -- provided modules (library provided) - 1. State - -- required modules (library required*) - 1. StoreClient - 2. StoreRegistry - -- internal modules - 1. core::Manifest - 2. Store - 3. Load - 4. u64(fixed_bits.rs) - 5. Pools & Maps(pool.rs) - 6. parser.rs - 7. 
LogFormat - -*: optional。FileClientのデフォルト実装のみ内蔵。 +### 実体部 ---- - -## provided modules - -**State** is the sole public API of the library. +| Mod | Description | Ports | Filename | +|-----|-------------|-------|----------| +| Tree | YAMLファイルを読み込んでProvided::Tree型にパースし、Dsl::compileの出力を実行ファイルに書き込む | write | tree.rs | +| Dsl | Tree型のDSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する | new, compile | dsl.rs | +| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | new, traverse | index.rs | +| Context | コンテクストデータの操作を行うリクエスト処理スコープの実行インスタンス | new, get, set, delete, exists | context.rs | -A module performing `get()`/`set()`/`delete()`/`exists()` operations on state data following the `_store`/`_load` blocks defined in manifest YAMLs. `get()` automatically attempts loading on key miss. `set()` does not trigger loading. `delete()` removes the specified key from both store and cache. `exists()` checks key existence without triggering auto-load. It maintains an instance-level cache (`state_values`) separate from persistent stores. +* Portsはpub fnのこと +* new()であっても、引数はVec等の標準型依存を明示するべき。construct状態は避ける +* Tree::write()は引数にoptionを取って、「Dsl::compileの出力を」をskipしてValueのまま書き込むオプションを追加予定 +* Context.new()内の各StoreClientは、Arcで記述する。ClientResistoryを新規導入したので、検討余地あるかも +* context.rsが煩雑になるようなら、内部modとしてLoadとStoreを切り出す必要があるかも。係数明示不足による所有複雑化に注意 -State owns YAML I/O: it reads manifest files via the built-in `DefaultFileClient` and parses them into `core::Manifest` on first access. `core::Manifest` is an internal no_std struct that owns all bit-record data and provides decode/find/build_config queries. Relative placeholders in values are qualified to absolute paths at parse time. Metadata (`_store`/`_load`/`_state`) is inherited from parent nodes; child overrides parent. 
+### Portモジュール -## State - -### State::get("filename.node") +| Mod | Description | Ports | Filename | +|-----|-------------|------|----------| +| Context | Contextのtrait | - | provided.rs | +| StoreClient | *Clientの基底 | - | required.rs | +| ClientResistory | *Clientの登録用 | - | provided.rs | -Reference the state represented by the specified node, returning value or collections. +### 開発用モジュール -Returns: `Result, StateError>` - -**Operation flow:** -1. Check `called_keys` (recursion / limit detection) -2. Load manifest file via `DefaultFileClient` (first access only) -3. Traverse intern list with path string → locate key -4. **Check `state_values` (by path)** ← Highest priority -5. `core::Manifest::get_meta()` → get MetaIndices -6. If `_load.client == State`: skip store. Otherwise: `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` -7. On miss, auto-load via `Load::handle()` -8. Return `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` +| Mod | Description | Port | Filename | +|-------|------|---------| +| Error | Provided Port | - | provided.rs | +| Log | feature=logging限定のマクロ | fn_log! | provided.rs | -**Auto-load:** -- If the state key misses, attempt auto-retrieval via `Load::handle()` -- On error, return `Err(StateError::LoadFailed(LoadError))` +## 用語 -**Note on _state.type:** ```yaml -tenant_id: - _state: - type: integer # Metadata only - validation/casting not yet implemented +key: n層マップDSLの最末端value以外の要素 +keyword: keyの名前文字列 +field_key: 自身と親祖先がkeywordが'_'で始まらないkey +meta_key: keywordが'_'始まりのkeyと、その子孫key +leaf_key: 子にkeyを持たず値を持つkey +value: leaf keysの値。DSL内で省略された場合はnullが充てられる +path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン +qualified_path: DSL内で一意な完全修飾パス +placeholder: key参照記述("${path}")。valueのみに適用 +template: placeholderと静的な文字列を混合した、動的生成テンプレート。valueのみに適用 +called_path: Stateに渡されるパス文字列 ``` -The `_state.type` field is currently metadata-only and not enforced by State operations. 
- ---- - -### State::set("filename.node", value, ttl) - -Set a value to the state represented by the specified node. - -Returns: `Result` - -**Behavior:** -- Save via `StoreRegistry::client_for(yaml_name)` → `StoreClient::set()` -- Also save to `state_values` (instance cache) -- TTL and other store-specific args are handled by the `StoreClient` impl - ---- - -### State::delete("filename.node") - -Delete the {key:value} record represented by the specified node. - -Returns: `Result` - -**Behavior:** -- Delete via `StoreRegistry::client_for(yaml_name)` → `StoreClient::delete()` -- Also delete from `state_values` (instance cache) -- After deletion, the node shows miss - ---- - -### State::exists("filename.node") - -Check if a key exists without triggering auto-load. - -Returns: `Result` - -**Behavior:** -- Check `state_values` (instance cache) first -- Then check via `StoreClient::get()` -- **Does NOT trigger auto-load** (unlike `get()`) -- Returns `Ok(true)` if exists, `Ok(false)` otherwise - ---- - -## required modules +## mod:fn詳細仕様 ### StoreClient @@ -173,43 +114,14 @@ impl StoreRegistry for MyStores { --- -## Load::handle() - -When `State::get()` misses a value, retrieve data according to `_load` settings. - -`_load.client` の値を `StoreRegistry::client_for()` に渡し、対応する `StoreClient::get()` を呼ぶ。 - -**Special behavior for State client:** -```yaml -tenant_id: - _load: - client: State - key: ${org_id} # Directly returns State::get("cache.user.org_id") -``` - -When `_load.client: State`, `Load::handle()` is not called; the value of `_load.key` (placeholder already resolved) is returned directly. 
- -**Design rules:** -- No `_load` → No auto-load, return `Ok(None)` -- No `_load.client` → No auto-load, return `Ok(None)` -- `_load.client: State` → Use `_load.key` value directly -- Other clients → `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` - -**Recursion depth limit:** -- `max_recursion = 20` -- `called_keys: HashSet` tracks keys currently being processed -- On limit exceeded or circular key detected: `Err(StateError::RecursionLimitExceeded)` - ---- +## Context Instance Cache -## state_values (Instance Memory Cache) - -The State struct maintains an instance-level cache (`state_values`) separate from persistent stores. +An instance-level cache separate from persistent stores. **Important:** This is NOT a StoreClient. It is a variable of the State instance itself. **Purpose:** -1. Speed up duplicate `State::get()` calls within the same request +1. Speed up duplicate `Context.get()` calls within the same request 2. Reduce access count to stores 3. Avoid duplicate loads @@ -218,8 +130,6 @@ The State struct maintains an instance-level cache (`state_values`) separate fro - During State lifetime: accumulates - State instance dropped: destroyed (memory released) ---- - ## Placeholder Resolution Rules `${}` paths are **qualified to absolute paths at parse time** — no conversion happens at State runtime. @@ -237,8 +147,6 @@ qualify_path("tenant_id", "cache", ["user"]) **Placeholder resolution at State runtime (`resolve_value_to_string()`):** - Call `State::get(qualified_path)` to get the value ---- - ## error case **ManifestError:** @@ -261,42 +169,6 @@ qualify_path("tenant_id", "cache", ["user"]) ## Original Text (ja) -### index - -- provided modules (ライブラリ提供モジュール) - 1. State - -- required modules (ライブラリ要求モジュール*) - 1. StoreClient - 2. StoreRegistry - -- internal modules (内部モジュール) - 1. core::Manifest - 2. Store - 3. Load - 4. u64(fixed_bits.rs) - 5. Pools & Maps(pool.rs) - 6. parser.rs - 7. 
LogFormat - -*: いずれもoptional(必須ではない)。FileClientのデフォルト実装のみ内蔵。 - ---- - -## Ports - -ライブラリの外部向けインターフェース定義modules - -1. Provided Port - -**State** がライブラリ唯一の公開APIです。 - -manifest YAMLの`_store`/`_load`定義に従い、`get()` / `set()` / `delete()` / `exists()`操作を提供するmoduleです。`get()`はkey missをトリガーに`_load`定義に基づいて自動ロードを試みます。`set()`は自動ロードを引き起こしません。`delete()`はストアとインスタンスキャッシュ両方から削除します。`exists()`は自動ロードを引き起こさずにkey存在確認を行います。 - -2. Required Ports - -ライブラリ動作時にimpl実装が必要なtraits - **StoreClient** 単一ストアのget/set/deleteを提供するtrait。`key`は予約引数。`args`にttl等の任意引数をflatなHashMapで渡す。内部可変性はimplementor側の責任。 @@ -323,55 +195,6 @@ YAMLの`client:`文字列と`StoreClient`の対応を管理するtrait。利用 7. **miss時、`Load::handle()` で自動ロード** 8. `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` を返却 ---- - -### State::set("filename.node", value, ttl) - -指定されたノードが表すステートに値をセットする。 - -戻り値: `Result` - -**動作:** -- `StoreRegistry::client_for(yaml_name)` → `StoreClient::set()` で保存 -- state_values (インスタンスキャッシュ) にも保存 -- ttl等のストア固有引数はStoreClient impl側で管理 - ---- - -### State::delete("filename.node") - -指定されたノードが表す {key:value} レコードを削除する。 - -戻り値: `Result` - ---- - -### State::exists("filename.node") - -自動ロードをトリガーせずに、キーの存在確認を行う。 - -戻り値: `Result` - ---- - -## Load::handle() - -`State::get()` が値をmissした際、`_load` 設定に従ってデータを取得する。 - -`_load.client` の値を `StoreRegistry::client_for()` に渡し、対応する `StoreClient::get()` を呼ぶ。 - -**設計ルール:** -- `_load` なし → 自動ロードなし、`Ok(None)` を返す -- `_load.client` なし → 自動ロードなし、`Ok(None)` を返す -- `_load.client: State` → `_load.key` の値を直接使用 -- その他のclient → `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` - -**再帰深度制限:** -- `max_recursion = 20` -- 上限超過または同一キーの再帰検出時に `Err(StateError::RecursionLimitExceeded)` を返す - ---- - ## error case **ManifestError:** diff --git a/examples/tenant.yml b/examples/tenant.yml index 7b4a6f7..e69dec5 100644 --- a/examples/tenant.yml +++ b/examples/tenant.yml @@ -1,8 +1,9 @@ session: user: _load: - client: tenant_db - key: users.id.${session.user.id} + client: TenantDb + key: 
"users.id.${session.user.id}" + config: ${tenant_db_client_config} id: _load: client: http_request @@ -63,7 +64,7 @@ common_db: driver: postgres charset: UTF8 -tenant_db: +tenant_db_client_config: _load: client:common_db key: tenants.id.${session.user.tenant.id} diff --git a/src/context.rs b/src/context.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/dsl.rs b/src/dsl.rs new file mode 100644 index 0000000..e9e6508 --- /dev/null +++ b/src/dsl.rs @@ -0,0 +1,66 @@ +pub struct Dsl { + paths: Vec<[u64]> // 固定長pathリスト。[0]がroot + children: Vec<[u32]> // 全pathの子path indexをフラットに連結 + leaves: Vec<[u8]> // leafデータのバイト列(継承解決済み_load/_store情報) + interning: Vec<[u8]> // 文字列のバイト列をフラットに連結(変動長)予約・client以外のkeyword及び値全部。 + interning_idx: Vec<[u64]> // offset + len interningのport用list +} + +impl Dsl { + pub fn new(Vec<[u64]>, Vec<[u32]>, Vec<[u8]>, Vec<[u64]>) -> Self{ + + }; + pub fn compile(&Privided::Tree) -> Vec<[u64]>, Vec<[u32]>, Vec<[u8]>, Vec<[u64]>{ + + }; +} +// Dsl::compile(Provided:Value);の出力 +// +// paths: Box<[u64]> // 固定長pathリスト。[0]がroot +// children: Box<[u32]> // 全pathの子path indexをフラットに連結 +// leaves: Box<[u8]> // leafデータのバイト列(継承解決済み_load/_store情報) +// interning: Box<[u8]> // 文字列のバイト列をフラットに連結(変動長)予約・client以外のkeyword及び値全部。 +// interning_idx: Box<[u64]> // offset + len interningのport用list + +// paths ([u64]) +// +// | field | bits | +// |------------|------| +// | is_leaf | 1 | +// | offset | 32 | +// | count | 8 | // is_leaf=0: 下4bit=子path数(1~16), 上4bit unused +// | | | // is_leaf=1: 上4bit=load_args count, 下4bit=store_args count +// | padding | 23 | + +// - `is_leaf=0`: path。`children[offset..offset+count[3:0]]` にpath indexが並ぶ +// - `is_leaf=1`: leaf path。`leaves[offset..]` にleafデータのバイト列が並ぶ。サイズは固定部+load_count×64bit+store_count×64bitで算出 +// - leafデータは継承解決済みの`_load`/`_store`情報 + +// children ([u32]) +// +// | field | bits | +// |----------|------| +// | path_idx | 32 | // path境界はpath.countで持つ + +// leaves +// +// | category | field | bits | +// 
|-------------|----------|------| +// | keyword | keyword_idx | 32 | // interning_idx +// | | value_idx | 32 | // dslにハードコードされてる値。interning_idx +// | _load | client_idx | 4 | // スクリプト内で定数化済み +// | | key_idx | 32 | // interning_idx +// | _store | client_idx | 4 | +// | | key_idx | 32 | +// | | padding | 24 | // ここまでを32の倍数bitに調整 +// | _load.args | args_key_idx[0] | 32 | // count分繰り返し。interning_idx +// | | args_value_idx[0] | 32 | // interning_idx +// | _store.args | args_key_idx[0] | 32 | // 同上 +// | | args_value_idx[0] | 32 | + +pub const CLIENT_NULL: u64 = 0b00; +pub const CLIENT_STATE: u64 = 0b01; + +pub const PROP_NULL: u64 = 0b00; +pub const PROP_KEY: u64 = 0b01; +pub const PROP_MAP: u64 = 0b10; \ No newline at end of file diff --git a/src/index.rs b/src/index.rs new file mode 100644 index 0000000..e69de29 diff --git a/src/ports/provided.rs b/src/ports/provided.rs index 28e655e..c4becdc 100644 --- a/src/ports/provided.rs +++ b/src/ports/provided.rs @@ -1,10 +1,10 @@ /// The value type used throughout state-engine's public API. /// Binding-agnostic — no serde, no std beyond Vec. 
#[derive(Debug, PartialEq, Clone)] -pub enum Value { +pub enum Tree { Scalar(Vec), - Sequence(Vec), - Mapping(Vec<(Vec, Value)>), + Sequence(Vec), + Mapping(Vec<(Vec, Tree)>), Null, } diff --git a/src/tree.rs b/src/tree.rs new file mode 100644 index 0000000..e69de29 From c64291be90521ba9bcd8b3965521aedcdccd6791 Mon Sep 17 00:00:00 2001 From: Andyou Date: Sun, 5 Apr 2026 19:49:33 +0900 Subject: [PATCH 14/41] add Tree for feature precompile --- Cargo.toml | 12 ++-- src/dsl.rs | 155 ++++++++++++++++++++++++++---------------- src/lib.rs | 23 +++---- src/log_format.rs | 34 ++++----- src/ports.rs | 1 - src/ports/default.rs | 15 ---- src/ports/provided.rs | 95 +++++++++++++------------- src/ports/required.rs | 99 ++++++--------------------- src/tree.rs | 107 +++++++++++++++++++++++++++++ 9 files changed, 300 insertions(+), 241 deletions(-) delete mode 100644 src/ports/default.rs diff --git a/Cargo.toml b/Cargo.toml index dfa2b34..2d91a8d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,14 +14,14 @@ categories = ["config", "data-structures", "template-engine", "parser-implementa exclude = ["examples/*"] [dependencies] -log = { version = "0.4", optional = true } -serde_yaml_ng = { version = "0.10", optional = true } +log = { version = "0.4", optional = true } +serde_yaml_ng = { version = "0.10", optional = true } [dev-dependencies] env_logger = "0.11" -ctor = "0.2" +ctor = "0.2" [features] -default = [] -logging = ["log"] -builder = ["serde_yaml_ng"] \ No newline at end of file +default = [] +logging = ["log"] +precompile = ["serde_yaml_ng"] diff --git a/src/dsl.rs b/src/dsl.rs index e9e6508..17ddf80 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -1,66 +1,105 @@ +use crate::ports::provided::Tree; + +// ── client_idx constants (4bit, stored in leaves) ───────────────────────────── + +pub const CLIENT_NULL: u8 = 0b0000; +pub const CLIENT_STATE: u8 = 0b0001; + +// ── prop constants ──────────────────────────────────────────────────────────── + +pub const PROP_NULL: u8 = 0b00; +pub 
const PROP_KEY: u8 = 0b01; +pub const PROP_MAP: u8 = 0b10; + +// ── path field masks (u64) ──────────────────────────────────────────────────── +// +// | field | bits | +// |---------|-------| +// | is_leaf | 1 | +// | offset | 32 | +// | count | 8 | // is_leaf=0: [3:0]=子path数, [7:4]=unused +// | | | // is_leaf=1: [7:4]=load_args count, [3:0]=store_args count +// | padding | 23 | + +pub const PATH_IS_LEAF_SHIFT: u64 = 63; +pub const PATH_OFFSET_SHIFT: u64 = 23; +pub const PATH_COUNT_SHIFT: u64 = 15; + +pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT; +pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT; +pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT; + +// ── Dsl ─────────────────────────────────────────────────────────────────────── + pub struct Dsl { - paths: Vec<[u64]> // 固定長pathリスト。[0]がroot - children: Vec<[u32]> // 全pathの子path indexをフラットに連結 - leaves: Vec<[u8]> // leafデータのバイト列(継承解決済み_load/_store情報) - interning: Vec<[u8]> // 文字列のバイト列をフラットに連結(変動長)予約・client以外のkeyword及び値全部。 - interning_idx: Vec<[u64]> // offset + len interningのport用list + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, } impl Dsl { - pub fn new(Vec<[u64]>, Vec<[u32]>, Vec<[u8]>, Vec<[u64]>) -> Self{ + pub fn new( + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, + ) -> Self { + Self { paths, children, leaves, interning, interning_idx } + } - }; - pub fn compile(&Privided::Tree) -> Vec<[u64]>, Vec<[u32]>, Vec<[u8]>, Vec<[u64]>{ - - }; + pub fn compile(tree: &Tree) -> ( + Box<[u64]>, + Box<[u32]>, + Box<[u8]>, + Box<[u8]>, + Box<[u64]>, + ) { + let mut compiler = Compiler::new(); + compiler.walk(tree); + compiler.finish() + } } -// Dsl::compile(Provided:Value);の出力 -// -// paths: Box<[u64]> // 固定長pathリスト。[0]がroot -// children: Box<[u32]> // 全pathの子path indexをフラットに連結 -// leaves: Box<[u8]> // leafデータのバイト列(継承解決済み_load/_store情報) 
-// interning: Box<[u8]> // 文字列のバイト列をフラットに連結(変動長)予約・client以外のkeyword及び値全部。 -// interning_idx: Box<[u64]> // offset + len interningのport用list -// paths ([u64]) -// -// | field | bits | -// |------------|------| -// | is_leaf | 1 | -// | offset | 32 | -// | count | 8 | // is_leaf=0: 下4bit=子path数(1~16), 上4bit unused -// | | | // is_leaf=1: 上4bit=load_args count, 下4bit=store_args count -// | padding | 23 | - -// - `is_leaf=0`: path。`children[offset..offset+count[3:0]]` にpath indexが並ぶ -// - `is_leaf=1`: leaf path。`leaves[offset..]` にleafデータのバイト列が並ぶ。サイズは固定部+load_count×64bit+store_count×64bitで算出 -// - leafデータは継承解決済みの`_load`/`_store`情報 - -// children ([u32]) -// -// | field | bits | -// |----------|------| -// | path_idx | 32 | // path境界はpath.countで持つ +// ── Compiler (internal) ─────────────────────────────────────────────────────── -// leaves -// -// | category | field | bits | -// |-------------|----------|------| -// | keyword | keyword_idx | 32 | // interning_idx -// | | value_idx | 32 | // dslにハードコードされてる値。interning_idx -// | _load | client_idx | 4 | // スクリプト内で定数化済み -// | | key_idx | 32 | // interning_idx -// | _store | client_idx | 4 | -// | | key_idx | 32 | -// | | padding | 24 | // ここまでを32の倍数bitに調整 -// | _load.args | args_key_idx[0] | 32 | // count分繰り返し。interning_idx -// | | args_value_idx[0] | 32 | // interning_idx -// | _store.args | args_key_idx[0] | 32 | // 同上 -// | | args_value_idx[0] | 32 | - -pub const CLIENT_NULL: u64 = 0b00; -pub const CLIENT_STATE: u64 = 0b01; - -pub const PROP_NULL: u64 = 0b00; -pub const PROP_KEY: u64 = 0b01; -pub const PROP_MAP: u64 = 0b10; \ No newline at end of file +struct Compiler { + paths: Vec, + children: Vec, + leaves: Vec, + interning: Vec, + interning_idx: Vec, +} + +impl Compiler { + fn new() -> Self { + Self { + paths: Vec::new(), + children: Vec::new(), + leaves: Vec::new(), + interning: Vec::new(), + interning_idx: Vec::new(), + } + } + + fn walk(&mut self, _tree: &Tree) { + todo!("compile Tree into flat lists") + } + + 
/// Intern a byte string, returning its interning_idx index. + fn intern(&mut self, _s: &[u8]) -> u32 { + todo!() + } + + fn finish(self) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>) { + ( + self.paths.into_boxed_slice(), + self.children.into_boxed_slice(), + self.leaves.into_boxed_slice(), + self.interning.into_boxed_slice(), + self.interning_idx.into_boxed_slice(), + ) + } +} diff --git a/src/lib.rs b/src/lib.rs index 2a6f673..0d86033 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,20 +1,13 @@ -mod core; -pub mod codec_value; pub mod log_format; pub mod ports; -pub mod load; -pub mod store; -pub mod state; +pub mod context; +pub mod tree; +pub mod dsl; pub use log_format::LogFormat; -pub use ports::provided::State as StateTrait; -pub use ports::default::DefaultFileClient; -pub use state::State; - -pub use ports::required::{ - DbClient, EnvClient, - KVSClient, InMemoryClient, - HttpClient, FileClient, +pub use ports::provided::{ + Tree, + ParseError, LoadError, StoreError, ContextError, + Context, }; - -pub use ports::provided::{ManifestError, StateError, LoadError, StoreError, Value}; +pub use ports::required::{StoreClient, StoreRegistry, SetOutcome}; diff --git a/src/log_format.rs b/src/log_format.rs index 0cb90fb..326113c 100644 --- a/src/log_format.rs +++ b/src/log_format.rs @@ -1,4 +1,4 @@ -use crate::ports::provided::Value; +use crate::ports::provided::Tree; /// # Examples /// ``` @@ -15,30 +15,30 @@ impl LogFormat { format!("{}::{}({})", class, fn_name, args_str) } - /// Format Value for log output + /// Format Tree for log output /// /// # Examples /// ``` - /// use state_engine::{LogFormat, Value}; + /// use state_engine::{LogFormat, Tree}; /// - /// assert_eq!(LogFormat::format_arg(&Value::Scalar(b"text".to_vec())), "'text'"); - /// assert_eq!(LogFormat::format_arg(&Value::Null), "null"); - /// assert_eq!(LogFormat::format_arg(&Value::Sequence(vec![])), "[]"); - /// assert_eq!(LogFormat::format_arg(&Value::Mapping(vec![])), "{}"); - /// 
assert_eq!(LogFormat::format_arg(&Value::Sequence(vec![Value::Null, Value::Null, Value::Null])), "[3 items]"); - /// assert_eq!(LogFormat::format_arg(&Value::Mapping(vec![(b"a".to_vec(), Value::Null)])), "{1 fields}"); + /// assert_eq!(LogFormat::format_arg(&Tree::Scalar(b"text".to_vec())), "'text'"); + /// assert_eq!(LogFormat::format_arg(&Tree::Null), "null"); + /// assert_eq!(LogFormat::format_arg(&Tree::Sequence(vec![])), "[]"); + /// assert_eq!(LogFormat::format_arg(&Tree::Mapping(vec![])), "{}"); + /// assert_eq!(LogFormat::format_arg(&Tree::Sequence(vec![Tree::Null, Tree::Null, Tree::Null])), "[3 items]"); + /// assert_eq!(LogFormat::format_arg(&Tree::Mapping(vec![(b"a".to_vec(), Tree::Null)])), "{1 fields}"); /// ``` - pub fn format_arg(value: &Value) -> String { + pub fn format_arg(value: &Tree) -> String { match value { - Value::Scalar(b) => { + Tree::Scalar(b) => { let s = String::from_utf8_lossy(b); if s.len() > 50 { format!("'{}'...", &s[..47]) } else { format!("'{}'", s) } } - Value::Sequence(arr) if arr.is_empty() => "[]".to_string(), - Value::Sequence(arr) => format!("[{} items]", arr.len()), - Value::Mapping(obj) if obj.is_empty() => "{}".to_string(), - Value::Mapping(obj) => format!("{{{} fields}}", obj.len()), - Value::Null => "null".to_string(), + Tree::Sequence(arr) if arr.is_empty() => "[]".to_string(), + Tree::Sequence(arr) => format!("[{} items]", arr.len()), + Tree::Mapping(obj) if obj.is_empty() => "{}".to_string(), + Tree::Mapping(obj) => format!("{{{} fields}}", obj.len()), + Tree::Null => "null".to_string(), } } @@ -99,7 +99,7 @@ mod tests { #[test] fn test_format_arg_long_string() { let long_str = "a".repeat(60); - let result = LogFormat::format_arg(&Value::Scalar(long_str.into_bytes())); + let result = LogFormat::format_arg(&Tree::Scalar(long_str.into_bytes())); assert!(result.starts_with("'aaa")); assert!(result.ends_with("'...")); assert_eq!(result.len(), 52); diff --git a/src/ports.rs b/src/ports.rs index fda2a87..3936de2 100644 
--- a/src/ports.rs +++ b/src/ports.rs @@ -1,6 +1,5 @@ pub mod provided; pub mod required; -pub mod default; pub use provided::*; pub use required::*; diff --git a/src/ports/default.rs b/src/ports/default.rs deleted file mode 100644 index 2e2b444..0000000 --- a/src/ports/default.rs +++ /dev/null @@ -1,15 +0,0 @@ -use crate::ports::required::FileClient; - -pub struct DefaultFileClient; - -impl FileClient for DefaultFileClient { - fn get(&self, path: &str) -> Option> { - std::fs::read(path).ok() - } - fn set(&self, path: &str, value: Vec) -> bool { - std::fs::write(path, value).is_ok() - } - fn delete(&self, path: &str) -> bool { - std::fs::remove_file(path).is_ok() - } -} diff --git a/src/ports/provided.rs b/src/ports/provided.rs index c4becdc..567952d 100644 --- a/src/ports/provided.rs +++ b/src/ports/provided.rs @@ -1,5 +1,19 @@ -/// The value type used throughout state-engine's public API. -/// Binding-agnostic — no serde, no std beyond Vec. +// Request-scoped context handle. Manages state per DSL definition. +pub trait Context { + /// Returns value from instance cache → _store, triggers _load on miss. + fn get(&mut self, key: &str) -> Result, ContextError>; + + /// Writes value to _store. Returns Ok(false) if no _store is configured. + fn set(&mut self, key: &str, value: Tree) -> Result; + + /// Removes value from _store. + fn delete(&mut self, key: &str) -> Result; + + /// Checks existence in cache or _store. Does not trigger _load. + fn exists(&mut self, key: &str) -> Result; +} + +// The value type used throughout context-engine's public API. 
#[derive(Debug, PartialEq, Clone)] pub enum Tree { Scalar(Vec), @@ -8,102 +22,85 @@ pub enum Tree { Null, } +// ── Errors ──────────────────────────────────────────────────────────────────── + #[derive(Debug, PartialEq)] -pub enum ManifestError { +pub enum ParseError { FileNotFound(String), AmbiguousFile(String), ParseError(String), } -impl std::fmt::Display for ManifestError { +impl std::fmt::Display for ParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ManifestError::FileNotFound(msg) => write!(f, "FileNotFound: {}", msg), - ManifestError::AmbiguousFile(msg) => write!(f, "AmbiguousFile: {}", msg), - ManifestError::ParseError(msg) => write!(f, "ParseError: {}", msg), + ParseError::FileNotFound(msg) => write!(f, "FileNotFound: {}", msg), + ParseError::AmbiguousFile(msg) => write!(f, "AmbiguousFile: {}", msg), + ParseError::ParseError(msg) => write!(f, "ParseError: {}", msg), } } } #[derive(Debug, PartialEq)] pub enum LoadError { - /// Required client (Env/KVS/DB/HTTP/File) is not configured. - ClientNotConfigured, - /// A required config key (key/url/table/map/connection) is missing. + /// StoreRegistry::client_for() returned None for the given yaml_name. + ClientNotFound(String), + /// A required config key is missing in the manifest. ConfigMissing(String), /// The client call succeeded but returned no data. NotFound(String), - /// JSON parse error from client response. + /// Parse error from client response. 
ParseError(String), } impl std::fmt::Display for LoadError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - LoadError::ClientNotConfigured => write!(f, "ClientNotConfigured"), - LoadError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), - LoadError::NotFound(msg) => write!(f, "NotFound: {}", msg), - LoadError::ParseError(msg) => write!(f, "ParseError: {}", msg), + LoadError::ClientNotFound(msg) => write!(f, "ClientNotFound: {}", msg), + LoadError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), + LoadError::NotFound(msg) => write!(f, "NotFound: {}", msg), + LoadError::ParseError(msg) => write!(f, "ParseError: {}", msg), } } } #[derive(Debug, PartialEq)] pub enum StoreError { - /// Required client (KVS/InMemory/HTTP/File) is not configured. - ClientNotConfigured, - /// A required config key (key/url/client) is missing. + /// StoreRegistry::client_for() returned None for the given yaml_name. + ClientNotFound(String), + /// A required config key is missing in the manifest. ConfigMissing(String), - /// JSON serialize error. + /// Serialize error. SerializeError(String), - /// Unsupported client id in config. 
- UnsupportedClient(u64), } impl std::fmt::Display for StoreError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - StoreError::ClientNotConfigured => write!(f, "ClientNotConfigured"), - StoreError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), - StoreError::SerializeError(msg) => write!(f, "SerializeError: {}", msg), - StoreError::UnsupportedClient(id) => write!(f, "UnsupportedClient: {}", id), + StoreError::ClientNotFound(msg) => write!(f, "ClientNotFound: {}", msg), + StoreError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), + StoreError::SerializeError(msg) => write!(f, "SerializeError: {}", msg), } } } #[derive(Debug, PartialEq)] -pub enum StateError { - ManifestLoadFailed(String), +pub enum ContextError { + ParseFailed(String), KeyNotFound(String), RecursionLimitExceeded, StoreFailed(StoreError), LoadFailed(LoadError), } -impl std::fmt::Display for StateError { +impl std::fmt::Display for ContextError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - StateError::ManifestLoadFailed(msg) => write!(f, "ManifestLoadFailed: {}", msg), - StateError::KeyNotFound(msg) => write!(f, "KeyNotFound: {}", msg), - StateError::RecursionLimitExceeded => write!(f, "RecursionLimitExceeded"), - StateError::StoreFailed(e) => write!(f, "StoreFailed: {}", e), - StateError::LoadFailed(e) => write!(f, "LoadFailed: {}", e), + ContextError::ParseFailed(msg) => write!(f, "ParseFailed: {}", msg), + ContextError::KeyNotFound(msg) => write!(f, "KeyNotFound: {}", msg), + ContextError::RecursionLimitExceeded => write!(f, "RecursionLimitExceeded"), + ContextError::StoreFailed(e) => write!(f, "StoreFailed: {}", e), + ContextError::LoadFailed(e) => write!(f, "LoadFailed: {}", e), } } } - -/// The primary interface for state-engine. Manages state per manifest definition. -pub trait State { - /// Returns value from _store, or triggers _load on miss. 
- fn get(&mut self, key: &str) -> Result, StateError>; - - /// Writes value to _store. Returns Ok(false) if no _store is configured. - /// `ttl` overrides manifest definition (KVS only). - fn set(&mut self, key: &str, value: Value, ttl: Option) -> Result; - - /// Removes value from _store. - fn delete(&mut self, key: &str) -> Result; - - /// Checks existence in cache or _store. Does not trigger _load. - fn exists(&mut self, key: &str) -> Result; -} diff --git a/src/ports/required.rs b/src/ports/required.rs index 9daf34f..f197ce9 100644 --- a/src/ports/required.rs +++ b/src/ports/required.rs @@ -1,86 +1,25 @@ -use crate::ports::provided::Value; +use std::collections::HashMap; +use crate::ports::provided::Tree; -/// In-process memory store. Internal mutability is the implementor's responsibility. -pub trait InMemoryClient: Send + Sync { - fn get(&self, key: &str) -> Option; - fn set(&self, key: &str, value: Value) -> bool; - fn delete(&self, key: &str) -> bool; +// Outcome of a StoreClient::set call. +pub enum SetOutcome { + Created, + Updated, } -/// KVS store. Serialization/deserialization is handled by the adapter. -/// Internal mutability is the implementor's responsibility. -pub trait KVSClient: Send + Sync { - fn get(&self, key: &str) -> Option>; - /// `ttl` in seconds. - fn set(&self, key: &str, value: Vec, ttl: Option) -> bool; - fn delete(&self, key: &str) -> bool; +// Single-store adapter. Implemented by the library user per backing store. +// +// - `key`: the value of `_load.key` / `_store.key` from the manifest. Reserved arg. +// - `args`: all other manifest args (ttl, connection, headers, etc.) as a flat map. +// The implementor defines and reads whatever keys it needs. +// - Thread-safety and internal mutability are the implementor's responsibility. 
+pub trait StoreClient: Send + Sync { + fn get(&self, key: &str, args: &HashMap<&str, Tree>) -> Option; + fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option; + fn delete(&self, key: &str, args: &HashMap<&str, Tree>) -> bool; } -/// Environment / config store. -/// `keys` is the list of external key names (map values from manifest). -/// Returns values in the same order as `keys`. -/// Internal mutability is the implementor's responsibility. -pub trait EnvClient: Send + Sync { - fn get(&self, keys: &[Vec]) -> Option>; - fn set(&self, key: &str, value: Vec) -> bool; - fn delete(&self, key: &str) -> bool; -} - -/// Relational DB client. -/// Do NOT call State inside DbClient — it would cause recursion. -/// `connection` is a Value::Mapping resolved from the manifest. -/// `keys` is the list of db column names (map values from manifest). -/// Returns values in the same order as `keys`. -pub trait DbClient: Send + Sync { - fn get( - &self, - connection: &Value, - table: &str, - keys: &[Vec], - where_clause: Option<&[u8]>, - ) -> Option>; - fn set( - &self, - connection: &Value, - table: &str, - keys: &[Vec], - where_clause: Option<&[u8]>, - ) -> bool; - fn delete( - &self, - connection: &Value, - table: &str, - where_clause: Option<&[u8]>, - ) -> bool; -} - -/// HTTP client. -/// `keys` is the list of response field names (map values from manifest). -/// Returns values in the same order as `keys`. -/// `headers` is an optional list of (name, value) byte pairs. -pub trait HttpClient: Send + Sync { - fn get( - &self, - url: &str, - keys: &[Vec], - headers: Option<&[(Vec, Vec)]>, - ) -> Option>; - fn set( - &self, - url: &str, - body: Value, - headers: Option<&[(Vec, Vec)]>, - ) -> bool; - fn delete( - &self, - url: &str, - headers: Option<&[(Vec, Vec)]>, - ) -> bool; -} - -/// File client. 
-pub trait FileClient: Send + Sync { - fn get(&self, key: &str) -> Option>; - fn set(&self, key: &str, value: Vec) -> bool; - fn delete(&self, key: &str) -> bool; +/// Dispatches yaml_name → StoreClient. Implemented by the library user. +pub trait StoreRegistry { + fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient>; } diff --git a/src/tree.rs b/src/tree.rs index e69de29..a52ac7a 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -0,0 +1,107 @@ +#[cfg(feature = "precompile")] +mod inner { + use crate::ports::provided::Tree as Value; + + pub struct Tree { + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, + } + + impl Tree { + pub fn new( + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, + ) -> Self { + Self { paths, children, leaves, interning, interning_idx } + } + + pub fn write(&self, path: &str) -> std::io::Result<()> { + let mut out = String::new(); + out.push_str("// @generated — do not edit by hand\n\n"); + push_u64_slice(&mut out, "PATHS", &self.paths); + push_u32_slice(&mut out, "CHILDREN", &self.children); + push_u8_slice (&mut out, "LEAVES", &self.leaves); + push_u8_slice (&mut out, "INTERNING", &self.interning); + push_u64_slice(&mut out, "INTERNING_IDX", &self.interning_idx); + std::fs::write(path, out) + } + + /// Parse a YAML byte slice into a `Value` tree. 
+ pub fn parse(src: &[u8]) -> Result { + let s = std::str::from_utf8(src) + .map_err(|e| format!("UTF-8 error: {e}"))?; + let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(s) + .map_err(|e| format!("YAML parse error: {e}"))?; + Ok(yaml_to_tree(yaml)) + } + } + + fn yaml_to_tree(v: serde_yaml_ng::Value) -> Value { + match v { + serde_yaml_ng::Value::Mapping(m) => Value::Mapping( + m.into_iter() + .filter_map(|(k, v)| { + if let serde_yaml_ng::Value::String(s) = k { + Some((s.into_bytes(), yaml_to_tree(v))) + } else { + None + } + }) + .collect(), + ), + serde_yaml_ng::Value::Sequence(s) => { + Value::Sequence(s.into_iter().map(yaml_to_tree).collect()) + } + serde_yaml_ng::Value::String(s) => Value::Scalar(s.into_bytes()), + serde_yaml_ng::Value::Number(n) => Value::Scalar(n.to_string().into_bytes()), + serde_yaml_ng::Value::Bool(b) => Value::Scalar(b.to_string().into_bytes()), + serde_yaml_ng::Value::Null => Value::Null, + _ => Value::Null, + } + } + + fn push_u64_slice(out: &mut String, name: &str, data: &[u64]) { + out.push_str(&format!("pub static {name}: &[u64] = &[\n")); + for chunk in data.chunks(8) { + out.push_str(" "); + for v in chunk { + out.push_str(&format!("0x{v:016x}, ")); + } + out.push('\n'); + } + out.push_str("];\n\n"); + } + + fn push_u32_slice(out: &mut String, name: &str, data: &[u32]) { + out.push_str(&format!("pub static {name}: &[u32] = &[\n")); + for chunk in data.chunks(8) { + out.push_str(" "); + for v in chunk { + out.push_str(&format!("0x{v:08x}, ")); + } + out.push('\n'); + } + out.push_str("];\n\n"); + } + + fn push_u8_slice(out: &mut String, name: &str, data: &[u8]) { + out.push_str(&format!("pub static {name}: &[u8] = &[\n")); + for chunk in data.chunks(16) { + out.push_str(" "); + for v in chunk { + out.push_str(&format!("0x{v:02x}, ")); + } + out.push('\n'); + } + out.push_str("];\n\n"); + } +} + +#[cfg(feature = "precompile")] +pub use inner::Tree; From b728796b2c6d2d0bfdffd45729917273866797a5 Mon Sep 17 00:00:00 2001 
From: Andyou Date: Sun, 5 Apr 2026 23:32:35 +0900 Subject: [PATCH 15/41] update signature --- src/context.rs | 260 +++++++++++++++++++++++++++++++++++++++++++++++++ src/index.rs | 99 +++++++++++++++++++ src/lib.rs | 1 + 3 files changed, 360 insertions(+) diff --git a/src/context.rs b/src/context.rs index e69de29..f9ce408 100644 --- a/src/context.rs +++ b/src/context.rs @@ -0,0 +1,260 @@ +use std::collections::HashSet; +use std::sync::Arc; + +use crate::index::Index; +use crate::ports::provided::{Context as ContextTrait, ContextError, StoreError, LoadError, Tree}; +use crate::ports::required::{StoreRegistry, SetOutcome}; + +// ── Context ─────────────────────────────────────────────────────────────────── + +pub struct Context<'r> { + index: Arc, + registry: &'r dyn StoreRegistry, + cache_keys: Vec, // path_idx + cache_vals: Vec, // parallel to cache_keys + called_keys: HashSet, + max_recursion: usize, +} + +impl<'r> Context<'r> { + pub fn new(index: Arc, registry: &'r dyn StoreRegistry) -> Self { + Self { + index, + registry, + cache_keys: Vec::new(), + cache_vals: Vec::new(), + called_keys: HashSet::new(), + max_recursion: 20, + } + } + + fn cache_get(&self, path_idx: u32) -> Option<&Tree> { + self.cache_keys.iter() + .position(|&k| k == path_idx) + .and_then(|i| self.cache_vals.get(i)) + } + + fn cache_set(&mut self, path_idx: u32, value: Tree) { + if let Some(i) = self.cache_keys.iter().position(|&k| k == path_idx) { + self.cache_vals[i] = value; + } else { + self.cache_keys.push(path_idx); + self.cache_vals.push(value); + } + } + + fn cache_remove(&mut self, path_idx: u32) { + if let Some(i) = self.cache_keys.iter().position(|&k| k == path_idx) { + self.cache_keys[i] = u32::MAX; + self.cache_vals[i] = Tree::Null; + } + } + + fn guard_recursion(&self, path_idx: u32) -> Result<(), ContextError> { + if self.called_keys.len() >= self.max_recursion || self.called_keys.contains(&path_idx) { + return Err(ContextError::RecursionLimitExceeded); + } + Ok(()) + } +} + +// 
── Context trait impl ──────────────────────────────────────────────────────── + +impl<'r> ContextTrait for Context<'r> { + fn get(&mut self, key: &str) -> Result, ContextError> { + let leaves = self.index.traverse(key); + if leaves.is_empty() { + return Err(ContextError::KeyNotFound(key.to_string())); + } + + // single leaf → return value directly + // multiple leaves → return Mapping of leaf results + if leaves.len() == 1 { + let leaf = &leaves[0]; + self.guard_recursion(leaf.path_idx)?; + self.called_keys.insert(leaf.path_idx); + + let result = self.resolve_leaf(leaf.path_idx, leaf.leaf_offset); + + self.called_keys.remove(&leaf.path_idx); + result + } else { + let mut pairs: Vec<(Vec, Tree)> = Vec::new(); + for leaf in leaves.iter() { + self.guard_recursion(leaf.path_idx)?; + self.called_keys.insert(leaf.path_idx); + + let value = self.resolve_leaf(leaf.path_idx, leaf.leaf_offset)?; + + self.called_keys.remove(&leaf.path_idx); + if let Some(v) = value { + let keyword = self.index.keyword_of(leaf.path_idx).to_vec(); + pairs.push((keyword, v)); + } + } + Ok(if pairs.is_empty() { None } else { Some(Tree::Mapping(pairs)) }) + } + } + + fn set(&mut self, key: &str, value: Tree) -> Result { + let leaves = self.index.traverse(key); + if leaves.is_empty() { + return Err(ContextError::KeyNotFound(key.to_string())); + } + let leaf = &leaves[0]; + + let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let client = self.registry.client_for(yaml_name) + .ok_or_else(|| ContextError::StoreFailed( + StoreError::ClientNotFound(yaml_name.to_string()) + ))?; + + let store_key = args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }).ok_or_else(|| ContextError::StoreFailed( + StoreError::ConfigMissing("key".to_string()) + ))?; + + let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + .map(|(k, v)| (k.as_str(), v.clone())) + .collect(); + + match client.set(store_key, &args_ref) { + 
Some(SetOutcome::Created) | Some(SetOutcome::Updated) => { + self.cache_set(leaf.path_idx, value); + Ok(true) + } + None => Ok(false), + } + } + + fn delete(&mut self, key: &str) -> Result { + let leaves = self.index.traverse(key); + if leaves.is_empty() { + return Err(ContextError::KeyNotFound(key.to_string())); + } + let leaf = &leaves[0]; + + let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let client = self.registry.client_for(yaml_name) + .ok_or_else(|| ContextError::StoreFailed( + StoreError::ClientNotFound(yaml_name.to_string()) + ))?; + + let store_key = args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }).ok_or_else(|| ContextError::StoreFailed( + StoreError::ConfigMissing("key".to_string()) + ))?; + + let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + .map(|(k, v)| (k.as_str(), v.clone())) + .collect(); + + let ok = client.delete(store_key, &args_ref); + if ok { + self.cache_remove(leaf.path_idx); + } + Ok(ok) + } + + fn exists(&mut self, key: &str) -> Result { + let leaves = self.index.traverse(key); + if leaves.is_empty() { + return Err(ContextError::KeyNotFound(key.to_string())); + } + let leaf = &leaves[0]; + + if let Some(v) = self.cache_get(leaf.path_idx) { + return Ok(!matches!(v, Tree::Null)); + } + + let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let Some(client) = self.registry.client_for(yaml_name) else { + return Ok(false); + }; + + let store_key = match args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }) { + Some(k) => k, + None => return Ok(false), + }; + + let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + .map(|(k, v)| (k.as_str(), v.clone())) + .collect(); + + Ok(client.get(store_key, &args_ref).is_some()) + } +} + +// ── private helpers ─────────────────────────────────────────────────────────── + +impl<'r> Context<'r> { + fn resolve_leaf(&mut self, 
path_idx: u32, leaf_offset: u32) -> Result, ContextError> { + // 1. instance cache + if let Some(v) = self.cache_get(path_idx) { + return Ok(Some(v.clone())); + } + + // 2. _store + let (store_name, store_args) = self.index.store_args(leaf_offset); + if !store_name.is_empty() { + if let Some(client) = self.registry.client_for(store_name) { + let key = store_args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }).ok_or_else(|| ContextError::StoreFailed( + StoreError::ConfigMissing("key".to_string()) + ))?; + let args_ref: std::collections::HashMap<&str, Tree> = store_args.iter() + .map(|(k, v)| (k.as_str(), v.clone())) + .collect(); + if let Some(value) = client.get(key, &args_ref) { + self.cache_set(path_idx, value.clone()); + return Ok(Some(value)); + } + } + } + + // 3. _load + let (load_name, load_args) = self.index.load_args(leaf_offset); + if load_name.is_empty() { + return Ok(None); + } + let client = self.registry.client_for(load_name) + .ok_or_else(|| ContextError::LoadFailed( + LoadError::ClientNotFound(load_name.to_string()) + ))?; + let key = load_args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }).ok_or_else(|| ContextError::LoadFailed( + LoadError::ConfigMissing("key".to_string()) + ))?; + let args_ref: std::collections::HashMap<&str, Tree> = load_args.iter() + .map(|(k, v)| (k.as_str(), v.clone())) + .collect(); + let value = client.get(key, &args_ref) + .ok_or_else(|| ContextError::LoadFailed( + LoadError::NotFound(key.to_string()) + ))?; + + // write-through to _store if configured + if !store_name.is_empty() { + if let Some(store_client) = self.registry.client_for(store_name) { + let store_key = store_args.get("key").and_then(|v| { + if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + }); + if let Some(sk) = store_key { + let sargs: std::collections::HashMap<&str, Tree> = store_args.iter() + .map(|(k, v)| 
(k.as_str(), v.clone())) + .collect(); + store_client.set(sk, &sargs); + } + } + } + + self.cache_set(path_idx, value.clone()); + Ok(Some(value)) + } +} diff --git a/src/index.rs b/src/index.rs index e69de29..42cb421 100644 --- a/src/index.rs +++ b/src/index.rs @@ -0,0 +1,99 @@ +use crate::dsl::{ + PATH_IS_LEAF_MASK, PATH_OFFSET_SHIFT, PATH_OFFSET_MASK, PATH_COUNT_SHIFT, PATH_COUNT_MASK, +}; + +// ── LeafRef ─────────────────────────────────────────────────────────────────── + +pub struct LeafRef { + pub path_idx: u32, + pub leaf_offset: u32, +} + +// ── Index ───────────────────────────────────────────────────────────────────── + +pub struct Index { + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, +} + +impl Index { + pub fn new( + paths: Box<[u64]>, + children: Box<[u32]>, + leaves: Box<[u8]>, + interning: Box<[u8]>, + interning_idx: Box<[u64]>, + ) -> Self { + Self { paths, children, leaves, interning, interning_idx } + } + + /// Traverse to the path node matching `path` (dot-separated keywords), + /// then collect all leaf descendants into a flat list. + pub fn traverse(&self, path: &str) -> Box<[LeafRef]> { + let mut result = Vec::new(); + let Some(path_idx) = self.find(path) else { + return result.into_boxed_slice(); + }; + self.collect_leaves(path_idx, &mut result); + result.into_boxed_slice() + } + + /// Walk the interning list to find the path_idx matching the dot-separated `path`. + fn find(&self, path: &str) -> Option { + let mut current: u32 = 0; // root + for keyword in path.split('.') { + current = self.find_child(current, keyword.as_bytes())?; + } + Some(current) + } + + /// Among the children of `path_idx`, find the one whose interning keyword matches `keyword`. 
+ fn find_child(&self, path_idx: u32, keyword: &[u8]) -> Option { + let path = self.paths[path_idx as usize]; + let offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + let count = (((path & PATH_COUNT_MASK) >> PATH_COUNT_SHIFT) & 0xf) as usize; + + for i in 0..count { + let child_idx = self.children[offset + i]; + if self.keyword_of(child_idx) == keyword { + return Some(child_idx); + } + } + None + } + + /// Recursively collect all leaf descendants of `path_idx`. + fn collect_leaves(&self, path_idx: u32, out: &mut Vec) { + let path = self.paths[path_idx as usize]; + if path & PATH_IS_LEAF_MASK != 0 { + let leaf_offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as u32; + out.push(LeafRef { path_idx, leaf_offset }); + return; + } + let offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + let count = (((path & PATH_COUNT_MASK) >> PATH_COUNT_SHIFT) & 0xf) as usize; + for i in 0..count { + self.collect_leaves(self.children[offset + i], out); + } + } + + /// Resolve the keyword bytes of a path node from the interning list. + pub fn keyword_of(&self, path_idx: u32) -> &[u8] { + todo!("resolve keyword from interning via path_idx") + } + + /// Extract _load client yaml_name and args from leaves at `leaf_offset`. + /// Returns ("", empty) if no _load is configured. + pub fn load_args(&self, leaf_offset: u32) -> (&str, std::collections::HashMap) { + todo!("decode _load from leaves[leaf_offset..]") + } + + /// Extract _store client yaml_name and args from leaves at `leaf_offset`. + /// Returns ("", empty) if no _store is configured. 
+ pub fn store_args(&self, leaf_offset: u32) -> (&str, std::collections::HashMap) { + todo!("decode _store from leaves[leaf_offset..]") + } +} diff --git a/src/lib.rs b/src/lib.rs index 0d86033..28f5e11 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,6 +3,7 @@ pub mod ports; pub mod context; pub mod tree; pub mod dsl; +pub mod index; pub use log_format::LogFormat; pub use ports::provided::{ From b76963ded55854984d46ae5dd4e87dd63a9182cb Mon Sep 17 00:00:00 2001 From: Andyou Date: Mon, 6 Apr 2026 01:44:37 +0900 Subject: [PATCH 16/41] update yml --- examples/tenant.yml | 111 ++++++++++++++++++++------------------------ 1 file changed, 51 insertions(+), 60 deletions(-) diff --git a/examples/tenant.yml b/examples/tenant.yml index e69dec5..5d2686f 100644 --- a/examples/tenant.yml +++ b/examples/tenant.yml @@ -1,83 +1,74 @@ session: user: - _load: - client: TenantDb - key: "users.id.${session.user.id}" - config: ${tenant_db_client_config} id: _load: - client: http_request - key: authorization.user + client: Memory + key: "request.authorization.user" + _load: + client: TenantDb + key: "users.id.${session.user.id}" + config: ${tenant_db_config} + map: + password_hash: "password_hash" + name: "name" + email: "email" + is_manager: "is_manager" + preference.color_mode: "color_mode" password_hash: - _load: - client: tenant_db - key: users.id.${session.user.id} - secondary_key: password_hash + name: + email: + is_manager: + preference: + color-mode: + tenant: - _load: - client: common_db - key: tenants.id.${session.user.tenant.id} id: _load: - client: http_request - key: authorization.tenant + client: Memory + key: "request.authorization.tenant" + _load: + client: CommonDb + config: ${common_db_config} + key: "tenants.id.${session.user.tenant.id}" + map: + code: "code" + name: "name" + email: "email" + locale: "locale" + allowed_ips: "allowed_ips" code: - _load: - secondary_key: code - _store: - client: redis name: - _load: - secondary_key: name email: - _load: - secondary_key: 
email locale: - _load: - secondary_key: locale allowed_ips: - _load: - secondary_key: allowed_ips - is_manager: - _load: - secondary_key: is_manager - name: - _load: - secondary_key: name - email: - _load: - secondary_key: email - preference: - color-mode: - _load: - secondary_key: preference.color-mode -common_db: +common_db_config: _load: - client: env + client: Env + map: + host: "COMMON_DB_HOST" + port: "COMMON_DB_PORT" + username: "COMMON_DB_USERNAME" + password: "COMMON_DB_PASSWORD" host: - _load: - key: host port: - _load: - key: port - driver: postgres - charset: UTF8 + username: + password: + driver: "postgres" + charset: "UTF8" -tenant_db_client_config: +tenant_db_config: _load: - client:common_db - key: tenants.id.${session.user.tenant.id} - + client: CommonDb + key: "tenants.id.${session.user.tenant.id}" + map: + host: "host" + port: "port" + username: "username" + password: "password" host: - _load: - secondary_key: host port: - _load: - secondary_key: port username: - _load: - secondary_key: username password: - _load: - secondary_key: password \ No newline at end of file + driver: "postgres" + charset: "UTF8" \ No newline at end of file From c082cea84da2d8588f812cb4a56e90e8ee17f262 Mon Sep 17 00:00:00 2001 From: Andyou Date: Mon, 6 Apr 2026 02:17:16 +0900 Subject: [PATCH 17/41] u yml --- examples/tenant.yml | 71 +++++++++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/examples/tenant.yml b/examples/tenant.yml index 5d2686f..33d1216 100644 --- a/examples/tenant.yml +++ b/examples/tenant.yml @@ -4,22 +4,24 @@ session: _load: client: Memory key: "request.authorization.user" + _load: client: TenantDb key: "users.id.${session.user.id}" - config: ${tenant_db_config} + connection: ${connection.tenant_db} map: password_hash: "password_hash" name: "name" email: "email" is_manager: "is_manager" preference.color_mode: "color_mode" + password_hash: name: email: is_manager: preference: - color-mode: + 
color_mode: tenant: id: @@ -28,7 +30,7 @@ session: key: "request.authorization.tenant" _load: client: CommonDb - config: ${common_db_config} + connection: ${connection.common_db} key: "tenants.id.${session.user.tenant.id}" map: code: "code" @@ -36,39 +38,44 @@ session: email: "email" locale: "locale" allowed_ips: "allowed_ips" + code: name: email: locale: allowed_ips: -common_db_config: - _load: - client: Env - map: - host: "COMMON_DB_HOST" - port: "COMMON_DB_PORT" - username: "COMMON_DB_USERNAME" - password: "COMMON_DB_PASSWORD" - host: - port: - username: - password: - driver: "postgres" - charset: "UTF8" +connection: + common_db: + _load: + client: Env + map: + host: "COMMON_DB_HOST" + port: "COMMON_DB_PORT" + username: "COMMON_DB_USERNAME" + password: "COMMON_DB_PASSWORD" + + host: + port: + username: + password: + driver: "postgres" + charset: "UTF8" -tenant_db_config: - _load: - client: CommonDb - key: "tenants.id.${session.user.tenant.id}" - map: - host: "host" - port: "port" - username: "username" - password: "password" - host: - port: - username: - password: - driver: "postgres" - charset: "UTF8" \ No newline at end of file + tenant_db: + _load: + client: CommonDb + connection: ${connection.common_db} + key: "tenants.id.${session.user.tenant.id}" + map: + host: "host" + port: "port" + username: "username" + password: "password" + + host: + port: + username: + password: + driver: "postgres" + charset: "UTF8" From e2c7e6b793f31e962d4ad4457a29ecd3081bc2f6 Mon Sep 17 00:00:00 2001 From: Andyou Date: Mon, 6 Apr 2026 08:15:45 +0900 Subject: [PATCH 18/41] update repo --- README.md | 20 +- docs/Architecture.md | 29 +- docs/Dsl_guide.md | 367 +++-------------- examples/adapters/db_client.rs | 168 -------- examples/adapters/env_client.rs | 23 -- examples/adapters/http_client.rs | 25 -- examples/adapters/in_memory.rs | 36 -- examples/adapters/kvs_client.rs | 82 ---- examples/adapters/mod.rs | 11 - examples/app/.env | 10 - examples/app/Cargo.toml | 22 - 
examples/app/Dockerfile | 19 - examples/app/README.md | 309 -------------- examples/app/db/000_create_databases.sh | 7 - examples/app/db/001_init.sql | 52 --- examples/app/docker-compose.yml | 51 --- examples/app/run.sh | 8 - examples/app/src/adapters.rs | 10 - examples/app/src/main.rs | 338 --------------- examples/implements.rs | 116 ++++++ examples/manifest/cache.yml | 54 --- examples/manifest/connection.yml | 74 ---- examples/manifest/session.yml | 11 - examples/tenant.yml | 21 +- src/{codec_value.rs => codec.rs} | 2 +- src/lib.rs | 1 + src/load.rs | 524 ------------------------ src/ports.rs | 2 + src/store.rs | 456 --------------------- src/{core => unused}/codec.rs | 0 src/{core => unused}/fixed_bits.rs | 0 src/{core => unused}/manifest.rs | 0 src/{core => unused}/mod.rs | 0 src/{core => unused}/parser.rs | 0 src/{core => unused}/pool.rs | 0 src/{ => unused}/state.rs | 0 36 files changed, 228 insertions(+), 2620 deletions(-) delete mode 100644 examples/adapters/db_client.rs delete mode 100644 examples/adapters/env_client.rs delete mode 100644 examples/adapters/http_client.rs delete mode 100644 examples/adapters/in_memory.rs delete mode 100644 examples/adapters/kvs_client.rs delete mode 100644 examples/adapters/mod.rs delete mode 100644 examples/app/.env delete mode 100644 examples/app/Cargo.toml delete mode 100644 examples/app/Dockerfile delete mode 100644 examples/app/README.md delete mode 100644 examples/app/db/000_create_databases.sh delete mode 100644 examples/app/db/001_init.sql delete mode 100644 examples/app/docker-compose.yml delete mode 100755 examples/app/run.sh delete mode 100644 examples/app/src/adapters.rs delete mode 100644 examples/app/src/main.rs delete mode 100644 examples/manifest/cache.yml delete mode 100644 examples/manifest/connection.yml delete mode 100644 examples/manifest/session.yml rename src/{codec_value.rs => codec.rs} (99%) delete mode 100644 src/load.rs delete mode 100644 src/store.rs rename src/{core => unused}/codec.rs (100%) 
rename src/{core => unused}/fixed_bits.rs (100%) rename src/{core => unused}/manifest.rs (100%) rename src/{core => unused}/mod.rs (100%) rename src/{core => unused}/parser.rs (100%) rename src/{core => unused}/pool.rs (100%) rename src/{ => unused}/state.rs (100%) diff --git a/README.md b/README.md index 791af79..f3242bd 100644 --- a/README.md +++ b/README.md @@ -109,19 +109,27 @@ see for details [Architecture.md](./docs/Architecture.md) ``` ./ - README.md # this + README.md Cargo.toml docs/ - Dsl_guide.md Architecture.md - + Dsl_guide.md src/ + lib.rs + context.rs + dsl.rs + index.rs + tree.rs + log_format.rs + codec.rs + ports.rs ports/ - + provided.rs + required.rs + unused/ # reference: old implementation examples/ - manifest.yml + tenant.yml implements.rs - app/ ``` ## Test diff --git a/docs/Architecture.md b/docs/Architecture.md index cba3200..98d3c74 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -56,8 +56,8 @@ leaf_key: 子にkeyを持たず値を持つkey value: leaf keysの値。DSL内で省略された場合はnullが充てられる path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン qualified_path: DSL内で一意な完全修飾パス -placeholder: key参照記述("${path}")。valueのみに適用 -template: placeholderと静的な文字列を混合した、動的生成テンプレート。valueのみに適用 +placeholder: key参照記述("${path}")。valueのみに適用。単独記述時はis_template=falseとして扱い、値をそのままコピーする(string化しない) +template: placeholderと静的な文字列を混合した動的生成文字列。valueのみに適用。is_template=trueとして扱い、解決時にstring化する called_path: Stateに渡されるパス文字列 ``` @@ -114,7 +114,7 @@ impl StoreRegistry for MyStores { --- -## Context Instance Cache +## Instance Cache An instance-level cache separate from persistent stores. 
@@ -144,8 +144,9 @@ qualify_path("tenant_id", "cache", ["user"]) → "cache.user.tenant_id" ``` -**Placeholder resolution at State runtime (`resolve_value_to_string()`):** -- Call `State::get(qualified_path)` to get the value +**Placeholder resolution at runtime:** +- `is_template=false`(単独 `${path}`): `Context.get(qualified_path)` の値をそのままコピー(string化しない) +- `is_template=true`(文字列混在): 各placeholderを `Context.get()` で解決しstringとして結合 ## error case @@ -179,21 +180,19 @@ YAMLの`client:`文字列と`StoreClient`の対応を管理するtrait。利用 ## State -### State::get("filename.node") +### Context.get() -指定されたノードが表すステートを参照し、値またはcollectionを返却する。 +指定されたノードが表す値群を参照し、値またはcollectionを返却する。 戻り値: `Result, StateError>` **動作フロー:** -1. `called_keys` チェック(再帰・上限検出) -2. `DefaultFileClient`経由でmanifestファイルをロード(未ロード時のみ) -3. intern listをパス文字列で検索・トラバース → key位置を特定 -4. **state_values (インスタンスキャッシュ) をチェック** ← 最優先 -5. `core::Manifest::get_meta()` → MetaIndices 取得 -6. `_load.client == State` の場合はストアをスキップ。それ以外: `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` -7. **miss時、`Load::handle()` で自動ロード** -8. `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` を返却 +1. called チェック(再帰・上限検出) +2. `Index::traverse()` +3. cache (インスタンスキャッシュ) をチェック +4. `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` +5. **miss時、`Load::handle()` で自動ロード** +6. 
`Ok(Some(value))` / `Ok(None)` / `Err(StateError)` を返却 ## error case diff --git a/docs/Dsl_guide.md b/docs/Dsl_guide.md index dbc9e9c..b1b9f1d 100644 --- a/docs/Dsl_guide.md +++ b/docs/Dsl_guide.md @@ -1,352 +1,113 @@ # DSL guide -## terms +## 用語 -- `meta keys`: keys prefixed with `_`, along with all keys nested beneath them -- `field keys`: keys that are not meta keys -- `leaf keys`: keys that hold a value instead of child keys -- `value`: a leaf key's value; equals null when omitted in YAML -- `path`: dot-separated key names leading from a start key to the target key -- `qualified path`: a path starting with `filename.`, uniquely identifying a key across all files -- `placeholder`: notation in the form `${path}` that references the result of `State::get()` for the specified key -- `template`: notation that embeds one or more placeholders into a string, such as `"user:${user_id}"` +``` +key: n層マップDSLの最末端value以外の要素 +keyword: keyの名前文字列 +field_key: 自身と親祖先のkeywordが'_'で始まらないkey +meta_key: keywordが'_'始まりのkeyと、その子孫key +leaf_key: 子にkeyを持たず値を持つkey +value: leaf_keyの値。DSL内で省略された場合はnullが充てられる +path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン +qualified_path: DSL内で一意な完全修飾パス +placeholder: key参照記述("${path}")。valueのみに適用。 + 単独記述時はis_template=falseとして扱い、値をそのままコピーする(string化しない) +template: placeholderと静的な文字列を混合した動的生成文字列。valueのみに適用。 + is_template=trueとして扱い、解決時にstring化する +called_path: Context.get()等に渡されるパス文字列 +``` -## rules +## Rules - YAML document separators (`---`) are not supported -- `placeholder` and `template` are only valid inside values +- `${}` (placeholder / template) are only valid inside values ## Basic Structure ```yaml field_key: - _state: # Data type definition (optional) - _store: # Where to save (required at root, inherited by children) + _store: # Where to save (inherited by descendants, overridable) _load: # Where to load from (optional) + child_key: + # inherits _store from parent ``` ## Core Concepts -### 1. meta key inheritance +### 1. 
meta_key Inheritance -Each field key inherits parent's meta keys, and can override: +Each field_key inherits parent meta_keys and can override individual fields: ```yaml _store: - client: KVS + client: Kvs key: "root:${id}" user: _store: - key: "user:${sso_user_id}" # Override only key, inherit client: KVS + key: "user:${user_id}" # overrides key only; client: Kvs inherited - tenant_id: - # Inherits _store from parent (client: KVS, key: user:${sso_user_id}) + name: + # inherits _store: { client: Kvs, key: "user:${user_id}" } ``` -### 2. Placeholder Resolution - -State engine resolves `${...}` by calling `State::get()`: - -```yaml -tenant: - _load: - table: "tenants" - where: "id=${user.tenant_id}" # → State::get("user.tenant_id") -``` +`_store` inheritance rule: child's `_store` fields overwrite matching keys; unspecified fields are inherited as-is. -**Placeholder shorthand:** +### 2. Placeholder / Template -Whether a path is absolute or relative is determined by whether it contains `.`: +`${}` paths are qualified to absolute paths at parse time. 
-- No `.` → relative path, automatically qualified to `filename.ancestors.path` at parse time -- Contains `.` → treated as absolute path, used as-is +**Qualify rule (`qualify_path()`):** +- No `.` → relative; converted to `filename.ancestors.keyword` at parse time +- Contains `.` → treated as absolute, used as-is ```yaml -# Inside user.tenant_id in cache.yml -key: "${org_id}" # → cache.user.org_id (relative) -key: "${cache.user.org_id}" # → cache.user.org_id (absolute, same result) -key: "${session.sso_user_id}" # → session.sso_user_id (cross-file reference) +# Inside tenant.yml under session.user._load +key: "${session.user.id}" # absolute — used as-is +key: "${id}" # relative → tenant.session.user.id ``` -**Limitation:** The shorthand (relative path) cannot contain `.`, so to reference a child of a sibling node, use a fully qualified path: +**is_template:** +- `${path}` alone → `is_template=false`。値をそのままコピー(string化しない) +- `"prefix:${path}"` etc. → `is_template=true`。全placeholderをContext.get()で解決しstring結合 -```yaml -# NG: treated as absolute path, KeyNotFound (no filename prefix) -key: "${user.id}" # → State::get("user.id") +### 3. _store / _load args -# OK: use fully qualified path -key: "${cache.user.id}" # → State::get("cache.user.id") -``` +`client:` 以外の全フィールドはimplementor定義の任意args。ライブラリは関知しない。 -### 3. 
Client Types - -**For _store** (where to save): ```yaml _store: - client: InMemory # Process memory - client: KVS # Redis, Memcached - client: HTTP # HTTP endpoint -``` + client: Kvs + key: "user:${user.id}" # reserved + ttl: 3600 # implementor-defined -**For _load** (where to load from): -```yaml _load: - client: State # Reference another State key - client: InMemory # Process memory - client: Env # Environment variables - client: KVS # Redis, Memcached - client: Db # Database - client: HTTP # HTTP endpoint + client: TenantDb + key: "users.id.${session.user.id}" # reserved + connection: ${connection.tenant_db} # implementor-defined + map: + name: "name" + email: "email" ``` -You must implement an adapter for each client you use (see Required Ports). +`key:` は予約引数。それ以外はimplementorが`args: &HashMap<&str, Tree>`から取り出して使う。 -#### Client-Specific Parameters +### 4. map -**_store.client: InMemory** -```yaml -_store: - client: InMemory - key: "session:${token}" # (string) Storage key (placeholders allowed) -``` +`_load.map:` でparent field_keyの子fieldにDB列等をマッピングする。 -**_load.client: Env** ```yaml -_load: - client: Env - map: # (object, required) Environment variable mapping - yaml_key: "ENV_VAR_NAME" +session: + user: + _load: + client: TenantDb + key: "users.id.${session.user.id}" + map: + name: "name" + email: "email" + name: + email: ``` -**_load.client: State** -```yaml -_load: - client: State - key: "${org_id}" # (string) Reference to another state key -``` - -**_store.client: KVS** -```yaml -_store: - client: KVS - key: "user:${id}" # (string) Storage key (placeholders allowed) - ttl: 3600 # (integer, optional) TTL in seconds -``` - -**_load.client: Db** -```yaml -_load: - client: Db - connection: ${connection.tenant} # (Value) Connection config object or reference - table: "users" # (string) Table name - where: "id=${user.id}" # (string, optional) WHERE clause - map: # (object, required) Column mapping - yaml_key: "db_column" -``` - -**_store.client: HTTP / 
_load.client: HTTP** -```yaml -_store: - client: HTTP - url: "https://api.example.com/state/${id}" # (string) Endpoint URL (placeholders allowed) - headers: # (object, optional) Request headers - Authorization: "Bearer ${token}" - -_load: - client: HTTP - url: "https://api.example.com/data/${id}" # (string) Endpoint URL (placeholders allowed) - headers: # (object, optional) Request headers - Authorization: "Bearer ${token}" - map: # (object, optional) Field extraction from response - yaml_key: "response_field" -``` - -## State Methods - -**State::get(key)** -> `Result, StateError>` -- Retrieves value from instance cache / store -- Triggers auto-load on miss if `_load` is defined -- Returns `Ok(Some(value))` on hit, `Ok(None)` on miss with no load, `Err` on error - -**State::set(key, value, ttl)** -> `Result` -- Saves value to persistent store and instance cache -- Does NOT trigger auto-load -- TTL parameter is optional (KVS only) - -**State::delete(key)** -> `Result` -- Removes key from both persistent store and instance cache -- Key will show as miss after deletion - -**State::exists(key)** -> `Result` -- Checks if key exists without triggering auto-load -- Returns `Ok(true/false)` -- Lightweight existence check for conditional logic - ---- - -## Original Text (ja) - -### 用語 - -- `meta keys`: `_`で始まるkey及び、それ以下のkey群 -- `field keys`: `meta keys`では無いkey群 -- `leaf keys`: 子keyを持たず値を持つkey群 -- `value`: leaf keysの値。YAML内で省略された場合はnullが入る -- `path`: 出発keyから対象keyまで、`.`区切りでkey名を並べたパス表現 -- `qualified path`: 出発keyを対象keyの記述された`filename.`とした、一意な完全修飾パス -- `placeholder`: ${path}の形で、指定keyのState.get()の結果を参照する記述形式 -- `template`: "user${user_id}"の様に、placeholderを文字列に埋め込む記述形式 - -### rule - -- `---`によるYAML区切りは使用不可 -- `placeholder`, `template`はvalue内のみで使用可能 - -### 基本構造 - -```yaml -field_key: - _state: # ステートのメタデータ(オプション) - _store: # 保存先メタデータ (ファイルルートキーで必須, 子孫キーへ継承) - _load: # 自動ロード元メタデータ (オプション) -``` - -### コアコンセプト - -### 1. 
meta key 継承 - -Each field key inherit parent's meta keys, and can override: - -```yaml -_store: - client: KVS - key: "root:${id}" - -user: - _store: - key: "user:${sso_user_id}" # キーが上書きされる, client: KVSは継承 - - tenant_id: - # client: KVS, key: user:${sso_user_id}を継承 -``` - -#### 2. placeholder 解決 - -State engineは`${...}`を`State::get()`呼び出しで解決します: - -```yaml -tenant: - _load: - table: "tenants" - where: "id=${user.tenant_id}" # → State::get("user.tenant_id") -``` - -**placeholderの省略記法:** - -Manifestは`${tenant_id}`を`${cache.user.tenant_id}`(絶対パス)に変換します。 - -`${path}` のパスは、`.` を含むかどうかで絶対/相対が決まります: - -- `.` を含まない → 相対パス。parse時に `filename.ancestors.path` へ自動修飾 -- `.` を含む → 絶対パスとみなし、そのまま使用 - -```yaml -# cache.yml の user.tenant_id 内 -key: "${org_id}" # → cache.user.org_id(相対) -key: "${cache.user.org_id}" # → cache.user.org_id(絶対、同じ結果) -key: "${session.sso_user_id}" # → session.sso_user_id(別ファイル参照) -``` - -**制約:** 省略記法(相対パス)では `.` を使えないため、兄弟ノードの子を参照する場合は完全修飾パスで記述してください。 - -```yaml -# NG: user.id と書くと絶対パスとみなされ、意図しない参照になる -key: "${user.id}" # → State::get("user.id") ← ファイル名なし、KeyNotFound - -# OK: 完全修飾パスで記述する -key: "${cache.user.id}" # → State::get("cache.user.id") -``` - -#### 3. 
クライアント種別 - -**_store用(保存先):** -```yaml -_store: - client: InMemory # プロセスメモリ - client: KVS # Redis, Memcached等 - client: HTTP # HTTPエンドポイント -``` - -**_load用(読込元):** -```yaml -_load: - client: State # 別のStateキーを参照 - client: InMemory # プロセスメモリ - client: Env # 環境変数 - client: KVS # Redis, Memcached等 - client: Db # データベース - client: HTTP # HTTPエンドポイント -``` - -使用する各クライアントのアダプターを実装する必要があります(Required Ports参照)。 - -##### クライアント固有のパラメータ - -**_store.client: InMemory** -```yaml -_store: - client: InMemory - key: "session:${token}" # (string) ストレージキー(プレースホルダー可) -``` - -**_load.client: Env** -```yaml -_load: - client: Env - map: # (object, required) 環境変数マッピング - yaml_key: "ENV_VAR_NAME" -``` - -**_load.client: State** -```yaml -_load: - client: State - key: "${org_id}" # (string) 別のStateキーへの参照 -``` - -**_store.client: KVS** -```yaml -_store: - client: KVS - key: "user:${id}" # (string) ストレージキー(プレースホルダー可) - ttl: 3600 # (integer, optional) TTL(秒) -``` - -**_load.client: Db** -```yaml -_load: - client: Db - connection: ${connection.tenant} # (Value) 接続設定オブジェクトまたは参照 - table: "users" # (string) テーブル名 - where: "id=${user.id}" # (string, optional) WHERE句 - map: # (object, required) カラムマッピング - yaml_key: "db_column" -``` - -**_store.client: HTTP / _load.client: HTTP** -```yaml -_store: - client: HTTP - url: "https://api.example.com/state/${id}" # (string) エンドポイントURL - headers: # (object, optional) リクエストヘッダー - Authorization: "Bearer ${token}" - -_load: - client: HTTP - url: "https://api.example.com/data/${id}" # (string) エンドポイントURL - headers: # (object, optional) リクエストヘッダー - Authorization: "Bearer ${token}" - map: # (object, optional) レスポンスからのフィールド抽出 - yaml_key: "response_field" -``` \ No newline at end of file +map対象のfield_keyは別途leaf宣言が必要。 diff --git a/examples/adapters/db_client.rs b/examples/adapters/db_client.rs deleted file mode 100644 index b42f6ef..0000000 --- a/examples/adapters/db_client.rs +++ /dev/null @@ -1,168 +0,0 @@ -/// DbClient implementation using PostgreSQL -/// -/// 
Implements the DbClient Required Port. - -use state_engine::Value; -use state_engine::ports::required::DbClient; -use std::collections::HashMap; -use std::sync::Mutex; - -pub struct DbAdapter { - pool: Mutex>, -} - -impl DbAdapter { - pub fn new() -> Self { - Self { - pool: Mutex::new(HashMap::new()), - } - } - - fn get_connection_name(config: &Value) -> Result { - let fields = match config { - Value::Mapping(f) => f, - _ => return Err("connection must be a mapping".to_string()), - }; - - let tag = fields.iter() - .find(|(k, _)| k == b"tag") - .and_then(|(_, v)| match v { Value::Scalar(b) => std::str::from_utf8(b).ok(), _ => None }) - .ok_or_else(|| "Missing 'tag' field in connection config".to_string())?; - - if tag == "common" { - Ok(format!("connection.{}", tag)) - } else if tag == "tenant" { - let id = fields.iter() - .find(|(k, _)| k == b"id") - .and_then(|(_, v)| match v { Value::Scalar(b) => std::str::from_utf8(b).ok(), _ => None }) - .ok_or_else(|| "Missing 'id' field for tenant connection".to_string())?; - Ok(format!("connection.{}{}", tag, id)) - } else { - Err(format!("Unsupported tag: {}", tag)) - } - } - - async fn connect_from_config(config: &Value) -> Result { - let fields = match config { - Value::Mapping(f) => f, - _ => return Err("connection must be a mapping".to_string()), - }; - - let scalar = |key: &[u8]| -> Option<&str> { - fields.iter() - .find(|(k, _)| k.as_slice() == key) - .and_then(|(_, v)| match v { Value::Scalar(b) => std::str::from_utf8(b).ok(), _ => None }) - }; - - let host = scalar(b"host").ok_or("Missing host")?; - let port_str = scalar(b"port").unwrap_or("5432"); - let port = port_str.parse::().unwrap_or(5432); - let database = scalar(b"database").ok_or("Missing database")?; - let username = scalar(b"username").ok_or("Missing username")?; - let password = scalar(b"password").ok_or("Missing password")?; - - let conn_str = format!( - "host={} port={} dbname={} user={} password={}", - host, port, database, username, password - ); - - 
let (client, connection) = tokio_postgres::connect(&conn_str, tokio_postgres::NoTls) - .await - .map_err(|e| format!("Failed to connect: {}", e))?; - - tokio::spawn(async move { - if let Err(e) = connection.await { - eprintln!("Connection error: {}", e); - } - }); - - Ok(client) - } -} - -impl DbClient for DbAdapter { - fn get( - &self, - connection: &Value, - table: &str, - map: &[(Vec, Vec)], - where_clause: Option<&[u8]>, - ) -> Option> { - let runtime = tokio::runtime::Runtime::new().ok()?; - - runtime.block_on(async { - let conn_name = Self::get_connection_name(connection).ok()?; - let mut pool_lock = self.pool.lock().unwrap(); - - if !pool_lock.contains_key(&conn_name) { - let client = Self::connect_from_config(connection).await.ok()?; - pool_lock.insert(conn_name.clone(), client); - } - - let client = pool_lock.get(&conn_name)?; - - let col_names: Vec<&str> = map.iter() - .filter_map(|(_, v)| std::str::from_utf8(v).ok()) - .collect(); - let column_list = if col_names.is_empty() { "*".to_string() } else { col_names.join(", ") }; - - let where_str = where_clause.and_then(|b| std::str::from_utf8(b).ok()); - let query = if let Some(wc) = where_str { - format!("SELECT {} FROM {} WHERE {}", column_list, table, wc) - } else { - format!("SELECT {} FROM {}", column_list, table) - }; - - let rows = client.query(&query, &[]).await.ok()?; - - let mut results = Vec::new(); - for row in rows { - let mut fields = Vec::new(); - for (idx, column) in row.columns().iter().enumerate() { - let val: Value = match column.type_() { - &tokio_postgres::types::Type::INT4 => { - row.try_get::<_, i32>(idx) - .map(|v| Value::Scalar(v.to_string().into_bytes())) - .unwrap_or(Value::Null) - } - &tokio_postgres::types::Type::INT8 => { - row.try_get::<_, i64>(idx) - .map(|v| Value::Scalar(v.to_string().into_bytes())) - .unwrap_or(Value::Null) - } - &tokio_postgres::types::Type::TEXT | &tokio_postgres::types::Type::VARCHAR => { - row.try_get::<_, String>(idx) - .map(|v| 
Value::Scalar(v.into_bytes())) - .unwrap_or(Value::Null) - } - &tokio_postgres::types::Type::BOOL => { - row.try_get::<_, bool>(idx) - .map(|v| Value::Scalar(if v { b"true".to_vec() } else { b"false".to_vec() })) - .unwrap_or(Value::Null) - } - _ => Value::Null, - }; - fields.push((column.name().as_bytes().to_vec(), val)); - } - results.push(Value::Mapping(fields)); - } - - Some(results) - }) - } - - fn set( - &self, - _connection: &Value, - _table: &str, - _map: &[(Vec, Vec)], - _where_clause: Option<&[u8]>, - ) -> bool { false } - - fn delete( - &self, - _connection: &Value, - _table: &str, - _where_clause: Option<&[u8]>, - ) -> bool { false } -} diff --git a/examples/adapters/env_client.rs b/examples/adapters/env_client.rs deleted file mode 100644 index 8b8788b..0000000 --- a/examples/adapters/env_client.rs +++ /dev/null @@ -1,23 +0,0 @@ -/// EnvClient implementation -/// -/// Implements the EnvClient Required Port. -/// Provides access to environment variables. - -use state_engine::ports::required::EnvClient; - -pub struct EnvAdapter; - -impl EnvAdapter { - pub fn new() -> Self { - Self - } -} - -impl EnvClient for EnvAdapter { - fn get(&self, key: &str) -> Option> { - std::env::var(key).ok().map(|s| s.into_bytes()) - } - - fn set(&self, _key: &str, _value: Vec) -> bool { false } - fn delete(&self, _key: &str) -> bool { false } -} diff --git a/examples/adapters/http_client.rs b/examples/adapters/http_client.rs deleted file mode 100644 index 671e87f..0000000 --- a/examples/adapters/http_client.rs +++ /dev/null @@ -1,25 +0,0 @@ -/// HttpClient implementation (mock for testing) -/// -/// Implements the HttpClient Required Port. -/// Returns a fixed health response for any URL. 
- -use state_engine::Value; -use state_engine::ports::required::HttpClient; - -pub struct HttpAdapter; - -impl HttpClient for HttpAdapter { - fn get(&self, _url: &str, _headers: Option<&[(Vec, Vec)]>) -> Option { - Some(Value::Mapping(vec![ - (b"status".to_vec(), Value::Scalar(b"ok".to_vec())), - ])) - } - - fn set(&self, _url: &str, _body: Value, _headers: Option<&[(Vec, Vec)]>) -> bool { - true - } - - fn delete(&self, _url: &str, _headers: Option<&[(Vec, Vec)]>) -> bool { - true - } -} diff --git a/examples/adapters/in_memory.rs b/examples/adapters/in_memory.rs deleted file mode 100644 index d398c6f..0000000 --- a/examples/adapters/in_memory.rs +++ /dev/null @@ -1,36 +0,0 @@ -/// InMemoryClient implementation -/// -/// Implements the InMemoryClient Required Port. -/// Manages in-memory key-value storage for the current process. - -use state_engine::Value; -use std::collections::HashMap; -use std::sync::Mutex; -use state_engine::ports::required::InMemoryClient; - -pub struct InMemoryAdapter { - data: Mutex>, -} - -impl InMemoryAdapter { - pub fn new() -> Self { - Self { - data: Mutex::new(HashMap::new()), - } - } -} - -impl InMemoryClient for InMemoryAdapter { - fn get(&self, key: &str) -> Option { - self.data.lock().unwrap().get(key).cloned() - } - - fn set(&self, key: &str, value: Value) -> bool { - self.data.lock().unwrap().insert(key.to_string(), value); - true - } - - fn delete(&self, key: &str) -> bool { - self.data.lock().unwrap().remove(key).is_some() - } -} diff --git a/examples/adapters/kvs_client.rs b/examples/adapters/kvs_client.rs deleted file mode 100644 index 9dfa21c..0000000 --- a/examples/adapters/kvs_client.rs +++ /dev/null @@ -1,82 +0,0 @@ -/// KVSClient implementation using Redis -/// -/// Implements the KVSClient Required Port. 
- -use state_engine::ports::required::KVSClient; -use std::sync::Mutex; - -pub struct KVSAdapter { - client: Mutex, -} - -impl KVSAdapter { - pub fn raw_get(&self, key: &str) -> Option> { - let client = self.client.lock().unwrap(); - let mut conn = client.get_connection().ok()?; - redis::cmd("GET") - .arg(key) - .query::>>(&mut conn) - .ok() - .flatten() - } - - pub fn new() -> Result { - let host = std::env::var("REDIS_HOST").unwrap_or_else(|_| "localhost".to_string()); - let port = std::env::var("REDIS_PORT").unwrap_or_else(|_| "6379".to_string()); - let url = format!("redis://{}:{}", host, port); - - let client = redis::Client::open(url) - .map_err(|e| format!("Failed to create Redis client: {}", e))?; - - Ok(Self { client: Mutex::new(client) }) - } -} - -impl KVSClient for KVSAdapter { - fn get(&self, key: &str) -> Option> { - let client = self.client.lock().unwrap(); - let mut conn = client.get_connection().ok()?; - redis::cmd("GET") - .arg(key) - .query::>>(&mut conn) - .ok() - .flatten() - } - - fn set(&self, key: &str, value: Vec, ttl: Option) -> bool { - let client = self.client.lock().unwrap(); - let mut conn = match client.get_connection() { - Ok(c) => c, - Err(_) => return false, - }; - - let result: Result<(), _> = if let Some(ttl_secs) = ttl { - redis::cmd("SETEX") - .arg(key) - .arg(ttl_secs) - .arg(value) - .query(&mut conn) - } else { - redis::cmd("SET") - .arg(key) - .arg(value) - .query(&mut conn) - }; - - result.is_ok() - } - - fn delete(&self, key: &str) -> bool { - let client = self.client.lock().unwrap(); - let mut conn = match client.get_connection() { - Ok(c) => c, - Err(_) => return false, - }; - - let result: Result = redis::cmd("DEL") - .arg(key) - .query(&mut conn); - - result.map(|count| count > 0).unwrap_or(false) - } -} diff --git a/examples/adapters/mod.rs b/examples/adapters/mod.rs deleted file mode 100644 index 0835594..0000000 --- a/examples/adapters/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub mod in_memory; -pub mod env_client; 
-pub mod kvs_client; -pub mod db_client; -pub mod http_client; - -pub use in_memory::InMemoryAdapter; -pub use env_client::EnvAdapter; -pub use kvs_client::KVSAdapter; -pub use db_client::DbAdapter; -pub use http_client::HttpAdapter; diff --git a/examples/app/.env b/examples/app/.env deleted file mode 100644 index a9eba93..0000000 --- a/examples/app/.env +++ /dev/null @@ -1,10 +0,0 @@ -DB_HOST=example-postgres -DB_PORT=5432 -DB_DATABASE=common_db -DB_USERNAME=user -DB_PASSWORD=p - -REDIS_HOST=redis -REDIS_PORT=6379 - -APP_DEBUG=true diff --git a/examples/app/Cargo.toml b/examples/app/Cargo.toml deleted file mode 100644 index 4add038..0000000 --- a/examples/app/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[workspace] - -[package] -name = "state-engine-sample" -version = "0.1.0" -edition = "2024" -description = "Sample application using state-engine" -license = "MIT" - -[[bin]] -name = "state-engine-sample" -path = "src/main.rs" - - -[dependencies] -state-engine = { path = "../.." } -env_logger = "0.11" -redis = "0.24" -tokio-postgres = "0.7" -tokio = { version = "1", features = ["rt", "rt-multi-thread"] } - -[dev-dependencies] diff --git a/examples/app/Dockerfile b/examples/app/Dockerfile deleted file mode 100644 index 9e5635a..0000000 --- a/examples/app/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM rust:1-slim AS builder -WORKDIR /build - -COPY Cargo.toml ./Cargo.toml -COPY src/ ./src/ -COPY examples/adapters/ ./examples/adapters/ -COPY examples/app/ ./examples/app/ - -RUN cargo build --release --features state-engine/logging \ - --manifest-path examples/app/Cargo.toml - -FROM debian:bookworm-slim - -WORKDIR /app - -COPY --from=builder /build/examples/app/target/release/state-engine-sample ./ -COPY examples/manifest/ ./manifest/ - -CMD ["./state-engine-sample"] diff --git a/examples/app/README.md b/examples/app/README.md deleted file mode 100644 index d3f1848..0000000 --- a/examples/app/README.md +++ /dev/null @@ -1,309 +0,0 @@ -# Sample Application - -## how to 
run - -```bash -cd examples/app -chmod +x run.sh -./run.sh -``` - -## tree - -``` -app/ - db/ - 000_create_databases.sh - 001_init.sql - src/ - adapters.rs - main.rs - .env - Cargo.toml - Dockerfile - docker-compose.yml - README.md - run.sh -``` - -## expected output - -``` -=== state-engine Integration Tests === - -[connection] -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('connection.common') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('connection') - get connection.common loads from Env ... ok -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('connection.common') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('6') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('connection.common') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('connection') - exists connection.common after get ... 
ok - -[session] -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('connection.common') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('6') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::exists('connection.common') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('connection') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') - set and get session.sso_user_id via InMemory store ... ok - -[cache.user] -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('5') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') 
-[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') - set and get cache.user via KVS ... ok -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] 
Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') - set and get leaf key cache.user.org_id ... ok -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user.id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') - set and get leaf key cache.user.id ... 
ok -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') - cache.user.tenant_id resolved via State client from org_id ... 
ok -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::delete('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::exists('cache.user') -[2026-03-20T07:13:27Z 
DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') - delete cache.user from KVS ... ok - -[cache.user DB load] -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.org_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG 
state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('connection.tenant') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('connection') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('connection.tenant') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('93') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('39') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::delete('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG 
state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('7') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('connection.tenant') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('connection') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('4') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('5') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG tokio_postgres::prepare] preparing 
query s0: SELECT id, sso_org_id FROM users WHERE sso_user_id=1 -[2026-03-20T07:13:27Z DEBUG tokio_postgres::query] executing statement s0 with parameters: [] -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') - get cache.user loads from DB via _load ... ok - -[cache.tenant.health HTTP load] -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('2') - get cache.tenant.health loads from HTTP after cache.user is set ... 
ok - -[placeholder] -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.tenant.health') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.tenant.health') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('43') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('44') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('19') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user.tenant_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/got('Some') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('43') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::set('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') - set cache.user without session.sso_user_id returns Err ... 
ok -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('58') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('24') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('cache') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('cache.user') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('3') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('1') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve/get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::get_meta('session.sso_user_id') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('55') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('23') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::build_config('58') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::resolve_value_to_string('24') -[2026-03-20T07:13:27Z DEBUG state_engine::state] State::get('session.nonexistent') -[2026-03-20T07:13:27Z DEBUG state_engine::manifest] Manifest::load('session') - get cache.user without session.sso_user_id returns Err ... ok - get nonexistent key returns KeyNotFound ... 
ok - -13 passed, 0 failed -``` diff --git a/examples/app/db/000_create_databases.sh b/examples/app/db/000_create_databases.sh deleted file mode 100644 index da43329..0000000 --- a/examples/app/db/000_create_databases.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL - CREATE DATABASE tenant_db; - GRANT ALL PRIVILEGES ON DATABASE tenant_db TO "$POSTGRES_USER"; -EOSQL diff --git a/examples/app/db/001_init.sql b/examples/app/db/001_init.sql deleted file mode 100644 index 699f118..0000000 --- a/examples/app/db/001_init.sql +++ /dev/null @@ -1,52 +0,0 @@ --- state_engine_dev: common DB (tenant metadata, user→tenant mapping) - -CREATE TABLE IF NOT EXISTS tenants ( - id SERIAL PRIMARY KEY, - name VARCHAR(255) NOT NULL, - db_host VARCHAR(255), - db_port INTEGER DEFAULT 5432, - db_database VARCHAR(255), - db_username VARCHAR(255), - db_password VARCHAR(255), - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP -); - -CREATE TABLE IF NOT EXISTS users ( - id SERIAL PRIMARY KEY, - sso_user_id INTEGER UNIQUE NOT NULL, - sso_org_id INTEGER, - tenant_id INTEGER REFERENCES tenants(id), - name VARCHAR(255), - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP -); - -INSERT INTO tenants (id, name, db_host, db_port, db_database, db_username, db_password) VALUES -(1, 'Tenant One', 'postgres', 5432, 'tenant_db', 'state_user', 'state_pass'), -(2, 'Tenant Two', 'postgres', 5432, 'tenant_db', 'state_user', 'state_pass') -ON CONFLICT DO NOTHING; - --- sso_user_id=1 → org_id=100 → tenant_id=1 --- sso_user_id=2 → org_id=200 → tenant_id=2 -INSERT INTO users (sso_user_id, sso_org_id, tenant_id, name) VALUES -(1, 100, 1, 'John Doe'), -(2, 200, 2, 'Jane Smith') -ON CONFLICT DO NOTHING; - -CREATE INDEX IF NOT EXISTS idx_users_sso_user_id ON users(sso_user_id); - -GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO current_user; -GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public TO current_user; -GRANT 
SELECT ON ALL TABLES IN SCHEMA public TO PUBLIC; - - --- tenant_db: per-tenant data - -\c tenant_db - -CREATE TABLE IF NOT EXISTS tenant_data ( - id SERIAL PRIMARY KEY, - tenant_id INTEGER NOT NULL, - key VARCHAR(255) NOT NULL, - value TEXT, - created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP -); diff --git a/examples/app/docker-compose.yml b/examples/app/docker-compose.yml deleted file mode 100644 index c206803..0000000 --- a/examples/app/docker-compose.yml +++ /dev/null @@ -1,51 +0,0 @@ -services: - example-app: - build: - context: ../.. - dockerfile: examples/app/Dockerfile - container_name: example-app - depends_on: - postgres: - condition: service_healthy - redis: - condition: service_healthy - env_file: .env - environment: - - RUST_LOG=debug - command: ["./state-engine-sample"] - restart: no - - postgres: - image: postgres:18-alpine - container_name: example-postgres - environment: - POSTGRES_USER: ${DB_USERNAME} - POSTGRES_PASSWORD: ${DB_PASSWORD} - POSTGRES_DB: ${DB_DATABASE} - ports: - - "5432:5432" - volumes: - - ./db:/docker-entrypoint-initdb.d - tmpfs: - - /var/lib/postgresql - healthcheck: - test: ["CMD-SHELL", "pg_isready -U ${DB_USERNAME} -d ${DB_DATABASE}"] - interval: 5s - timeout: 5s - retries: 5 - logging: - driver: none - - redis: - image: redis:8-alpine - container_name: example-redis - command: redis-server --save "" --loglevel nothing - ports: - - "6379:6379" - healthcheck: - test: ["CMD", "redis-cli", "ping"] - interval: 5s - timeout: 5s - retries: 5 - logging: - driver: none diff --git a/examples/app/run.sh b/examples/app/run.sh deleted file mode 100755 index daa65c4..0000000 --- a/examples/app/run.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -e - -docker compose up -d --build -EXIT_CODE=$(docker wait example-app) -docker logs example-app -docker compose down -exit $EXIT_CODE diff --git a/examples/app/src/adapters.rs b/examples/app/src/adapters.rs deleted file mode 100644 index ee4f9db..0000000 --- a/examples/app/src/adapters.rs 
+++ /dev/null @@ -1,10 +0,0 @@ -#[path = "../../adapters/mod.rs"] -mod adapters_impl; - -pub use adapters_impl::{ - InMemoryAdapter, - EnvAdapter, - KVSAdapter, - DbAdapter, - HttpAdapter, -}; diff --git a/examples/app/src/main.rs b/examples/app/src/main.rs deleted file mode 100644 index 11356ca..0000000 --- a/examples/app/src/main.rs +++ /dev/null @@ -1,338 +0,0 @@ -mod adapters; - -use adapters::{InMemoryAdapter, EnvAdapter, KVSAdapter, DbAdapter, HttpAdapter}; -use state_engine::{State, Value, ports::required::InMemoryClient}; -use std::sync::Arc; - -fn make_state( - env: Arc, - kvs: Arc, - db: Arc, - in_memory: Arc, - http: Arc, -) -> State { - State::new("./manifest") - .with_env(env) - .with_kvs(kvs) - .with_db(db) - .with_in_memory(in_memory) - .with_http(http) -} - -fn scalar(s: &str) -> Value { - Value::Scalar(s.as_bytes().to_vec()) -} - -fn mapping_get<'a>(v: &'a Value, key: &[u8]) -> Option<&'a Value> { - match v { - Value::Mapping(fields) => fields.iter().find(|(k, _)| k == key).map(|(_, v)| v), - _ => None, - } -} - -fn run_tests() -> (usize, usize) { - let mut passed = 0; - let mut failed = 0; - - macro_rules! test { - ($name:expr, $body:block) => {{ - print!(" {} ... 
", $name); - let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| $body)); - match result { - Ok(()) => { println!("ok"); passed += 1; } - Err(_) => { println!("FAILED"); failed += 1; } - } - }}; - } - - // ========================================================================= - // connection: Env load → InMemory store - // ========================================================================= - println!("\n[connection]"); - - test!("get connection.common loads from Env", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - let result = state.get("connection.common").unwrap(); - assert!(result.is_some(), "connection.common should be loaded from Env"); - let obj = result.unwrap(); - assert!(mapping_get(&obj, b"host").is_some()); - assert!(mapping_get(&obj, b"database").is_some()); - }); - - test!("exists connection.common after get", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - state.get("connection.common").unwrap(); - assert!(state.exists("connection.common").unwrap()); - }); - - // ========================================================================= - // session: InMemory set/get - // ========================================================================= - println!("\n[session]"); - - test!("set and get session.sso_user_id via InMemory store", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - assert!(state.set("session.sso_user_id", scalar("42"), None).unwrap()); - let got = state.get("session.sso_user_id").unwrap(); - assert_eq!(got, 
Some(scalar("42"))); - }); - - // ========================================================================= - // cache.user: KVS set/get/delete - // ========================================================================= - println!("\n[cache.user]"); - - test!("set and get cache.user via KVS", { - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - let user = Value::Mapping(vec![ - (b"id".to_vec(), scalar("1")), - (b"org_id".to_vec(), scalar("100")), - (b"tenant_id".to_vec(), scalar("10")), - ]); - assert!(state.set("cache.user", user.clone(), Some(3600)).unwrap()); - let got = state.get("cache.user").unwrap(); - assert_eq!(got, Some(user)); - }); - - test!("set and get leaf key cache.user.org_id", { - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let kvs = Arc::new(KVSAdapter::new().unwrap()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::clone(&kvs), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - assert!(state.set("cache.user.org_id", scalar("100"), None).unwrap()); - let got = state.get("cache.user.org_id").unwrap(); - assert_eq!(got, Some(scalar("100"))); - - // verify Redis directly: KVS key is "user:1" (session.sso_user_id=1) - // expected: encoded Mapping with org_id=100, not raw b"100" - let raw = kvs.raw_get("user:1"); - assert!(raw.is_some(), "user:1 should exist in Redis"); - let decoded = state_engine::codec_value::decode(raw.as_deref().unwrap()); - assert!(decoded.is_some(), "Redis value should decode as Value"); - let decoded = decoded.unwrap(); - assert_eq!( - mapping_get(&decoded, b"org_id"), - Some(&scalar("100")), - "org_id in Redis mapping should be 100" - ); - }); - - test!("set and get leaf key cache.user.id", { - let im = 
Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - assert!(state.set("cache.user.id", scalar("1"), None).unwrap()); - let got = state.get("cache.user.id").unwrap(); - assert_eq!(got, Some(scalar("1"))); - }); - - test!("cache.user.tenant_id resolved via State client from org_id", { - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - assert!(state.set("cache.user.org_id", scalar("100"), Some(14400)).unwrap()); - let got = state.get("cache.user.tenant_id").unwrap(); - assert_eq!(got, Some(scalar("100"))); - }); - - test!("delete cache.user from KVS", { - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - state.set("cache.user", Value::Mapping(vec![(b"id".to_vec(), scalar("1"))]), None).unwrap(); - assert!(state.delete("cache.user").unwrap()); - assert!(!state.exists("cache.user").unwrap()); - }); - - // ========================================================================= - // cache.user: DB load - // ========================================================================= - println!("\n[cache.user DB load]"); - - test!("get cache.user loads from DB via _load", { - let db_host = std::env::var("DB_HOST").unwrap_or("localhost".into()); - let db_port = std::env::var("DB_PORT").unwrap_or("5432".into()); - let db_database = std::env::var("DB_DATABASE").unwrap_or("state_engine_dev".into()); - let db_username = 
std::env::var("DB_USERNAME").unwrap_or("state_user".into()); - let db_password = std::env::var("DB_PASSWORD").unwrap_or("state_pass".into()); - - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - state.set("cache.user.org_id", scalar("100"), Some(14400)).unwrap(); - state.set("cache.user.tenant_id", scalar("1"), Some(14400)).unwrap(); - - let tenant_conn = Value::Mapping(vec![ - (b"tag".to_vec(), scalar("tenant")), - (b"id".to_vec(), scalar("1")), - (b"host".to_vec(), scalar(&db_host)), - (b"port".to_vec(), scalar(&db_port)), - (b"database".to_vec(), scalar(&db_database)), - (b"username".to_vec(), scalar(&db_username)), - (b"password".to_vec(), scalar(&db_password)), - ]); - state.set("connection.tenant", tenant_conn, None).unwrap(); - state.delete("cache.user").ok(); - - let result = state.get("cache.user").unwrap(); - assert!(result.is_some(), "cache.user should be loaded from DB"); - let obj = result.unwrap(); - assert!(mapping_get(&obj, b"id").is_some(), "id should be present"); - assert!(mapping_get(&obj, b"org_id").is_some(), "org_id should be present"); - }); - - // ========================================================================= - // cache.tenant.health: HTTP load (mock) - // Prerequisite: cache.user.tenant_id must be set before cache.tenant can resolve - // ========================================================================= - println!("\n[cache.tenant.health HTTP load]"); - - test!("get cache.tenant.health loads from HTTP after cache.user is set", { - let im = Arc::new(InMemoryAdapter::new()); - im.set("request-attributes-user-key", scalar("1")); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - // 
cache.tenant._store key = "tenant:${cache.user.tenant_id}" — must be resolvable - // cache.tenant._load (Db) would be tried first, but Db returns None (stub) - // cache.tenant.health._load (HTTP) overrides at leaf level → mock returns {status: ok} - state.set("cache.user.tenant_id", scalar("42"), Some(3600)).unwrap(); - - let result = state.get("cache.tenant.health").unwrap(); - assert!(result.is_some(), "cache.tenant.health should be loaded from HTTP"); - let obj = result.unwrap(); - assert_eq!(mapping_get(&obj, b"status"), Some(&scalar("ok"))); - }); - - // ========================================================================= - // placeholder resolution error cases - // ========================================================================= - println!("\n[placeholder]"); - - test!("set cache.user without session.sso_user_id returns Err", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - let result = state.set("cache.user", Value::Mapping(vec![(b"id".to_vec(), scalar("1"))]), None); - assert!(result.is_err(), "should fail when placeholder cannot be resolved"); - }); - - test!("get cache.user without session.sso_user_id returns Err", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - let result = state.get("cache.user"); - assert!(result.is_err(), "should fail when placeholder cannot be resolved"); - }); - - test!("get nonexistent key returns KeyNotFound", { - let im = Arc::new(InMemoryAdapter::new()); - let mut state = make_state( - Arc::new(EnvAdapter::new()), - Arc::new(KVSAdapter::new().unwrap()), - Arc::new(DbAdapter::new()), - im, - Arc::new(HttpAdapter), - ); - - let result = state.get("session.nonexistent"); - 
assert!(matches!(result, Err(state_engine::StateError::KeyNotFound(_)))); - }); - - (passed, failed) -} - -fn main() { - env_logger::init(); - println!("=== state-engine Integration Tests ==="); - let (passed, failed) = run_tests(); - println!("\n{} passed, {} failed", passed, failed); - if failed > 0 { - std::process::exit(1); - } -} diff --git a/examples/implements.rs b/examples/implements.rs index e69de29..12a432e 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -0,0 +1,116 @@ +// Example StoreClient implementations. +// These are minimal stubs showing how to implement StoreClient and StoreRegistry +// for common backing stores under the new unified interface. + +use context_engine::ports::required::{StoreClient, StoreRegistry, SetOutcome}; +use context_engine::ports::provided::Tree; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; + +// ── Memory ──────────────────────────────────────────────────────────────────── + +pub struct MemoryClient { + data: Mutex>, +} + +impl MemoryClient { + pub fn new() -> Self { + Self { data: Mutex::new(HashMap::new()) } + } +} + +impl StoreClient for MemoryClient { + fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option { + self.data.lock().unwrap().get(key).cloned() + } + fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option { + // args["value"] holds the value to store + let value = args.get("value")?.clone(); + let mut data = self.data.lock().unwrap(); + let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; + data.insert(key.to_string(), value); + Some(outcome) + } + fn delete(&self, key: &str, _args: &HashMap<&str, Tree>) -> bool { + self.data.lock().unwrap().remove(key).is_some() + } +} + +// ── KVS (Redis) ─────────────────────────────────────────────────────────────── +// +// args["ttl"] — optional, seconds as Scalar + +pub struct KvsClient { + client: Mutex, +} + +impl KvsClient { + pub fn new(url: &str) -> Result { + Ok(Self { 
client: Mutex::new(redis::Client::open(url)?) }) + } +} + +impl StoreClient for KvsClient { + fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option { + let client = self.client.lock().unwrap(); + let mut conn = client.get_connection().ok()?; + let bytes: Option> = redis::cmd("GET").arg(key).query(&mut conn).ok()?; + // deserialize bytes → Tree (implementor's responsibility) + bytes.map(|_b| todo!("deserialize")) + } + fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option { + let client = self.client.lock().unwrap(); + let mut conn = client.get_connection().ok()?; + let value = args.get("value")?; + let ttl = args.get("ttl").and_then(|t| match t { + Tree::Scalar(b) => std::str::from_utf8(b).ok()?.parse::().ok(), + _ => None, + }); + // serialize Tree → bytes (implementor's responsibility) + let bytes: Vec = todo!("serialize"); + let result: Result<(), _> = match ttl { + Some(secs) => redis::cmd("SETEX").arg(key).arg(secs).arg(bytes).query(&mut conn), + None => redis::cmd("SET").arg(key).arg(bytes).query(&mut conn), + }; + result.ok().map(|_| SetOutcome::Created) + } + fn delete(&self, key: &str, _args: &HashMap<&str, Tree>) -> bool { + let client = self.client.lock().unwrap(); + let mut conn = match client.get_connection() { Ok(c) => c, Err(_) => return false }; + let result: Result = redis::cmd("DEL").arg(key).query(&mut conn); + result.map(|n| n > 0).unwrap_or(false) + } +} + +// ── Env ─────────────────────────────────────────────────────────────────────── + +pub struct EnvClient; + +impl StoreClient for EnvClient { + fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option { + std::env::var(key).ok().map(|s| Tree::Scalar(s.into_bytes())) + } + fn set(&self, _key: &str, _args: &HashMap<&str, Tree>) -> Option { None } + fn delete(&self, _key: &str, _args: &HashMap<&str, Tree>) -> bool { false } +} + +// ── StoreRegistry ───────────────────────────────────────────────────────────── + +pub struct MyRegistry { + memory: Arc, + kvs: Arc, + 
env: Arc, +} + +impl StoreRegistry for MyRegistry { + fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient> { + match yaml_name { + "Memory" => Some(self.memory.as_ref()), + "Kvs" => Some(self.kvs.as_ref()), + "Env" => Some(self.env.as_ref()), + "CommonDb" => todo!("implement CommonDb"), + "TenantDb" => todo!("implement TenantDb"), + _ => None, + } + } +} diff --git a/examples/manifest/cache.yml b/examples/manifest/cache.yml deleted file mode 100644 index 3315c13..0000000 --- a/examples/manifest/cache.yml +++ /dev/null @@ -1,54 +0,0 @@ -# We can name this "cache.yml" meaning state stored in KVS. - -user: - _store: - client: KVS - key: "user:${session.sso_user_id}" # manifestDir path reference - ttl: 14400 # DbClient impl defines ttl unit - _load: - client: Db - connection: ${connection.tenant} - table: "users" - where: "sso_user_id=${session.sso_user_id}" - map: - id: "id" - org_id: "sso_org_id" - - id: - _state: - type: integer - org_id: - _state: - type: integer - tenant_id: - _state: - type: integer - _load: - client: State - key: "${org_id}" # relative path reference - -tenant: - _store: - client: KVS - key: "tenant:${cache.user.tenant_id}" - ttl: 3600 - _load: - client: Db - connection: ${connection.common} - table: "tenants" - where: "id=${cache.user.tenant_id}" - map: - name: "name" - - name: - _state: - type: string - health: - _store: null - _load: - client: HTTP - url: "http://127.0.0.1:8080/health/${cache.user.tenant_id}" - map: - status: "status" - _state: - type: string \ No newline at end of file diff --git a/examples/manifest/connection.yml b/examples/manifest/connection.yml deleted file mode 100644 index 3979e74..0000000 --- a/examples/manifest/connection.yml +++ /dev/null @@ -1,74 +0,0 @@ -# We can name this "connection.yml" meaning connection configurations. 
- -common: - _store: - client: InMemory - key: "connection.common" - _load: - client: Env - map: - host: "DB_HOST" - port: "DB_PORT" - database: "DB_DATABASE" - username: "DB_USERNAME" - password: "DB_PASSWORD" - - tag: "common" - driver: "postgres" - charset: "UTF8" - - host: - _state: - type: string - port: - _state: - type: integer - database: - _state: - type: string - username: - _state: - type: string - password: - _state: - type: string - -tenant: - _store: - client: InMemory - key: "connection.tenant${cache.user.tenant_id}" - _load: - client: Db - connection: ${connection.common} - table: "tenants" - where: "id=${cache.user.tenant_id}" - map: - id: "id" - host: "db_host" - port: "db_port" - database: "db_database" - username: "db_username" - password: "db_password" - - tag: "tenant" - driver: "postgres" - charset: "UTF8" - - id: - _state: - type: integer - host: - _state: - type: string - port: - _state: - type: integer - database: - _state: - type: string - username: - _state: - type: string - password: - _state: - type: string diff --git a/examples/manifest/session.yml b/examples/manifest/session.yml deleted file mode 100644 index 41b7373..0000000 --- a/examples/manifest/session.yml +++ /dev/null @@ -1,11 +0,0 @@ -# We can name this "session.yml" meaning relation to http requests. 
- -sso_user_id: - _state: - type: integer - _store: - client: InMemory - key: "request-attributes-user-key" - _load: - client: InMemory - key: "request-header-user-key" \ No newline at end of file diff --git a/examples/tenant.yml b/examples/tenant.yml index 33d1216..07e31ac 100644 --- a/examples/tenant.yml +++ b/examples/tenant.yml @@ -1,13 +1,17 @@ session: user: + _store: + client: Kvs + ttl: 3600 # seconds + id: _load: client: Memory key: "request.authorization.user" _load: - client: TenantDb - key: "users.id.${session.user.id}" + client: TenantDb + key: "users.id.${session.user.id}" connection: ${connection.tenant_db} map: password_hash: "password_hash" @@ -22,6 +26,7 @@ session: is_manager: preference: color_mode: + name_copy: ${session.user.name} tenant: id: @@ -29,9 +34,9 @@ session: client: Memory key: "request.authorization.tenant" _load: - client: CommonDb - connection: ${connection.common_db} - key: "tenants.id.${session.user.tenant.id}" + client: CommonDb + connection: ${connection.common_db} + key: "tenants.id.${session.user.tenant.id}" map: code: "code" name: "name" @@ -52,11 +57,13 @@ connection: map: host: "COMMON_DB_HOST" port: "COMMON_DB_PORT" + database: "COMMON_DB_DATABASE" username: "COMMON_DB_USERNAME" password: "COMMON_DB_PASSWORD" host: port: + database: username: password: driver: "postgres" @@ -70,8 +77,12 @@ connection: map: host: "host" port: "port" + database: "database" username: "username" password: "password" + _store: + client: Kvs + ttl: 36000 # seconds host: port: diff --git a/src/codec_value.rs b/src/codec.rs similarity index 99% rename from src/codec_value.rs rename to src/codec.rs index 6a6b45c..59ce249 100644 --- a/src/codec_value.rs +++ b/src/codec.rs @@ -1,4 +1,4 @@ -use crate::ports::provided::Value; +use crate::ports::provided::Tree as Value; // Wire format: // Null : 0x00 diff --git a/src/lib.rs b/src/lib.rs index 28f5e11..f045364 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -12,3 +12,4 @@ pub use ports::provided::{ 
Context, }; pub use ports::required::{StoreClient, StoreRegistry, SetOutcome}; +pub use ports::codec::{encode, decode}; diff --git a/src/load.rs b/src/load.rs deleted file mode 100644 index 4e32ec4..0000000 --- a/src/load.rs +++ /dev/null @@ -1,524 +0,0 @@ -use crate::ports::required::{ - DbClient, EnvClient, KVSClient, - InMemoryClient, HttpClient, FileClient, -}; -use crate::ports::provided::{LoadError, Value}; -use crate::core::fixed_bits; -use std::collections::HashMap; -use std::sync::Arc; - -pub struct Load { - db: Option>, - kvs: Option>, - in_memory: Option>, - env: Option>, - http: Option>, - file: Option>, -} - -impl Load { - pub fn new() -> Self { - Self { - db: None, - kvs: None, - in_memory: None, - env: None, - http: None, - file: None, - } - } - - pub fn with_db(mut self, client: Arc) -> Self { - self.db = Some(client); - self - } - - pub fn with_kvs(mut self, client: Arc) -> Self { - self.kvs = Some(client); - self - } - - pub fn with_in_memory(mut self, client: Arc) -> Self { - self.in_memory = Some(client); - self - } - - pub fn with_env(mut self, client: Arc) -> Self { - self.env = Some(client); - self - } - - pub fn with_http(mut self, client: Arc) -> Self { - self.http = Some(client); - self - } - - pub fn with_file(mut self, client: Arc) -> Self { - self.file = Some(client); - self - } - - pub fn handle(&self, config: &HashMap) -> Result { - let client = match config.get("client") { - Some(Value::Scalar(b)) => { - u64::from_le_bytes(b.as_slice().try_into().unwrap_or([0u8; 8])) - } - _ => return Err(LoadError::ConfigMissing("client".into())), - }; - - match client { - fixed_bits::CLIENT_ENV => self.load_from_env(config), - fixed_bits::CLIENT_IN_MEMORY => self.load_from_in_memory(config), - fixed_bits::CLIENT_KVS => self.load_from_kvs(config), - fixed_bits::CLIENT_DB => self.load_from_db(config), - fixed_bits::CLIENT_HTTP => self.load_from_http(config), - fixed_bits::CLIENT_FILE => self.load_from_file(config), - _ => 
Err(LoadError::ConfigMissing(format!("unsupported client '{}'", client))), - } - } - - fn load_from_env( - &self, - config: &HashMap, - ) -> Result { - let env = self.env.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let (yaml_keys, ext_keys) = get_map_keys(config)?; - let values = env.get(&ext_keys) - .ok_or(LoadError::ClientNotConfigured)?; - Ok(zip_to_mapping(yaml_keys, values)) - } - - fn load_from_in_memory( - &self, - config: &HashMap, - ) -> Result { - let in_memory = self.in_memory.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let key = scalar_str(config, "key")?; - in_memory - .get(key) - .ok_or_else(|| LoadError::NotFound(key.into())) - } - - fn load_from_kvs( - &self, - config: &HashMap, - ) -> Result { - let kvs = self.kvs.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let key = scalar_str(config, "key")?; - let bytes = kvs - .get(key) - .ok_or_else(|| LoadError::NotFound(key.into()))?; - Ok(crate::codec_value::decode(&bytes).unwrap_or(Value::Scalar(bytes))) - } - - fn load_from_db( - &self, - config: &HashMap, - ) -> Result { - let db = self.db.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let connection = config - .get("connection") - .ok_or(LoadError::ConfigMissing("connection".into()))?; - - let table = scalar_str(config, "table")?; - - let (yaml_keys, ext_keys) = get_map_keys(config)?; - - let where_clause = config.get("where") - .and_then(|v| if let Value::Scalar(b) = v { Some(b.as_slice()) } else { None }); - - let values = db - .get(connection, table, &ext_keys, where_clause) - .ok_or_else(|| LoadError::NotFound(table.into()))?; - - Ok(zip_to_mapping(yaml_keys, values)) - } - - fn load_from_file( - &self, - config: &HashMap, - ) -> Result { - let file = self.file.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let key = scalar_str(config, "key")?; - let bytes = file - .get(key) - .ok_or_else(|| LoadError::NotFound(key.into()))?; - 
Ok(crate::codec_value::decode(&bytes).unwrap_or(Value::Scalar(bytes))) - } - - fn load_from_http( - &self, - config: &HashMap, - ) -> Result { - let http = self.http.as_deref() - .ok_or(LoadError::ClientNotConfigured)?; - - let url = scalar_str(config, "url")?; - - let (yaml_keys, ext_keys) = get_map_keys(config)?; - - let headers = match config.get("headers") { - Some(Value::Mapping(m)) => Some( - m.iter() - .filter_map(|(k, v)| { - if let Value::Scalar(val) = v { Some((k.clone(), val.clone())) } else { None } - }) - .collect::>() - ), - _ => None, - }; - - let values = http.get(url, &ext_keys, headers.as_deref()) - .ok_or_else(|| LoadError::NotFound(url.into()))?; - - Ok(zip_to_mapping(yaml_keys, values)) - } -} - -fn get_map_keys(config: &HashMap) -> Result<(Vec>, Vec>), LoadError> { - let yaml_keys = match config.get("yaml_keys") { - Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), - _ => return Err(LoadError::ConfigMissing("yaml_keys".into())), - }; - let ext_keys = match config.get("ext_keys") { - Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), - _ => return Err(LoadError::ConfigMissing("ext_keys".into())), - }; - Ok((yaml_keys, ext_keys)) -} - -/// Zips yaml_keys and values into a Value::Mapping. 
-fn zip_to_mapping(yaml_keys: Vec>, values: Vec) -> Value { - Value::Mapping(yaml_keys.into_iter().zip(values).collect()) -} - -fn scalar_str<'a>(config: &'a HashMap, key: &str) -> Result<&'a str, LoadError> { - match config.get(key) { - Some(Value::Scalar(b)) => std::str::from_utf8(b) - .map_err(|_| LoadError::ConfigMissing(key.into())), - _ => Err(LoadError::ConfigMissing(key.into())), - } -} - -impl Default for Load { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn client_config(client_id: u64) -> Value { - Value::Scalar(client_id.to_le_bytes().to_vec()) - } - - fn map_config(pairs: &[(&str, &str)]) -> (Value, Value) { - let yaml_keys = Value::Sequence(pairs.iter().map(|(k, _)| Value::Scalar(k.as_bytes().to_vec())).collect()); - let ext_keys = Value::Sequence(pairs.iter().map(|(_, v)| Value::Scalar(v.as_bytes().to_vec())).collect()); - (yaml_keys, ext_keys) - } - - // --- Env --- - - struct MockEnvClient; - impl EnvClient for MockEnvClient { - fn get(&self, keys: &[Vec]) -> Option> { - let vals = keys.iter().map(|k| match k.as_slice() { - b"DB_HOST" => Value::Scalar(b"localhost".to_vec()), - b"DB_PORT" => Value::Scalar(b"5432".to_vec()), - _ => Value::Null, - }).collect(); - Some(vals) - } - fn set(&self, _key: &str, _value: Vec) -> bool { false } - fn delete(&self, _key: &str) -> bool { false } - } - - #[test] - fn test_load_from_env() { - let load = Load::new().with_env(Arc::new(MockEnvClient)); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_ENV)); - let (yaml_keys, ext_keys) = map_config(&[("host", "DB_HOST"), ("port", "DB_PORT")]); - config.insert("yaml_keys".to_string(), yaml_keys); - config.insert("ext_keys".to_string(), ext_keys); - let result = load.handle(&config).unwrap(); - if let Value::Mapping(m) = result { - let host = m.iter().find(|(k, _)| k == b"host").map(|(_, v)| v.clone()); - assert_eq!(host, 
Some(Value::Scalar(b"localhost".to_vec()))); - } else { - panic!("expected Mapping"); - } - } - - // --- InMemory --- - - struct MockInMemory { - store: std::sync::Mutex>, - } - impl MockInMemory { - fn new(entries: &[(&str, Value)]) -> Self { - Self { store: std::sync::Mutex::new(entries.iter().map(|(k, v)| (k.to_string(), v.clone())).collect()) } - } - } - impl InMemoryClient for MockInMemory { - fn get(&self, key: &str) -> Option { self.store.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, value: Value) -> bool { self.store.lock().unwrap().insert(key.to_string(), value); true } - fn delete(&self, key: &str) -> bool { self.store.lock().unwrap().remove(key).is_some() } - } - - #[test] - fn test_load_from_in_memory() { - let data = Value::Mapping(vec![(b"host".to_vec(), Value::Scalar(b"localhost".to_vec()))]); - let client = Arc::new(MockInMemory::new(&[("conn", data.clone())])); - let load = Load::new().with_in_memory(client); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_IN_MEMORY)); - config.insert("key".to_string(), Value::Scalar(b"conn".to_vec())); - assert_eq!(load.handle(&config).unwrap(), data); - } - - #[test] - fn test_load_from_in_memory_key_not_found() { - let client = Arc::new(MockInMemory::new(&[])); - let load = Load::new().with_in_memory(client); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_IN_MEMORY)); - config.insert("key".to_string(), Value::Scalar(b"missing".to_vec())); - assert!(load.handle(&config).is_err()); - } - - #[test] - fn test_load_from_in_memory_client_not_configured() { - let load = Load::new(); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_IN_MEMORY)); - config.insert("key".to_string(), Value::Scalar(b"k".to_vec())); - assert!(load.handle(&config).is_err()); - } - - // --- KVS --- - - struct MockKVS { - store: std::sync::Mutex>>, - } - impl 
MockKVS { - fn new(entries: &[(&str, &[u8])]) -> Self { - Self { store: std::sync::Mutex::new(entries.iter().map(|(k, v)| (k.to_string(), v.to_vec())).collect()) } - } - } - impl KVSClient for MockKVS { - fn get(&self, key: &str) -> Option> { self.store.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, value: Vec, _: Option) -> bool { self.store.lock().unwrap().insert(key.to_string(), value); true } - fn delete(&self, key: &str) -> bool { self.store.lock().unwrap().remove(key).is_some() } - } - - #[test] - fn test_load_from_kvs() { - let client = Arc::new(MockKVS::new(&[("sess", b"{\"user_id\":1}")])); - let load = Load::new().with_kvs(client); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_KVS)); - config.insert("key".to_string(), Value::Scalar(b"sess".to_vec())); - assert!(matches!(load.handle(&config).unwrap(), Value::Scalar(_))); - } - - #[test] - fn test_load_from_kvs_key_not_found() { - let client = Arc::new(MockKVS::new(&[])); - let load = Load::new().with_kvs(client); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_KVS)); - config.insert("key".to_string(), Value::Scalar(b"missing".to_vec())); - assert!(load.handle(&config).is_err()); - } - - #[test] - fn test_load_from_kvs_client_not_configured() { - let load = Load::new(); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_KVS)); - config.insert("key".to_string(), Value::Scalar(b"k".to_vec())); - assert!(load.handle(&config).is_err()); - } - - // --- DB --- - - struct MockDb { - rows: Vec, - } - impl MockDb { - fn new(rows: Vec) -> Self { Self { rows } } - } - impl DbClient for MockDb { - fn get(&self, _conn: &Value, _table: &str, _keys: &[Vec], _where: Option<&[u8]>) -> Option> { - if self.rows.is_empty() { None } else { Some(self.rows.clone()) } - } - fn set(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> bool { 
false } - fn delete(&self, _: &Value, _: &str, _: Option<&[u8]>) -> bool { false } - } - - fn db_config(table: &str, map: &[(&str, &str)]) -> HashMap { - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_DB)); - config.insert("table".to_string(), Value::Scalar(table.as_bytes().to_vec())); - config.insert("connection".to_string(), Value::Mapping(vec![])); - let (yaml_keys, ext_keys) = map_config(map); - config.insert("yaml_keys".to_string(), yaml_keys); - config.insert("ext_keys".to_string(), ext_keys); - config - } - - #[test] - fn test_load_from_db() { - // adapter returns field values in ext_keys order - let client = Arc::new(MockDb::new(vec![Value::Scalar(b"42".to_vec())])); - let load = Load::new().with_db(client); - let config = db_config("users", &[("id", "id")]); - let result = load.handle(&config).unwrap(); - // zip_to_mapping: yaml_key "id" → Value::Scalar("42") - let expected = Value::Mapping(vec![(b"id".to_vec(), Value::Scalar(b"42".to_vec()))]); - assert_eq!(result, expected); - } - - #[test] - fn test_load_from_db_no_rows() { - let client = Arc::new(MockDb::new(vec![])); - let load = Load::new().with_db(client); - let config = db_config("users", &[("id", "id")]); - assert!(load.handle(&config).is_err()); - } - - #[test] - fn test_load_from_db_client_not_configured() { - let load = Load::new(); - let config = db_config("users", &[("id", "id")]); - assert!(load.handle(&config).is_err()); - } - - // --- HTTP --- - - struct MockHttp { - response: Option, - } - impl MockHttp { - fn new(response: Option) -> Self { Self { response } } - } - impl HttpClient for MockHttp { - fn get(&self, _: &str, keys: &[Vec], _: Option<&[(Vec, Vec)]>) -> Option> { - self.response.as_ref().map(|v| { - keys.iter().map(|k| match v { - Value::Mapping(m) => m.iter() - .find(|(mk, _)| mk == k) - .map(|(_, mv)| mv.clone()) - .unwrap_or(Value::Null), - _ => v.clone(), - }).collect() - }) - } - fn set(&self, _: &str, _: Value, _: 
Option<&[(Vec, Vec)]>) -> bool { false } - fn delete(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> bool { false } - } - - fn http_config(url: &str) -> HashMap { - let mut c = HashMap::new(); - c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); - c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); - let (yaml_keys, ext_keys) = map_config(&[("status", "status")]); - c.insert("yaml_keys".to_string(), yaml_keys); - c.insert("ext_keys".to_string(), ext_keys); - c - } - - #[test] - fn test_load_from_http() { - let response = Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]); - let client = Arc::new(MockHttp::new(Some(response.clone()))); - let load = Load::new().with_http(client); - let config = http_config("http://example.com/health"); - assert_eq!(load.handle(&config).unwrap(), response); - } - - #[test] - fn test_load_from_http_not_found() { - let client = Arc::new(MockHttp::new(None)); - let load = Load::new().with_http(client); - let config = http_config("http://example.com/health"); - assert!(load.handle(&config).is_err()); - } - - #[test] - fn test_load_from_http_client_not_configured() { - let load = Load::new(); - let config = http_config("http://example.com/health"); - assert!(load.handle(&config).is_err()); - } - - // --- File --- - - struct MockFileClient { - store: std::sync::Mutex>>, - } - impl MockFileClient { - fn new(entries: &[(&str, &[u8])]) -> Self { - Self { - store: std::sync::Mutex::new( - entries.iter().map(|(k, v)| (k.to_string(), v.to_vec())).collect() - ), - } - } - } - impl FileClient for MockFileClient { - fn get(&self, key: &str) -> Option> { - self.store.lock().unwrap().get(key).cloned() - } - fn set(&self, key: &str, value: Vec) -> bool { - self.store.lock().unwrap().insert(key.to_string(), value); - true - } - fn delete(&self, key: &str) -> bool { - self.store.lock().unwrap().remove(key).is_some() - } - } - - #[test] - fn test_load_from_file() { - let file = 
MockFileClient::new(&[("session_data", b"{\"user_id\":42}")]); - let load = Load::new().with_file(Arc::new(file)); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_FILE)); - config.insert("key".to_string(), Value::Scalar(b"session_data".to_vec())); - assert!(matches!(load.handle(&config).unwrap(), Value::Scalar(_))); - } - - #[test] - fn test_load_from_file_key_not_found() { - let file = MockFileClient::new(&[]); - let load = Load::new().with_file(Arc::new(file)); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_FILE)); - config.insert("key".to_string(), Value::Scalar(b"missing".to_vec())); - assert!(load.handle(&config).is_err()); - } - - #[test] - fn test_load_from_file_client_not_configured() { - let load = Load::new(); - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_FILE)); - config.insert("key".to_string(), Value::Scalar(b"any".to_vec())); - assert!(load.handle(&config).is_err()); - } -} diff --git a/src/ports.rs b/src/ports.rs index 3936de2..e308b56 100644 --- a/src/ports.rs +++ b/src/ports.rs @@ -1,5 +1,7 @@ pub mod provided; pub mod required; +pub mod codec; pub use provided::*; pub use required::*; +pub use codec::{encode, decode}; diff --git a/src/store.rs b/src/store.rs deleted file mode 100644 index 4686fca..0000000 --- a/src/store.rs +++ /dev/null @@ -1,456 +0,0 @@ -use crate::ports::required::{InMemoryClient, KVSClient, HttpClient, FileClient}; -use crate::ports::provided::{StoreError, Value}; -use crate::core::fixed_bits; -use std::collections::HashMap; -use std::sync::Arc; - -pub struct Store { - in_memory: Option>, - kvs: Option>, - http: Option>, - file: Option>, -} - -impl Store { - pub fn new() -> Self { - Self { - in_memory: None, - kvs: None, - http: None, - file: None, - } - } - - pub fn with_in_memory(mut self, client: Arc) -> Self { - self.in_memory = Some(client); - self 
- } - - pub fn with_kvs(mut self, client: Arc) -> Self { - self.kvs = Some(client); - self - } - - pub fn with_http(mut self, client: Arc) -> Self { - self.http = Some(client); - self - } - - pub fn with_file(mut self, client: Arc) -> Self { - self.file = Some(client); - self - } - - pub fn get(&self, store_config: &HashMap) -> Option { - let client = client_id(store_config)?; - - match client { - fixed_bits::CLIENT_IN_MEMORY => { - let in_memory = self.in_memory.as_deref()?; - let key = scalar_str(store_config, "key")?; - in_memory.get(key) - } - fixed_bits::CLIENT_KVS => { - let kvs = self.kvs.as_deref()?; - let key = scalar_str(store_config, "key")?; - kvs.get(key).map(|b| crate::codec_value::decode(&b).unwrap_or(Value::Scalar(b))) - } - fixed_bits::CLIENT_HTTP => { - let http = self.http.as_deref()?; - let url = scalar_str(store_config, "url")?; - let (yaml_keys, ext_keys) = get_map_keys(store_config)?; - let headers = headers_list(store_config); - let values = http.get(url, &ext_keys, headers.as_deref())?; - Some(zip_to_mapping(yaml_keys, values)) - } - fixed_bits::CLIENT_FILE => { - let file = self.file.as_deref()?; - let key = scalar_str(store_config, "key")?; - file.get(key).map(|b| crate::codec_value::decode(&b).unwrap_or(Value::Scalar(b))) - } - _ => None, - } - } - - pub fn set( - &self, - store_config: &HashMap, - value: Value, - ttl: Option, - ) -> Result { - let client = client_id(store_config) - .ok_or(StoreError::ConfigMissing("client".into()))?; - - match client { - fixed_bits::CLIENT_IN_MEMORY => { - let in_memory = self.in_memory.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - Ok(in_memory.set(key, value)) - } - fixed_bits::CLIENT_KVS => { - let kvs = self.kvs.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - let bytes = 
value_to_bytes(value); - let final_ttl = ttl.or_else(|| scalar_u64(store_config, "ttl")); - Ok(kvs.set(key, bytes, final_ttl)) - } - fixed_bits::CLIENT_HTTP => { - let http = self.http.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let url = scalar_str(store_config, "url") - .ok_or(StoreError::ConfigMissing("url".into()))?; - let headers = headers_list(store_config); - Ok(http.set(url, value, headers.as_deref())) - } - fixed_bits::CLIENT_FILE => { - let file = self.file.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - let bytes = value_to_bytes(value); - Ok(file.set(key, bytes)) - } - _ => Err(StoreError::UnsupportedClient(client)), - } - } - - pub fn delete(&self, store_config: &HashMap) -> Result { - let client = client_id(store_config) - .ok_or(StoreError::ConfigMissing("client".into()))?; - - match client { - fixed_bits::CLIENT_IN_MEMORY => { - let in_memory = self.in_memory.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - Ok(in_memory.delete(key)) - } - fixed_bits::CLIENT_KVS => { - let kvs = self.kvs.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - Ok(kvs.delete(key)) - } - fixed_bits::CLIENT_HTTP => { - let http = self.http.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let url = scalar_str(store_config, "url") - .ok_or(StoreError::ConfigMissing("url".into()))?; - let headers = headers_list(store_config); - Ok(http.delete(url, headers.as_deref())) - } - fixed_bits::CLIENT_FILE => { - let file = self.file.as_deref() - .ok_or(StoreError::ClientNotConfigured)?; - let key = scalar_str(store_config, "key") - .ok_or(StoreError::ConfigMissing("key".into()))?; - Ok(file.delete(key)) - } - _ => Err(StoreError::UnsupportedClient(client)), - } 
- } -} - -fn client_id(config: &HashMap) -> Option { - match config.get("client") { - Some(Value::Scalar(b)) => b.as_slice().try_into().ok().map(u64::from_le_bytes), - _ => None, - } -} - -fn scalar_str<'a>(config: &'a HashMap, key: &str) -> Option<&'a str> { - match config.get(key) { - Some(Value::Scalar(b)) => std::str::from_utf8(b).ok(), - _ => None, - } -} - -fn scalar_u64(config: &HashMap, key: &str) -> Option { - match config.get(key) { - Some(Value::Scalar(b)) => b.as_slice().try_into().ok().map(u64::from_le_bytes), - _ => None, - } -} - -fn headers_list(config: &HashMap) -> Option, Vec)>> { - match config.get("headers") { - Some(Value::Mapping(m)) => Some( - m.iter() - .filter_map(|(k, v)| { - if let Value::Scalar(val) = v { Some((k.clone(), val.clone())) } else { None } - }) - .collect() - ), - _ => None, - } -} - -fn get_map_keys(config: &HashMap) -> Option<(Vec>, Vec>)> { - let yaml_keys = match config.get("yaml_keys") { - Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), - _ => return None, - }; - let ext_keys = match config.get("ext_keys") { - Some(Value::Sequence(s)) => s.iter().filter_map(|v| if let Value::Scalar(b) = v { Some(b.clone()) } else { None }).collect(), - _ => return None, - }; - Some((yaml_keys, ext_keys)) -} - -fn zip_to_mapping(yaml_keys: Vec>, values: Vec) -> Value { - Value::Mapping(yaml_keys.into_iter().zip(values).collect()) -} - -fn value_to_bytes(value: Value) -> Vec { - match value { - Value::Scalar(b) => b, - other => crate::codec_value::encode(&other), - } -} - -impl Default for Store { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core::fixed_bits; - - fn client_config(client_id: u64) -> Value { - Value::Scalar(client_id.to_le_bytes().to_vec()) - } - - struct MockFileClient { - store: std::sync::Mutex>>, - } - impl MockFileClient { - fn new() -> Self { - Self { store: 
std::sync::Mutex::new(std::collections::HashMap::new()) } - } - } - impl FileClient for MockFileClient { - fn get(&self, key: &str) -> Option> { - self.store.lock().unwrap().get(key).cloned() - } - fn set(&self, key: &str, value: Vec) -> bool { - self.store.lock().unwrap().insert(key.to_string(), value); - true - } - fn delete(&self, key: &str) -> bool { - self.store.lock().unwrap().remove(key).is_some() - } - } - - fn file_config(key: &str) -> HashMap { - let mut config = HashMap::new(); - config.insert("client".to_string(), client_config(fixed_bits::CLIENT_FILE)); - config.insert("key".to_string(), Value::Scalar(key.as_bytes().to_vec())); - config - } - - #[test] - fn test_store_file_set_and_get() { - let file = Arc::new(MockFileClient::new()); - let store = Store::new().with_file(file); - let config = file_config("my_key"); - let data = Value::Scalar(b"hello".to_vec()); - assert_eq!(store.set(&config, data.clone(), None).unwrap(), true); - assert!(store.get(&config).is_some()); - } - - #[test] - fn test_store_file_delete() { - let file = Arc::new(MockFileClient::new()); - let store = Store::new().with_file(file); - let config = file_config("my_key"); - store.set(&config, Value::Scalar(b"x".to_vec()), None).unwrap(); - assert_eq!(store.delete(&config).unwrap(), true); - assert!(store.get(&config).is_none()); - } - - #[test] - fn test_store_file_client_not_configured() { - let store = Store::new(); - let config = file_config("my_key"); - assert!(store.set(&config, Value::Scalar(b"x".to_vec()), None).is_err()); - assert!(store.delete(&config).is_err()); - } - - // --- InMemory --- - - struct MockInMemory { - store: std::sync::Mutex>, - } - impl MockInMemory { - fn new() -> Self { Self { store: std::sync::Mutex::new(std::collections::HashMap::new()) } } - } - impl InMemoryClient for MockInMemory { - fn get(&self, key: &str) -> Option { self.store.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, value: Value) -> bool { 
self.store.lock().unwrap().insert(key.to_string(), value); true } - fn delete(&self, key: &str) -> bool { self.store.lock().unwrap().remove(key).is_some() } - } - - fn in_memory_config(key: &str) -> HashMap { - let mut c = HashMap::new(); - c.insert("client".to_string(), client_config(fixed_bits::CLIENT_IN_MEMORY)); - c.insert("key".to_string(), Value::Scalar(key.as_bytes().to_vec())); - c - } - - #[test] - fn test_store_in_memory_set_and_get() { - let client = Arc::new(MockInMemory::new()); - let store = Store::new().with_in_memory(client); - let config = in_memory_config("k"); - let data = Value::Scalar(b"42".to_vec()); - assert!(store.set(&config, data.clone(), None).unwrap()); - assert_eq!(store.get(&config).unwrap(), data); - } - - #[test] - fn test_store_in_memory_delete() { - let client = Arc::new(MockInMemory::new()); - let store = Store::new().with_in_memory(client); - let config = in_memory_config("k"); - store.set(&config, Value::Scalar(b"1".to_vec()), None).unwrap(); - assert!(store.delete(&config).unwrap()); - assert!(store.get(&config).is_none()); - } - - #[test] - fn test_store_in_memory_client_not_configured() { - let store = Store::new(); - let config = in_memory_config("k"); - assert!(store.set(&config, Value::Scalar(b"1".to_vec()), None).is_err()); - assert!(store.delete(&config).is_err()); - } - - // --- KVS --- - - struct MockKVS { - store: std::sync::Mutex>>, - } - impl MockKVS { - fn new() -> Self { Self { store: std::sync::Mutex::new(std::collections::HashMap::new()) } } - } - impl KVSClient for MockKVS { - fn get(&self, key: &str) -> Option> { self.store.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, value: Vec, _ttl: Option) -> bool { self.store.lock().unwrap().insert(key.to_string(), value); true } - fn delete(&self, key: &str) -> bool { self.store.lock().unwrap().remove(key).is_some() } - } - - fn kvs_config(key: &str) -> HashMap { - let mut c = HashMap::new(); - c.insert("client".to_string(), 
client_config(fixed_bits::CLIENT_KVS)); - c.insert("key".to_string(), Value::Scalar(key.as_bytes().to_vec())); - c - } - - #[test] - fn test_store_kvs_set_and_get() { - let client = Arc::new(MockKVS::new()); - let store = Store::new().with_kvs(client); - let config = kvs_config("k"); - let data = Value::Scalar(b"hello".to_vec()); - assert!(store.set(&config, data.clone(), None).unwrap()); - assert_eq!(store.get(&config).unwrap(), Value::Scalar(b"hello".to_vec())); - } - - #[test] - fn test_store_kvs_set_uses_ttl_from_config() { - let client = Arc::new(MockKVS::new()); - let store = Store::new().with_kvs(client); - let mut config = kvs_config("k"); - config.insert("ttl".to_string(), Value::Scalar(3600u64.to_le_bytes().to_vec())); - assert!(store.set(&config, Value::Scalar(b"1".to_vec()), None).unwrap()); - } - - #[test] - fn test_store_kvs_delete() { - let client = Arc::new(MockKVS::new()); - let store = Store::new().with_kvs(client); - let config = kvs_config("k"); - store.set(&config, Value::Scalar(b"1".to_vec()), None).unwrap(); - assert!(store.delete(&config).unwrap()); - assert!(store.get(&config).is_none()); - } - - #[test] - fn test_store_kvs_client_not_configured() { - let store = Store::new(); - let config = kvs_config("k"); - assert!(store.set(&config, Value::Scalar(b"1".to_vec()), None).is_err()); - assert!(store.delete(&config).is_err()); - } - - // --- HTTP --- - - struct MockHttp { - store: std::sync::Mutex>, - } - impl MockHttp { - fn new() -> Self { Self { store: std::sync::Mutex::new(std::collections::HashMap::new()) } } - } - impl HttpClient for MockHttp { - fn get(&self, url: &str, keys: &[Vec], _: Option<&[(Vec, Vec)]>) -> Option> { - let stored = self.store.lock().unwrap().get(url).cloned()?; - Some(keys.iter().map(|k| match &stored { - Value::Mapping(m) => m.iter().find(|(mk, _)| mk == k).map(|(_, v)| v.clone()).unwrap_or(Value::Null), - _ => stored.clone(), - }).collect()) - } - fn set(&self, url: &str, value: Value, _: Option<&[(Vec, Vec)]>) 
-> bool { - self.store.lock().unwrap().insert(url.to_string(), value); true - } - fn delete(&self, url: &str, _: Option<&[(Vec, Vec)]>) -> bool { - self.store.lock().unwrap().remove(url).is_some() - } - } - - fn http_config(url: &str) -> HashMap { - let mut c = HashMap::new(); - c.insert("client".to_string(), client_config(fixed_bits::CLIENT_HTTP)); - c.insert("url".to_string(), Value::Scalar(url.as_bytes().to_vec())); - c.insert("yaml_keys".to_string(), Value::Sequence(vec![Value::Scalar(b"status".to_vec())])); - c.insert("ext_keys".to_string(), Value::Sequence(vec![Value::Scalar(b"status".to_vec())])); - c - } - - #[test] - fn test_store_http_set_and_get() { - let client = Arc::new(MockHttp::new()); - let store = Store::new().with_http(client); - let config = http_config("http://example.com/data"); - let data = Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]); - assert!(store.set(&config, data, None).unwrap()); - let result = store.get(&config).unwrap(); - let expected = Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]); - assert_eq!(result, expected); - } - - #[test] - fn test_store_http_delete() { - let client = Arc::new(MockHttp::new()); - let store = Store::new().with_http(client); - let config = http_config("http://example.com/data"); - store.set(&config, Value::Mapping(vec![(b"status".to_vec(), Value::Scalar(b"ok".to_vec()))]), None).unwrap(); - assert!(store.delete(&config).unwrap()); - } - - #[test] - fn test_store_http_client_not_configured() { - let store = Store::new(); - let config = http_config("http://example.com/data"); - assert!(store.set(&config, Value::Scalar(b"x".to_vec()), None).is_err()); - assert!(store.delete(&config).is_err()); - } -} diff --git a/src/core/codec.rs b/src/unused/codec.rs similarity index 100% rename from src/core/codec.rs rename to src/unused/codec.rs diff --git a/src/core/fixed_bits.rs b/src/unused/fixed_bits.rs similarity index 100% rename from src/core/fixed_bits.rs rename 
to src/unused/fixed_bits.rs diff --git a/src/core/manifest.rs b/src/unused/manifest.rs similarity index 100% rename from src/core/manifest.rs rename to src/unused/manifest.rs diff --git a/src/core/mod.rs b/src/unused/mod.rs similarity index 100% rename from src/core/mod.rs rename to src/unused/mod.rs diff --git a/src/core/parser.rs b/src/unused/parser.rs similarity index 100% rename from src/core/parser.rs rename to src/unused/parser.rs diff --git a/src/core/pool.rs b/src/unused/pool.rs similarity index 100% rename from src/core/pool.rs rename to src/unused/pool.rs diff --git a/src/state.rs b/src/unused/state.rs similarity index 100% rename from src/state.rs rename to src/unused/state.rs From 28bf96b24c342e2dc376a06d5e429c17b3a9f674 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 15:33:00 +0900 Subject: [PATCH 19/41] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f3242bd..3ee6ea8 100644 --- a/README.md +++ b/README.md @@ -99,7 +99,7 @@ let user_name = state.get("session.user.name")?; ▲ │ ┌─────────────┐ ┌───────────┴────────────────────┐ -│ Implements │------>│ StoreRegistry (Required Port) │ +│ ClientImpl │------>│ StoreRegistry (Required Port) │ └─────────────┘ impl └────────────────────────────────┘ ``` From 09bf478457faa7bee3e23d96483cb61818f273a9 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 16:08:21 +0900 Subject: [PATCH 20/41] update to no-std --- Cargo.toml | 6 +++--- src/codec.rs | 2 ++ src/context.rs | 38 ++++++++++++++++++++------------------ src/dsl.rs | 3 +++ src/index.rs | 9 +++++++-- src/lib.rs | 8 ++++++++ src/log_format.rs | 3 +++ src/ports/provided.rs | 20 ++++++++++++-------- src/ports/required.rs | 8 ++++---- src/tree.rs | 2 ++ 10 files changed, 64 insertions(+), 35 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2d91a8d..8999939 100644 --- 
a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ categories = ["config", "data-structures", "template-engine", "parser-implementa exclude = ["examples/*"] [dependencies] -log = { version = "0.4", optional = true } +log = { version = "0.4", default-features = false, optional = true } serde_yaml_ng = { version = "0.10", optional = true } [dev-dependencies] @@ -23,5 +23,5 @@ ctor = "0.2" [features] default = [] -logging = ["log"] -precompile = ["serde_yaml_ng"] +logging = ["dep:log"] +precompile = ["dep:serde_yaml_ng"] diff --git a/src/codec.rs b/src/codec.rs index 59ce249..a5ac3ea 100644 --- a/src/codec.rs +++ b/src/codec.rs @@ -1,3 +1,5 @@ +use alloc::vec::Vec; + use crate::ports::provided::Tree as Value; // Wire format: diff --git a/src/context.rs b/src/context.rs index f9ce408..810482a 100644 --- a/src/context.rs +++ b/src/context.rs @@ -1,5 +1,7 @@ -use std::collections::HashSet; -use std::sync::Arc; +use alloc::collections::{BTreeMap, BTreeSet}; +use alloc::sync::Arc; +use alloc::vec::Vec; +use core::str::from_utf8; use crate::index::Index; use crate::ports::provided::{Context as ContextTrait, ContextError, StoreError, LoadError, Tree}; @@ -10,9 +12,9 @@ use crate::ports::required::{StoreRegistry, SetOutcome}; pub struct Context<'r> { index: Arc, registry: &'r dyn StoreRegistry, - cache_keys: Vec, // path_idx - cache_vals: Vec, // parallel to cache_keys - called_keys: HashSet, + cache_keys: Vec, // path_idx + cache_vals: Vec, // parallel to cache_keys + called_keys: BTreeSet, max_recursion: usize, } @@ -23,7 +25,7 @@ impl<'r> Context<'r> { registry, cache_keys: Vec::new(), cache_vals: Vec::new(), - called_keys: HashSet::new(), + called_keys: BTreeSet::new(), max_recursion: 20, } } @@ -110,12 +112,12 @@ impl<'r> ContextTrait for Context<'r> { ))?; let store_key = args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }).ok_or_else(|| 
ContextError::StoreFailed( StoreError::ConfigMissing("key".to_string()) ))?; - let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + let args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); @@ -142,12 +144,12 @@ impl<'r> ContextTrait for Context<'r> { ))?; let store_key = args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }).ok_or_else(|| ContextError::StoreFailed( StoreError::ConfigMissing("key".to_string()) ))?; - let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + let args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); @@ -175,13 +177,13 @@ impl<'r> ContextTrait for Context<'r> { }; let store_key = match args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }) { Some(k) => k, None => return Ok(false), }; - let args_ref: std::collections::HashMap<&str, Tree> = args.iter() + let args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); @@ -203,11 +205,11 @@ impl<'r> Context<'r> { if !store_name.is_empty() { if let Some(client) = self.registry.client_for(store_name) { let key = store_args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }).ok_or_else(|| ContextError::StoreFailed( StoreError::ConfigMissing("key".to_string()) ))?; - let args_ref: std::collections::HashMap<&str, Tree> = store_args.iter() + let args_ref: BTreeMap<&str, Tree> = store_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); if let Some(value) = client.get(key, &args_ref) { @@ -227,11 +229,11 @@ impl<'r> Context<'r> { LoadError::ClientNotFound(load_name.to_string()) ))?; let key = 
load_args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }).ok_or_else(|| ContextError::LoadFailed( LoadError::ConfigMissing("key".to_string()) ))?; - let args_ref: std::collections::HashMap<&str, Tree> = load_args.iter() + let args_ref: BTreeMap<&str, Tree> = load_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); let value = client.get(key, &args_ref) @@ -243,10 +245,10 @@ impl<'r> Context<'r> { if !store_name.is_empty() { if let Some(store_client) = self.registry.client_for(store_name) { let store_key = store_args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { std::str::from_utf8(b).ok() } else { None } + if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }); if let Some(sk) = store_key { - let sargs: std::collections::HashMap<&str, Tree> = store_args.iter() + let sargs: BTreeMap<&str, Tree> = store_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); store_client.set(sk, &sargs); diff --git a/src/dsl.rs b/src/dsl.rs index 17ddf80..27d156c 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -1,3 +1,6 @@ +use alloc::boxed::Box; +use alloc::vec::Vec; + use crate::ports::provided::Tree; // ── client_idx constants (4bit, stored in leaves) ───────────────────────────── diff --git a/src/index.rs b/src/index.rs index 42cb421..9890a38 100644 --- a/src/index.rs +++ b/src/index.rs @@ -1,6 +1,11 @@ +use alloc::collections::BTreeMap; +use alloc::string::String; +use alloc::vec::Vec; + use crate::dsl::{ PATH_IS_LEAF_MASK, PATH_OFFSET_SHIFT, PATH_OFFSET_MASK, PATH_COUNT_SHIFT, PATH_COUNT_MASK, }; +use crate::ports::provided::Tree; // ── LeafRef ─────────────────────────────────────────────────────────────────── @@ -87,13 +92,13 @@ impl Index { /// Extract _load client yaml_name and args from leaves at `leaf_offset`. /// Returns ("", empty) if no _load is configured. 
- pub fn load_args(&self, leaf_offset: u32) -> (&str, std::collections::HashMap) { + pub fn load_args(&self, leaf_offset: u32) -> (&str, BTreeMap) { todo!("decode _load from leaves[leaf_offset..]") } /// Extract _store client yaml_name and args from leaves at `leaf_offset`. /// Returns ("", empty) if no _store is configured. - pub fn store_args(&self, leaf_offset: u32) -> (&str, std::collections::HashMap) { + pub fn store_args(&self, leaf_offset: u32) -> (&str, BTreeMap) { todo!("decode _store from leaves[leaf_offset..]") } } diff --git a/src/lib.rs b/src/lib.rs index f045364..dfe11b8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,11 @@ +#![no_std] +extern crate core; +extern crate alloc; + +// precompile feature requires std (file I/O, serde) +#[cfg(feature = "precompile")] +extern crate std; + pub mod log_format; pub mod ports; pub mod context; diff --git a/src/log_format.rs b/src/log_format.rs index 326113c..5dced83 100644 --- a/src/log_format.rs +++ b/src/log_format.rs @@ -1,3 +1,6 @@ +use alloc::string::String; +use alloc::vec::Vec; + use crate::ports::provided::Tree; /// # Examples diff --git a/src/ports/provided.rs b/src/ports/provided.rs index 567952d..165cb76 100644 --- a/src/ports/provided.rs +++ b/src/ports/provided.rs @@ -1,3 +1,7 @@ +use alloc::string::String; +use alloc::vec::Vec; +use core::fmt; + // Request-scoped context handle. Manages state per DSL definition. pub trait Context { /// Returns value from instance cache → _store, triggers _load on miss. 
@@ -31,8 +35,8 @@ pub enum ParseError { ParseError(String), } -impl std::fmt::Display for ParseError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for ParseError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ParseError::FileNotFound(msg) => write!(f, "FileNotFound: {}", msg), ParseError::AmbiguousFile(msg) => write!(f, "AmbiguousFile: {}", msg), @@ -53,8 +57,8 @@ pub enum LoadError { ParseError(String), } -impl std::fmt::Display for LoadError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for LoadError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { LoadError::ClientNotFound(msg) => write!(f, "ClientNotFound: {}", msg), LoadError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), @@ -74,8 +78,8 @@ pub enum StoreError { SerializeError(String), } -impl std::fmt::Display for StoreError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for StoreError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { StoreError::ClientNotFound(msg) => write!(f, "ClientNotFound: {}", msg), StoreError::ConfigMissing(msg) => write!(f, "ConfigMissing: {}", msg), @@ -93,8 +97,8 @@ pub enum ContextError { LoadFailed(LoadError), } -impl std::fmt::Display for ContextError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +impl fmt::Display for ContextError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ContextError::ParseFailed(msg) => write!(f, "ParseFailed: {}", msg), ContextError::KeyNotFound(msg) => write!(f, "KeyNotFound: {}", msg), diff --git a/src/ports/required.rs b/src/ports/required.rs index f197ce9..c175328 100644 --- a/src/ports/required.rs +++ b/src/ports/required.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use alloc::collections::BTreeMap; use crate::ports::provided::Tree; // Outcome of a 
StoreClient::set call. @@ -14,9 +14,9 @@ pub enum SetOutcome { // The implementor defines and reads whatever keys it needs. // - Thread-safety and internal mutability are the implementor's responsibility. pub trait StoreClient: Send + Sync { - fn get(&self, key: &str, args: &HashMap<&str, Tree>) -> Option; - fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option; - fn delete(&self, key: &str, args: &HashMap<&str, Tree>) -> bool; + fn get(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; + fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; + fn delete(&self, key: &str, args: &BTreeMap<&str, Tree>) -> bool; } /// Dispatches yaml_name → StoreClient. Implemented by the library user. diff --git a/src/tree.rs b/src/tree.rs index a52ac7a..d686f90 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -1,5 +1,7 @@ #[cfg(feature = "precompile")] mod inner { + // precompile requires std: file I/O (std::fs::write) and UTF-8 parsing + extern crate std; use crate::ports::provided::Tree as Value; pub struct Tree { From 20b0548d904715b7e0aa26dd1804744a0d8940b7 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 16:11:19 +0900 Subject: [PATCH 21/41] u --- src/lib.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index dfe11b8..9141a37 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -19,5 +19,8 @@ pub use ports::provided::{ ParseError, LoadError, StoreError, ContextError, Context, }; -pub use ports::required::{StoreClient, StoreRegistry, SetOutcome}; -pub use ports::codec::{encode, decode}; +pub use ports::required::{ + StoreClient, + StoreRegistry, + SetOutcome +}; From 52f371939310f1a4da50500ba0a5b43ab6daa3c0 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 16:21:57 +0900 Subject: [PATCH 22/41] update --- src/error.rs | 117 +++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 3 +- src/log_format.rs | 
118 ---------------------------------------------- 3 files changed, 118 insertions(+), 120 deletions(-) create mode 100644 src/error.rs delete mode 100644 src/log_format.rs diff --git a/src/error.rs b/src/error.rs new file mode 100644 index 0000000..d49812a --- /dev/null +++ b/src/error.rs @@ -0,0 +1,117 @@ +use alloc::string::String; + +use crate::ports::provided::Tree; + +pub(crate) fn message(class: &str, fn_name: &str, args: &[&str]) -> String { + let mut s = String::from(class); + s.push_str("::"); + s.push_str(fn_name); + s.push('('); + for (i, arg) in args.iter().enumerate() { + if i > 0 { s.push_str(", "); } + s.push_str(arg); + } + s.push(')'); + s +} + +pub(crate) fn format_arg(value: &Tree) -> String { + match value { + Tree::Scalar(b) => { + let s = String::from_utf8_lossy(b); + if s.len() > 50 { + let mut out = String::from("'"); + out.push_str(&s[..47]); + out.push_str("'..."); + out + } else { + let mut out = String::from("'"); + out.push_str(&s); + out.push('\''); + out + } + } + Tree::Sequence(arr) if arr.is_empty() => String::from("[]"), + Tree::Sequence(arr) => { + let mut s = String::from("["); + s.push_str(&arr.len().to_string()); + s.push_str(" items]"); + s + } + Tree::Mapping(obj) if obj.is_empty() => String::from("{}"), + Tree::Mapping(obj) => { + let mut s = String::from("{"); + s.push_str(&obj.len().to_string()); + s.push_str(" fields}"); + s + } + Tree::Null => String::from("null"), + } +} + +pub(crate) fn format_str_arg(s: &str) -> String { + if s.len() > 50 { + let mut out = String::from("'"); + out.push_str(&s[..47]); + out.push_str("'..."); + out + } else { + let mut out = String::from("'"); + out.push_str(s); + out.push('\''); + out + } +} + +#[macro_export] +macro_rules! 
debug_log { + ($class:expr, $fun:expr $(, $arg:expr)*) => {{ + #[cfg(feature = "logging")] + { + let formatted: alloc::vec::Vec = alloc::vec![ + $( $crate::error::format_str_arg($arg), )* + ]; + let refs: alloc::vec::Vec<&str> = formatted.iter().map(|s| s.as_str()).collect(); + log::debug!("{}", $crate::error::message($class, $fun, &refs)); + } + }}; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_message_multiple_args() { + let result = message("State", "get", &["'cache.user'", "null"]); + assert_eq!(result, "State::get('cache.user', null)"); + } + + #[test] + fn test_format_arg_scalar() { + assert_eq!(format_arg(&Tree::Scalar(b"text".to_vec())), "'text'"); + assert_eq!(format_arg(&Tree::Null), "null"); + assert_eq!(format_arg(&Tree::Sequence(vec![])), "[]"); + assert_eq!(format_arg(&Tree::Mapping(vec![])), "{}"); + assert_eq!(format_arg(&Tree::Sequence(vec![Tree::Null, Tree::Null, Tree::Null])), "[3 items]"); + assert_eq!(format_arg(&Tree::Mapping(vec![(b"a".to_vec(), Tree::Null)])), "{1 fields}"); + } + + #[test] + fn test_format_arg_long_string() { + let long_str = "a".repeat(60); + let result = format_arg(&Tree::Scalar(long_str.into_bytes())); + assert!(result.starts_with("'aaa")); + assert!(result.ends_with("'...")); + assert_eq!(result.len(), 52); + } + + #[test] + fn test_format_str_arg() { + assert_eq!(format_str_arg("key"), "'key'"); + let long_str = "a".repeat(60); + let result = format_str_arg(&long_str); + assert!(result.starts_with("'aaa")); + assert!(result.ends_with("'...")); + } +} diff --git a/src/lib.rs b/src/lib.rs index 9141a37..f9db508 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,14 +6,13 @@ extern crate alloc; #[cfg(feature = "precompile")] extern crate std; -pub mod log_format; +pub(crate) mod error; pub mod ports; pub mod context; pub mod tree; pub mod dsl; pub mod index; -pub use log_format::LogFormat; pub use ports::provided::{ Tree, ParseError, LoadError, StoreError, ContextError, diff --git a/src/log_format.rs 
b/src/log_format.rs deleted file mode 100644 index 5dced83..0000000 --- a/src/log_format.rs +++ /dev/null @@ -1,118 +0,0 @@ -use alloc::string::String; -use alloc::vec::Vec; - -use crate::ports::provided::Tree; - -/// # Examples -/// ``` -/// use state_engine::LogFormat; -/// -/// let fn_message = LogFormat::call("State", "get", &["'key'".to_string()]); -/// assert_eq!(fn_message, "State::get('key')"); -/// ``` -pub struct LogFormat; - -impl LogFormat { - pub fn call(class: &str, fn_name: &str, args: &[String]) -> String { - let args_str = args.join(", "); - format!("{}::{}({})", class, fn_name, args_str) - } - - /// Format Tree for log output - /// - /// # Examples - /// ``` - /// use state_engine::{LogFormat, Tree}; - /// - /// assert_eq!(LogFormat::format_arg(&Tree::Scalar(b"text".to_vec())), "'text'"); - /// assert_eq!(LogFormat::format_arg(&Tree::Null), "null"); - /// assert_eq!(LogFormat::format_arg(&Tree::Sequence(vec![])), "[]"); - /// assert_eq!(LogFormat::format_arg(&Tree::Mapping(vec![])), "{}"); - /// assert_eq!(LogFormat::format_arg(&Tree::Sequence(vec![Tree::Null, Tree::Null, Tree::Null])), "[3 items]"); - /// assert_eq!(LogFormat::format_arg(&Tree::Mapping(vec![(b"a".to_vec(), Tree::Null)])), "{1 fields}"); - /// ``` - pub fn format_arg(value: &Tree) -> String { - match value { - Tree::Scalar(b) => { - let s = String::from_utf8_lossy(b); - if s.len() > 50 { format!("'{}'...", &s[..47]) } else { format!("'{}'", s) } - } - Tree::Sequence(arr) if arr.is_empty() => "[]".to_string(), - Tree::Sequence(arr) => format!("[{} items]", arr.len()), - Tree::Mapping(obj) if obj.is_empty() => "{}".to_string(), - Tree::Mapping(obj) => format!("{{{} fields}}", obj.len()), - Tree::Null => "null".to_string(), - } - } - - /// Format string argument for log output - /// - /// # Examples - /// ``` - /// use state_engine::LogFormat; - /// - /// assert_eq!(LogFormat::format_str_arg("key"), "'key'"); - /// ``` - pub fn format_str_arg(s: &str) -> String { - if s.len() > 50 { 
- format!("'{}'...", &s[..47]) - } else { - format!("'{}'", s) - } - } -} - -/// Log macro: fn call -/// -/// # Examples -/// ```ignore -/// use crate::fn_log; -/// -/// fn_log!("State", "get", "cache.user"); -/// // Logs: State::get('cache.user') -/// ``` -#[macro_export] -macro_rules! fn_log { - ($class:expr, $fun:expr $(, $arg:expr)*) => {{ - #[cfg(feature = "logging")] - { - let args: Vec = vec![ - $( - $crate::log_format::LogFormat::format_str_arg($arg), - )* - ]; - log::debug!("{}", $crate::log_format::LogFormat::call($class, $fun, &args)); - } - }}; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_fn_multiple_args() { - let result = LogFormat::call("State", "get", &[ - "'cache.user'".to_string(), - "null".to_string(), - ]); - assert_eq!(result, "State::get('cache.user', null)"); - } - - #[test] - fn test_format_arg_long_string() { - let long_str = "a".repeat(60); - let result = LogFormat::format_arg(&Tree::Scalar(long_str.into_bytes())); - assert!(result.starts_with("'aaa")); - assert!(result.ends_with("'...")); - assert_eq!(result.len(), 52); - } - - #[test] - fn test_format_str_arg_long_string() { - let long_str = "a".repeat(60); - let result = LogFormat::format_str_arg(&long_str); - assert!(result.starts_with("'aaa")); - assert!(result.ends_with("'...")); - } -} From 0f2ca763f5708b515afbf1dabf94f7795ae59462 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 16:49:26 +0900 Subject: [PATCH 23/41] update Tree --- src/codec.rs | 291 --------------------------------------------------- src/ports.rs | 2 - src/tree.rs | 198 +++++++++++++++++++++++++++++++++-- 3 files changed, 187 insertions(+), 304 deletions(-) delete mode 100644 src/codec.rs diff --git a/src/codec.rs b/src/codec.rs deleted file mode 100644 index a5ac3ea..0000000 --- a/src/codec.rs +++ /dev/null @@ -1,291 +0,0 @@ -use alloc::vec::Vec; - -use crate::ports::provided::Tree as Value; - -// Wire format: -// Null : 0x00 -// 
Scalar : 0x01 | len(u32le) | bytes -// Sequence : 0x02 | count(u32le) | item... -// Mapping : 0x03 | count(u32le) | (key_len(u32le) | key_bytes | item)... - -const TAG_NULL: u8 = 0x00; -const TAG_SCALAR: u8 = 0x01; -const TAG_SEQUENCE: u8 = 0x02; -const TAG_MAPPING: u8 = 0x03; - -pub fn encode(value: &Value) -> Vec { - let mut buf = Vec::new(); - write_value(value, &mut buf); - buf -} - -pub fn decode(bytes: &[u8]) -> Option { - let (value, _) = read_value(bytes)?; - Some(value) -} - -fn write_value(value: &Value, buf: &mut Vec) { - match value { - Value::Null => { - buf.push(TAG_NULL); - } - Value::Scalar(b) => { - buf.push(TAG_SCALAR); - buf.extend_from_slice(&(b.len() as u32).to_le_bytes()); - buf.extend_from_slice(b); - } - Value::Sequence(items) => { - buf.push(TAG_SEQUENCE); - buf.extend_from_slice(&(items.len() as u32).to_le_bytes()); - for item in items { - write_value(item, buf); - } - } - Value::Mapping(pairs) => { - buf.push(TAG_MAPPING); - buf.extend_from_slice(&(pairs.len() as u32).to_le_bytes()); - for (k, v) in pairs { - buf.extend_from_slice(&(k.len() as u32).to_le_bytes()); - buf.extend_from_slice(k); - write_value(v, buf); - } - } - } -} - -fn read_value(bytes: &[u8]) -> Option<(Value, &[u8])> { - let (&tag, rest) = bytes.split_first()?; - match tag { - TAG_NULL => Some((Value::Null, rest)), - TAG_SCALAR => { - let (len, rest) = read_u32(rest)?; - let (data, rest) = split_at(rest, len)?; - Some((Value::Scalar(data.to_vec()), rest)) - } - TAG_SEQUENCE => { - let (count, mut rest) = read_u32(rest)?; - let mut items = Vec::with_capacity(count); - for _ in 0..count { - let (item, next) = read_value(rest)?; - items.push(item); - rest = next; - } - Some((Value::Sequence(items), rest)) - } - TAG_MAPPING => { - let (count, mut rest) = read_u32(rest)?; - let mut pairs = Vec::with_capacity(count); - for _ in 0..count { - let (klen, next) = read_u32(rest)?; - let (kdata, next) = split_at(next, klen)?; - let (val, next) = read_value(next)?; - 
pairs.push((kdata.to_vec(), val)); - rest = next; - } - Some((Value::Mapping(pairs), rest)) - } - _ => None, - } -} - -fn read_u32(bytes: &[u8]) -> Option<(usize, &[u8])> { - let (b, rest) = split_at(bytes, 4)?; - let n = u32::from_le_bytes(b.try_into().ok()?) as usize; - Some((n, rest)) -} - -fn split_at(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])> { - if bytes.len() >= n { Some(bytes.split_at(n)) } else { None } -} - -#[cfg(test)] -mod tests { - use super::*; - - fn rt(v: &Value) -> Value { - decode(&encode(v)).unwrap() - } - - #[test] - fn test_null() { - assert_eq!(rt(&Value::Null), Value::Null); - } - - #[test] - fn test_scalar() { - assert_eq!(rt(&Value::Scalar(b"hello".to_vec())), Value::Scalar(b"hello".to_vec())); - } - - #[test] - fn test_scalar_empty() { - assert_eq!(rt(&Value::Scalar(vec![])), Value::Scalar(vec![])); - } - - #[test] - fn test_sequence() { - let v = Value::Sequence(vec![ - Value::Scalar(b"a".to_vec()), - Value::Null, - Value::Scalar(b"b".to_vec()), - ]); - assert_eq!(rt(&v), v); - } - - #[test] - fn test_mapping() { - let v = Value::Mapping(vec![ - (b"id".to_vec(), Value::Scalar(b"1".to_vec())), - (b"name".to_vec(), Value::Scalar(b"alice".to_vec())), - ]); - assert_eq!(rt(&v), v); - } - - #[test] - fn test_nested() { - let v = Value::Mapping(vec![ - (b"user".to_vec(), Value::Mapping(vec![ - (b"id".to_vec(), Value::Scalar(b"1".to_vec())), - (b"tags".to_vec(), Value::Sequence(vec![ - Value::Scalar(b"admin".to_vec()), - Value::Scalar(b"staff".to_vec()), - ])), - (b"extra".to_vec(), Value::Null), - ])), - ]); - assert_eq!(rt(&v), v); - } - - #[test] - fn test_decode_invalid_returns_none() { - assert_eq!(decode(&[0xFF]), None); - assert_eq!(decode(&[TAG_SCALAR, 0x05, 0x00, 0x00, 0x00]), None); // truncated - } - - /// Simulate what State::get("cache.user") would return after Db load — - /// a Mapping built from yaml_to_parse_value output (String → Scalar bytes, - /// Number → Scalar bytes, Null → Null). Verify encode→decode roundtrip. 
- #[test] - fn test_roundtrip_cache_user_from_yaml() { - // Equivalent to serde_yaml_ng parsing: - // id: 1 - // org_id: 100 - // tenant_id: 10 - // yaml_to_parse_value converts Number → Scalar(n.to_string().into_bytes()) - let original = Value::Mapping(vec![ - (b"id".to_vec(), Value::Scalar(b"1".to_vec())), - (b"org_id".to_vec(), Value::Scalar(b"100".to_vec())), - (b"tenant_id".to_vec(), Value::Scalar(b"10".to_vec())), - ]); - - let bytes = encode(&original); - let decoded = decode(&bytes).unwrap(); - assert_eq!(decoded, original); - - // spot-check the wire bytes start with TAG_MAPPING - assert_eq!(bytes[0], TAG_MAPPING); - // 3 pairs - assert_eq!(&bytes[1..5], &3u32.to_le_bytes()); - } - - /// Simulate cache.tenant which has a nested Mapping and a Sequence. - #[test] - fn test_roundtrip_nested_from_yaml() { - // Equivalent to: - // name: "acme" - // health: - // status: "ok" - // tags: - // - "gold" - // - "active" - let original = Value::Mapping(vec![ - (b"name".to_vec(), Value::Scalar(b"acme".to_vec())), - (b"health".to_vec(), Value::Mapping(vec![ - (b"status".to_vec(), Value::Scalar(b"ok".to_vec())), - ])), - (b"tags".to_vec(), Value::Sequence(vec![ - Value::Scalar(b"gold".to_vec()), - Value::Scalar(b"active".to_vec()), - ])), - ]); - - let bytes = encode(&original); - let decoded = decode(&bytes).unwrap(); - assert_eq!(decoded, original); - } - - /// Null fields survive the roundtrip (yaml `~` or missing values). 
- #[test] - fn test_roundtrip_with_null_field() { - let original = Value::Mapping(vec![ - (b"id".to_vec(), Value::Scalar(b"1".to_vec())), - (b"deleted_at".to_vec(), Value::Null), - ]); - assert_eq!(decode(&encode(&original)).unwrap(), original); - } - - fn from_yaml(v: serde_yaml_ng::Value) -> Value { - match v { - serde_yaml_ng::Value::Mapping(m) => Value::Mapping( - m.into_iter() - .filter_map(|(k, v)| { - let key = match k { - serde_yaml_ng::Value::String(s) => s.into_bytes(), - _ => return None, - }; - Some((key, from_yaml(v))) - }) - .collect(), - ), - serde_yaml_ng::Value::Sequence(s) => Value::Sequence( - s.into_iter().map(from_yaml).collect() - ), - serde_yaml_ng::Value::String(s) => Value::Scalar(s.into_bytes()), - serde_yaml_ng::Value::Number(n) => Value::Scalar(n.to_string().into_bytes()), - serde_yaml_ng::Value::Bool(b) => Value::Scalar(b.to_string().into_bytes()), - serde_yaml_ng::Value::Null => Value::Null, - _ => Value::Null, - } - } - - /// Parse a real YAML string with serde_yaml_ng, convert to Value, - /// then verify encode→decode roundtrip produces identical Value. 
- #[test] - fn test_roundtrip_real_yaml_cache_user() { - let yaml = r#" -id: 1 -org_id: 100 -tenant_id: 10 -name: "alice" -active: true -score: 3.14 -deleted_at: ~ -"#; - let parsed: serde_yaml_ng::Value = serde_yaml_ng::from_str(yaml).unwrap(); - let original = from_yaml(parsed); - - let bytes = encode(&original); - let decoded = decode(&bytes).unwrap(); - assert_eq!(decoded, original); - } - - #[test] - fn test_roundtrip_real_yaml_nested() { - let yaml = r#" -user: - id: 1 - tags: - - admin - - staff - address: - city: Tokyo - zip: "100-0001" - note: ~ -"#; - let parsed: serde_yaml_ng::Value = serde_yaml_ng::from_str(yaml).unwrap(); - let original = from_yaml(parsed); - - let bytes = encode(&original); - let decoded = decode(&bytes).unwrap(); - assert_eq!(decoded, original); - } -} diff --git a/src/ports.rs b/src/ports.rs index e308b56..3936de2 100644 --- a/src/ports.rs +++ b/src/ports.rs @@ -1,7 +1,5 @@ pub mod provided; pub mod required; -pub mod codec; pub use provided::*; pub use required::*; -pub use codec::{encode, decode}; diff --git a/src/tree.rs b/src/tree.rs index d686f90..b316390 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -1,8 +1,184 @@ +use alloc::vec::Vec; + +use crate::ports::provided::Tree; + +// ── Wire format ─────────────────────────────────────────────────────────────── +// +// Null : 0x00 +// Scalar : 0x01 | len(u32le) | bytes +// Sequence : 0x02 | count(u32le) | item... +// Mapping : 0x03 | count(u32le) | (key_len(u32le) | key_bytes | item)... 
+ +const TAG_NULL: u8 = 0x00; +const TAG_SCALAR: u8 = 0x01; +const TAG_SEQUENCE: u8 = 0x02; +const TAG_MAPPING: u8 = 0x03; + +impl Tree { + pub fn serialize(&self) -> Vec { + let mut buf = Vec::new(); + write_value(self, &mut buf); + buf + } + + pub fn deserialize(bytes: &[u8]) -> Option { + let (value, _) = read_value(bytes)?; + Some(value) + } +} + +fn write_value(value: &Tree, buf: &mut Vec) { + match value { + Tree::Null => { + buf.push(TAG_NULL); + } + Tree::Scalar(b) => { + buf.push(TAG_SCALAR); + buf.extend_from_slice(&(b.len() as u32).to_le_bytes()); + buf.extend_from_slice(b); + } + Tree::Sequence(items) => { + buf.push(TAG_SEQUENCE); + buf.extend_from_slice(&(items.len() as u32).to_le_bytes()); + for item in items { + write_value(item, buf); + } + } + Tree::Mapping(pairs) => { + buf.push(TAG_MAPPING); + buf.extend_from_slice(&(pairs.len() as u32).to_le_bytes()); + for (k, v) in pairs { + buf.extend_from_slice(&(k.len() as u32).to_le_bytes()); + buf.extend_from_slice(k); + write_value(v, buf); + } + } + } +} + +fn read_value(bytes: &[u8]) -> Option<(Tree, &[u8])> { + let (&tag, rest) = bytes.split_first()?; + match tag { + TAG_NULL => Some((Tree::Null, rest)), + TAG_SCALAR => { + let (len, rest) = read_u32(rest)?; + let (data, rest) = split_at(rest, len)?; + Some((Tree::Scalar(data.to_vec()), rest)) + } + TAG_SEQUENCE => { + let (count, mut rest) = read_u32(rest)?; + let mut items = Vec::with_capacity(count); + for _ in 0..count { + let (item, next) = read_value(rest)?; + items.push(item); + rest = next; + } + Some((Tree::Sequence(items), rest)) + } + TAG_MAPPING => { + let (count, mut rest) = read_u32(rest)?; + let mut pairs = Vec::with_capacity(count); + for _ in 0..count { + let (klen, next) = read_u32(rest)?; + let (kdata, next) = split_at(next, klen)?; + let (val, next) = read_value(next)?; + pairs.push((kdata.to_vec(), val)); + rest = next; + } + Some((Tree::Mapping(pairs), rest)) + } + _ => None, + } +} + +fn read_u32(bytes: &[u8]) -> Option<(usize, 
&[u8])> { + let (b, rest) = split_at(bytes, 4)?; + let n = u32::from_le_bytes(b.try_into().ok()?) as usize; + Some((n, rest)) +} + +fn split_at(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])> { + if bytes.len() >= n { Some(bytes.split_at(n)) } else { None } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn rt(v: &Tree) -> Tree { + Tree::deserialize(&v.serialize()).unwrap() + } + + #[test] + fn test_null() { + assert_eq!(rt(&Tree::Null), Tree::Null); + } + + #[test] + fn test_scalar() { + assert_eq!(rt(&Tree::Scalar(b"hello".to_vec())), Tree::Scalar(b"hello".to_vec())); + } + + #[test] + fn test_scalar_empty() { + assert_eq!(rt(&Tree::Scalar(vec![])), Tree::Scalar(vec![])); + } + + #[test] + fn test_sequence() { + let v = Tree::Sequence(vec![ + Tree::Scalar(b"a".to_vec()), + Tree::Null, + Tree::Scalar(b"b".to_vec()), + ]); + assert_eq!(rt(&v), v); + } + + #[test] + fn test_mapping() { + let v = Tree::Mapping(vec![ + (b"id".to_vec(), Tree::Scalar(b"1".to_vec())), + (b"name".to_vec(), Tree::Scalar(b"alice".to_vec())), + ]); + assert_eq!(rt(&v), v); + } + + #[test] + fn test_nested() { + let v = Tree::Mapping(vec![ + (b"user".to_vec(), Tree::Mapping(vec![ + (b"id".to_vec(), Tree::Scalar(b"1".to_vec())), + (b"tags".to_vec(), Tree::Sequence(vec![ + Tree::Scalar(b"admin".to_vec()), + Tree::Scalar(b"staff".to_vec()), + ])), + (b"extra".to_vec(), Tree::Null), + ])), + ]); + assert_eq!(rt(&v), v); + } + + #[test] + fn test_deserialize_invalid_returns_none() { + assert_eq!(Tree::deserialize(&[0xFF]), None); + assert_eq!(Tree::deserialize(&[TAG_SCALAR, 0x05, 0x00, 0x00, 0x00]), None); + } + + #[test] + fn test_roundtrip_null_field() { + let v = Tree::Mapping(vec![ + (b"id".to_vec(), Tree::Scalar(b"1".to_vec())), + (b"deleted_at".to_vec(), Tree::Null), + ]); + assert_eq!(Tree::deserialize(&v.serialize()).unwrap(), v); + } +} + #[cfg(feature = "precompile")] mod inner { // precompile requires std: file I/O (std::fs::write) and UTF-8 parsing extern crate std; - use 
crate::ports::provided::Tree as Value; + use crate::ports::provided::Tree as TreeData; pub struct Tree { paths: Box<[u64]>, @@ -34,8 +210,8 @@ mod inner { std::fs::write(path, out) } - /// Parse a YAML byte slice into a `Value` tree. - pub fn parse(src: &[u8]) -> Result { + /// Parse a YAML byte slice into a `Tree` tree. + pub fn parse(src: &[u8]) -> Result { let s = std::str::from_utf8(src) .map_err(|e| format!("UTF-8 error: {e}"))?; let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(s) @@ -44,9 +220,9 @@ mod inner { } } - fn yaml_to_tree(v: serde_yaml_ng::Value) -> Value { + fn yaml_to_tree(v: serde_yaml_ng::Value) -> Tree { match v { - serde_yaml_ng::Value::Mapping(m) => Value::Mapping( + serde_yaml_ng::Value::Mapping(m) => Tree::Mapping( m.into_iter() .filter_map(|(k, v)| { if let serde_yaml_ng::Value::String(s) = k { @@ -58,13 +234,13 @@ mod inner { .collect(), ), serde_yaml_ng::Value::Sequence(s) => { - Value::Sequence(s.into_iter().map(yaml_to_tree).collect()) + Tree::Sequence(s.into_iter().map(yaml_to_tree).collect()) } - serde_yaml_ng::Value::String(s) => Value::Scalar(s.into_bytes()), - serde_yaml_ng::Value::Number(n) => Value::Scalar(n.to_string().into_bytes()), - serde_yaml_ng::Value::Bool(b) => Value::Scalar(b.to_string().into_bytes()), - serde_yaml_ng::Value::Null => Value::Null, - _ => Value::Null, + serde_yaml_ng::Value::String(s) => TreeData::Scalar(s.into_bytes()), + serde_yaml_ng::Value::Number(n) => TreeData::Scalar(n.to_string().into_bytes()), + serde_yaml_ng::Value::Bool(b) => Tree::Scalar(b.to_string().into_bytes()), + serde_yaml_ng::Value::Null => TreeData::Null, + _ => TreeData::Null, } } From e6e79d9639980c761a592628c4096f8dd721719e Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:29:58 +0900 Subject: [PATCH 24/41] update --- src/context.rs | 1 + src/error.rs | 2 +- src/index.rs | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/context.rs b/src/context.rs 
index 810482a..87ed4f1 100644 --- a/src/context.rs +++ b/src/context.rs @@ -1,4 +1,5 @@ use alloc::collections::{BTreeMap, BTreeSet}; +use alloc::string::ToString; use alloc::sync::Arc; use alloc::vec::Vec; use core::str::from_utf8; diff --git a/src/error.rs b/src/error.rs index d49812a..33238d6 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,4 +1,4 @@ -use alloc::string::String; +use alloc::string::{String, ToString}; use crate::ports::provided::Tree; diff --git a/src/index.rs b/src/index.rs index 9890a38..96f4ccc 100644 --- a/src/index.rs +++ b/src/index.rs @@ -1,3 +1,4 @@ +use alloc::boxed::Box; use alloc::collections::BTreeMap; use alloc::string::String; use alloc::vec::Vec; From 5d7da092c1a4207eed0cee8c7f7acaec831b180b Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 17:32:54 +0900 Subject: [PATCH 25/41] u --- src/tree.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tree.rs b/src/tree.rs index b316390..2fd2028 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -178,7 +178,7 @@ mod tests { mod inner { // precompile requires std: file I/O (std::fs::write) and UTF-8 parsing extern crate std; - use crate::ports::provided::Tree as TreeData; + use crate::ports::provided::Tree as Data; pub struct Tree { paths: Box<[u64]>, @@ -211,7 +211,7 @@ mod inner { } /// Parse a YAML byte slice into a `Tree` tree. 
- pub fn parse(src: &[u8]) -> Result { + pub fn parse(src: &[u8]) -> Result { let s = std::str::from_utf8(src) .map_err(|e| format!("UTF-8 error: {e}"))?; let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(s) @@ -236,11 +236,11 @@ mod inner { serde_yaml_ng::Value::Sequence(s) => { Tree::Sequence(s.into_iter().map(yaml_to_tree).collect()) } - serde_yaml_ng::Value::String(s) => TreeData::Scalar(s.into_bytes()), - serde_yaml_ng::Value::Number(n) => TreeData::Scalar(n.to_string().into_bytes()), + serde_yaml_ng::Value::String(s) => Data::Scalar(s.into_bytes()), + serde_yaml_ng::Value::Number(n) => Data::Scalar(n.to_string().into_bytes()), serde_yaml_ng::Value::Bool(b) => Tree::Scalar(b.to_string().into_bytes()), - serde_yaml_ng::Value::Null => TreeData::Null, - _ => TreeData::Null, + serde_yaml_ng::Value::Null => Data::Null, + _ => Data::Null, } } From 5e63477de78357a708d74c0910702e55b2938849 Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Mon, 6 Apr 2026 18:29:40 +0900 Subject: [PATCH 26/41] u --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3ee6ea8..88405f8 100644 --- a/README.md +++ b/README.md @@ -72,7 +72,7 @@ session: | `StoreClient` | `get()` `set()` `delete()` | [implements.rs](./examples/implements.rs) | | `StoreRegistry` | maps YAML client names to `StoreClient`s | [implements.rs](./examples/implements.rs) | -3. Initialize State with your registry. +3. Initialize Context with your registry. 
```rust use context_engine::State; @@ -94,12 +94,12 @@ let user_name = state.get("session.user.name")?; │ ▼ ┌─────────────┐ ┌────────────────────────────────┐ -│ Application │<------│ State (request scope instance) │ +│ Application │<------│ Context(request scope instance)│ └─────────────┘provide└────────────────────────────────┘ ▲ │ ┌─────────────┐ ┌───────────┴────────────────────┐ -│ ClientImpl │------>│ StoreRegistry (Required Port) │ +│ ClientImpl │------>│ StoreRegistry (required port) │ └─────────────┘ impl └────────────────────────────────┘ ``` From 5bc08cb031f94c001a6a053b78ce5f1f799b4e57 Mon Sep 17 00:00:00 2001 From: Andyou Date: Tue, 7 Apr 2026 00:34:29 +0900 Subject: [PATCH 27/41] update for 0.1.6 --- Cargo.toml | 8 +- README.md | 21 +- docs/Architecture.md | 199 ++++--------- docs/Dsl_guide.md | 42 +-- examples/implements.rs | 165 ++++++++--- src/{error.rs => debug_log.rs} | 5 +- src/dsl.rs | 528 +++++++++++++++++++++++++++++++-- src/lib.rs | 2 +- src/tree.rs | 110 +------ 9 files changed, 729 insertions(+), 351 deletions(-) rename src/{error.rs => debug_log.rs} (95%) diff --git a/Cargo.toml b/Cargo.toml index 8999939..2c086de 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "context-engine" -version = "0.1.6-alpha.1" +version = "0.1.6" authors = ["Andyou "] -description = "Declarative state data management system for process" +description = "Declarative context management for request handlers" edition = "2024" license = "MIT" @@ -17,10 +17,6 @@ exclude = ["examples/*"] log = { version = "0.4", default-features = false, optional = true } serde_yaml_ng = { version = "0.10", optional = true } -[dev-dependencies] -env_logger = "0.11" -ctor = "0.2" - [features] default = [] logging = ["dep:log"] diff --git a/README.md b/README.md index 88405f8..a04c16e 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ Data labels used by a web system's runtime within a single processing cycle shou | Version | Status | Date | Description | 
|---------|-----------|-----------|-------------| -| 0.1 | Released | 2026-2-12 | initial | +| 0.1 | Released | 2026-2-12 | - | | 0.1.5 | Current | 2026-3-21 | improve #43 | -| 0.1.6-alpha.1 | Alpha Release | 2026-4-5 | rename crate | +| 0.1.6 | Scheduled | 2026-4-19 | improve #38 | ## Provided Functions @@ -105,28 +105,29 @@ let user_name = state.get("session.user.name")?; see for details [Architecture.md](./docs/Architecture.md) -## tree +## Tree ``` ./ README.md Cargo.toml + docs/ Architecture.md Dsl_guide.md + src/ lib.rs - context.rs - dsl.rs - index.rs - tree.rs - log_format.rs - codec.rs ports.rs ports/ provided.rs required.rs - unused/ # reference: old implementation + debug_log.rs + tree.rs + dsl.rs + index.rs + context.rs + examples/ tenant.yml implements.rs diff --git a/docs/Architecture.md b/docs/Architecture.md index 98d3c74..010d364 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -1,7 +1,5 @@ # Architecture -*warning*: Temporarily, Japanese and English combind in this file. 
- ## ライブラリ要件 - README 3行目 参照 @@ -19,69 +17,67 @@ | Mod | Description | Ports | Filename | |-----|-------------|-------|----------| -| Tree | YAMLファイルを読み込んでProvided::Tree型にパースし、Dsl::compileの出力を実行ファイルに書き込む | write | tree.rs | -| Dsl | Tree型のDSLを読み込み、n次元疎集合割り出しの最適解である、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する | new, compile | dsl.rs | -| Index | Dsl:compile(DSL)を呼び出し、アドレスリスト(Box<(u64, u32)>)を保持し、トラバーサルによってメモリ位置群を取得する | new, traverse | index.rs | +| tree | `enum Tree` の wire format serialize / deserialize | serialize, deserialize | tree.rs | +| Dsl | `Tree` のDSLを読み込み、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する。`feature=precompile` 時は `Dsl::write()` でYAML→静的Rustファイル出力 | compile, write(precompile) | dsl.rs | +| Index | `Dsl::compile` の出力を保持し、トラバーサルによってleaf参照群を取得する | new, traverse | index.rs | | Context | コンテクストデータの操作を行うリクエスト処理スコープの実行インスタンス | new, get, set, delete, exists | context.rs | * Portsはpub fnのこと * new()であっても、引数はVec等の標準型依存を明示するべき。construct状態は避ける -* Tree::write()は引数にoptionを取って、「Dsl::compileの出力を」をskipしてValueのまま書き込むオプションを追加予定 -* Context.new()内の各StoreClientは、Arcで記述する。ClientResistoryを新規導入したので、検討余地あるかも -* context.rsが煩雑になるようなら、内部modとしてLoadとStoreを切り出す必要があるかも。係数明示不足による所有複雑化に注意 ### Portモジュール -| Mod | Description | Ports | Filename | -|-----|-------------|------|----------| -| Context | Contextのtrait | - | provided.rs | -| StoreClient | *Clientの基底 | - | required.rs | -| ClientResistory | *Clientの登録用 | - | provided.rs | +| Mod | Description | Filename | +|-----|-------------|----------| +| Context | Contextのtrait、Tree型、各Error型 | ports/provided.rs | +| StoreClient | 単一ストアのadapter trait | ports/required.rs | +| StoreRegistry | client名称→StoreClientのdispatch trait | ports/required.rs | ### 開発用モジュール -| Mod | Description | Port | Filename | -|-------|------|---------| -| Error | Provided Port | - | provided.rs | -| Log | feature=logging限定のマクロ | fn_log! 
| provided.rs | +| Mod | Description | Filename | +|-----|-------------|----------| +| debug_log | `feature=logging` 限定のデバッグログマクロ・ユーティリティ | debug_log.rs | ## 用語 -```yaml -key: n層マップDSLの最末端value以外の要素 -keyword: keyの名前文字列 -field_key: 自身と親祖先がkeywordが'_'で始まらないkey -meta_key: keywordが'_'始まりのkeyと、その子孫key -leaf_key: 子にkeyを持たず値を持つkey -value: leaf keysの値。DSL内で省略された場合はnullが充てられる -path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン +``` +key: n層マップDSLの最末端value以外の要素 +keyword: keyの名前文字列 +field_key: 自身と親祖先のkeywordが'_'で始まらないkey +meta_key: keywordが'_'始まりのkeyと、その子孫key +leaf_key: 子にkeyを持たず値を持つkey +value: leaf_keyの値。DSL内で省略された場合はnullが充てられる +path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン qualified_path: DSL内で一意な完全修飾パス -placeholder: key参照記述("${path}")。valueのみに適用。単独記述時はis_template=falseとして扱い、値をそのままコピーする(string化しない) -template: placeholderと静的な文字列を混合した動的生成文字列。valueのみに適用。is_template=trueとして扱い、解決時にstring化する -called_path: Stateに渡されるパス文字列 +placeholder: key参照記述("${path}")。valueのみに適用。 + 単独記述時はis_template=falseとして扱い、値をそのままコピーする(string化しない) +template: placeholderと静的な文字列を混合した動的生成文字列。valueのみに適用。 + is_template=trueとして扱い、解決時にstring化する +called_path: Context.get()等に渡されるパス文字列 ``` ## mod:fn詳細仕様 ### StoreClient -単一ストアの操作を提供するtrait。`key`は予約引数として明示し、追加の任意引数は`args`のflatなHashMapで渡す。 +単一ストアの操作を提供するtrait。`key`は予約引数として明示し、追加の任意引数は`args`のflatなBTreeMapで渡す。 ```rust pub trait StoreClient: Send + Sync { - fn get(&self, key: &str, args: &HashMap<&str, Value>) -> Option; - fn set(&self, key: &str, args: &HashMap<&str, Value>) -> bool; - fn delete(&self, key: &str, args: &HashMap<&str, Value>) -> bool; + fn get(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; + fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; + fn delete(&self, key: &str, args: &BTreeMap<&str, Tree>) -> bool; } ``` -- `key`: manifest の `_{store,load}.key` の値。予約引数。 -- `args`: ttl・connection・headers 等、ストア種別ごとの任意引数。利用者がimpl内で定義・参照する。 +- `key`: DSL の `_load.key` / `_store.key` の値。予約引数。 +- `args`: ttl・connection・map 等、ストア種別ごとの任意引数。利用者がimpl内で定義・参照する。 - 
内部可変性・スレッド安全性はimplementor側の責任。 ### StoreRegistry -YAMLの`client:`名称とStoreClientの対応を管理するtrait。利用者がimplし、Stateに渡す。 +YAMLの`client:`名称とStoreClientの対応を管理するtrait。利用者がimplし、Contextに渡す。 ```rust pub trait StoreRegistry { @@ -90,124 +86,59 @@ pub trait StoreRegistry { ``` - ライブラリはYAML名称の文字列をそのまま`client_for()`に渡してmatchを回す。 -- YAML上の名義(`"Memory"`, `"KVS"`, `"Db"`等)は利用者が自由に定義する。 +- YAML上の名義(`"Memory"`, `"Kvs"`, `"TenantDb"`等)は利用者が自由に定義する。 -**実装例:** -```rust -struct MyStores { - memory: Arc, - kvs: Arc, - db: Arc, -} +### Instance Cache -impl StoreRegistry for MyStores { - fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient> { - match yaml_name { - "Memory" => Some(self.memory.as_ref()), - "KVS" => Some(self.kvs.as_ref()), - "Db" => Some(self.db.as_ref()), - _ => None, - } - } -} -``` +Contextインスタンス固有のキャッシュ。StoreClientとは独立。 ---- +- Context生成時: 空 +- Context生存中: get/setに応じて蓄積 +- Context破棄時: 解放 -## Instance Cache - -An instance-level cache separate from persistent stores. +### Context.get() -**Important:** This is NOT a StoreClient. It is a variable of the State instance itself. +指定パスが表す値を返却する。 -**Purpose:** -1. Speed up duplicate `Context.get()` calls within the same request -2. Reduce access count to stores -3. Avoid duplicate loads +戻り値: `Result, ContextError>` -**Lifecycle:** -- State instance created: empty -- During State lifetime: accumulates -- State instance dropped: destroyed (memory released) +**動作フロー:** +1. called チェック(再帰・上限検出) +2. `Index::traverse(path)` → LeafRef一覧 +3. instance cache をチェック +4. `_store` client に問い合わせ +5. miss時、`_load` client で自動ロード → write-through to `_store` +6. `Ok(Some(value))` / `Ok(None)` / `Err(ContextError)` を返却 ## Placeholder Resolution Rules -`${}` paths are **qualified to absolute paths at parse time** — no conversion happens at State runtime. 
- -**Qualify rule at parse time (`qualify_path()`):** -- Path contains `.` → treated as absolute, used as-is -- No `.` → converted to `filename.ancestors.path` - -**Example (`${tenant_id}` in `cache.yml` under `user._load.where`):** -``` -qualify_path("tenant_id", "cache", ["user"]) -→ "cache.user.tenant_id" -``` +`${}` paths are always treated as absolute paths. **Placeholder resolution at runtime:** -- `is_template=false`(単独 `${path}`): `Context.get(qualified_path)` の値をそのままコピー(string化しない) +- `is_template=false`(単独 `${path}`): `Context.get(path)` の値をそのままコピー(string化しない) - `is_template=true`(文字列混在): 各placeholderを `Context.get()` で解決しstringとして結合 -## error case - -**ManifestError:** -- `FileNotFound` — manifest file not found in manifest dir -- `AmbiguousFile` — two files with the same name but different extensions (`.yml` and `.yaml`) exist in manifestDir -- `ParseError` — YAML parse failed - -**LoadError:** -- `ClientNotFound(String)` — `StoreRegistry::client_for()` returned `None` for the given yaml_name -- `ConfigMissing(String)` — a required config key is missing in the manifest -- `NotFound(String)` — the client call succeeded but returned no data -- `ParseError(String)` — parse error from client response +## Error Types -**StoreError:** -- `ClientNotFound(String)` — `StoreRegistry::client_for()` returned `None` for the given yaml_name -- `ConfigMissing(String)` — a required config key is missing in the manifest -- `SerializeError(String)` — serialize error +**ParseError** (`ports/provided.rs`): +- `FileNotFound(String)` +- `AmbiguousFile(String)` +- `ParseError(String)` ---- - -## Original Text (ja) - -**StoreClient** - -単一ストアのget/set/deleteを提供するtrait。`key`は予約引数。`args`にttl等の任意引数をflatなHashMapで渡す。内部可変性はimplementor側の責任。 - -**StoreRegistry** - -YAMLの`client:`文字列と`StoreClient`の対応を管理するtrait。利用者がimplしてStateに渡す。ライブラリ側はYAML名を`client_for()`に渡してdispatchする。YAML上の名義は利用者が自由に定義できる。 - -## State - -### Context.get() - -指定されたノードが表す値群を参照し、値またはcollectionを返却する。 - -戻り値: `Result, 
StateError>` - -**動作フロー:** -1. called チェック(再帰・上限検出) -2. `Index::traverse()` -3. cache (インスタンスキャッシュ) をチェック -4. `StoreRegistry::client_for(yaml_name)` → `StoreClient::get()` -5. **miss時、`Load::handle()` で自動ロード** -6. `Ok(Some(value))` / `Ok(None)` / `Err(StateError)` を返却 - -## error case - -**ManifestError:** -- `FileNotFound` — manifestディレクトリにファイルが見つからない -- `AmbiguousFile` — manifestDir内に拡張子違いの同名ファイルが2つ存在する -- `ParseError` — YAMLのパース失敗 - -**LoadError:** +**LoadError** (`ports/provided.rs`): - `ClientNotFound(String)` — `StoreRegistry::client_for()` が None を返した -- `ConfigMissing(String)` — manifest内に必須のconfigキーが欠落 +- `ConfigMissing(String)` — DSL内に必須のconfigキーが欠落 - `NotFound(String)` — clientの呼び出しは成功したがデータが存在しなかった - `ParseError(String)` — clientレスポンスのパースエラー -**StoreError:** +**StoreError** (`ports/provided.rs`): - `ClientNotFound(String)` — `StoreRegistry::client_for()` が None を返した -- `ConfigMissing(String)` — manifest内に必須のconfigキーが欠落 +- `ConfigMissing(String)` — DSL内に必須のconfigキーが欠落 - `SerializeError(String)` — シリアライズエラー + +**ContextError** (`ports/provided.rs`): +- `ParseFailed(String)` +- `KeyNotFound(String)` +- `RecursionLimitExceeded` +- `StoreFailed(StoreError)` +- `LoadFailed(LoadError)` diff --git a/docs/Dsl_guide.md b/docs/Dsl_guide.md index b1b9f1d..7e1181b 100644 --- a/docs/Dsl_guide.md +++ b/docs/Dsl_guide.md @@ -6,7 +6,7 @@ key: n層マップDSLの最末端value以外の要素 keyword: keyの名前文字列 field_key: 自身と親祖先のkeywordが'_'で始まらないkey -meta_key: keywordが'_'始まりのkeyと、その子孫key +meta_key: keywordが'_'始まりのkeyと、その子孫key (_load, _store, _state) leaf_key: 子にkeyを持たず値を持つkey value: leaf_keyの値。DSL内で省略された場合はnullが充てられる path: 単一のfield_keyを表す、'.'区切りkeywordのチェーン @@ -22,6 +22,7 @@ called_path: Context.get()等に渡されるパス文字列 - YAML document separators (`---`) are not supported - `${}` (placeholder / template) are only valid inside values +- `${}` paths are always treated as absolute paths ## Basic Structure @@ -52,29 +53,28 @@ user: # inherits _store: { client: Kvs, key: "user:${user_id}" } ``` -`_store` 
inheritance rule: child's `_store` fields overwrite matching keys; unspecified fields are inherited as-is. +`_store` / `_load` inheritance rule: child's fields overwrite matching keys; unspecified fields are inherited as-is. Inheritance is resolved at compile time — runtime traversal carries no parent state. ### 2. Placeholder / Template -`${}` paths are qualified to absolute paths at parse time. - -**Qualify rule (`qualify_path()`):** -- No `.` → relative; converted to `filename.ancestors.keyword` at parse time -- Contains `.` → treated as absolute, used as-is - -```yaml -# Inside tenant.yml under session.user._load -key: "${session.user.id}" # absolute — used as-is -key: "${id}" # relative → tenant.session.user.id -``` - **is_template:** - `${path}` alone → `is_template=false`。値をそのままコピー(string化しない) - `"prefix:${path}"` etc. → `is_template=true`。全placeholderをContext.get()で解決しstring結合 -### 3. _store / _load args +### 3. Reserved keywords + +| keyword | scope | description | +|----------|---------------|-------------| +| `_load` | meta_key | load source definition | +| `_store` | meta_key | store destination definition | +| `_state` | meta_key | reserved | +| `client` | _load / _store prop | StoreRegistry yaml_name | +| `key` | _load / _store prop | reserved arg passed to StoreClient | +| `map` | _load / _store prop | field mapping definition | + +### 4. _store / _load args -`client:` 以外の全フィールドはimplementor定義の任意args。ライブラリは関知しない。 +`client:` と `key:` 以外の全フィールドはimplementor定義の任意args。ライブラリは関知しない。 ```yaml _store: @@ -91,11 +91,11 @@ _load: email: "email" ``` -`key:` は予約引数。それ以外はimplementorが`args: &HashMap<&str, Tree>`から取り出して使う。 +implementorは `args: &BTreeMap<&str, Tree>` から任意キーを取り出して使う。 -### 4. map +### 5. 
map -`_load.map:` でparent field_keyの子fieldにDB列等をマッピングする。 +`_load.map:` / `_store.map:` でparent field_keyの子fieldにストア列等をマッピングする。 ```yaml session: @@ -110,4 +110,6 @@ session: email: ``` -map対象のfield_keyは別途leaf宣言が必要。 +- `map` のvalue(`"name"`, `"email"` 等)がargs経由でclientに渡される +- clientはその順序通りに値を取得して返す責務を持つ +- map対象のfield_keyは別途leaf宣言が必要 diff --git a/examples/implements.rs b/examples/implements.rs index 12a432e..404363e 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -1,10 +1,12 @@ +fn main() {} + // Example StoreClient implementations. // These are minimal stubs showing how to implement StoreClient and StoreRegistry // for common backing stores under the new unified interface. use context_engine::ports::required::{StoreClient, StoreRegistry, SetOutcome}; use context_engine::ports::provided::Tree; -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use std::sync::{Arc, Mutex}; // ── Memory ──────────────────────────────────────────────────────────────────── @@ -20,86 +22,157 @@ impl MemoryClient { } impl StoreClient for MemoryClient { - fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option { + fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { self.data.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option { - // args["value"] holds the value to store + fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { let value = args.get("value")?.clone(); let mut data = self.data.lock().unwrap(); let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; data.insert(key.to_string(), value); Some(outcome) } - fn delete(&self, key: &str, _args: &HashMap<&str, Tree>) -> bool { + fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { self.data.lock().unwrap().remove(key).is_some() } } -// ── KVS (Redis) ─────────────────────────────────────────────────────────────── +// ── KVS (Redis-like mock) 
───────────────────────────────────────────────────
//
// args["ttl"] — optional, seconds as Scalar

 pub struct KvsClient {
-    client: Mutex<redis::Client>,
+    data: Mutex<HashMap<String, Tree>>,
 }

 impl KvsClient {
-    pub fn new(url: &str) -> Result<Self, redis::RedisError> {
-        Ok(Self { client: Mutex::new(redis::Client::open(url)?) })
+    pub fn new() -> Self {
+        Self { data: Mutex::new(HashMap::new()) }
     }
 }

 impl StoreClient for KvsClient {
-    fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option<Tree> {
-        let client = self.client.lock().unwrap();
-        let mut conn = client.get_connection().ok()?;
-        let bytes: Option<Vec<u8>> = redis::cmd("GET").arg(key).query(&mut conn).ok()?;
-        // deserialize bytes → Tree (implementor's responsibility)
-        bytes.map(|_b| todo!("deserialize"))
-    }
-    fn set(&self, key: &str, args: &HashMap<&str, Tree>) -> Option<SetOutcome> {
-        let client = self.client.lock().unwrap();
-        let mut conn = client.get_connection().ok()?;
-        let value = args.get("value")?;
-        let ttl = args.get("ttl").and_then(|t| match t {
-            Tree::Scalar(b) => std::str::from_utf8(b).ok()?.parse::<u64>().ok(),
-            _ => None,
-        });
-        // serialize Tree → bytes (implementor's responsibility)
-        let bytes: Vec<u8> = todo!("serialize");
-        let result: Result<(), _> = match ttl {
-            Some(secs) => redis::cmd("SETEX").arg(key).arg(secs).arg(bytes).query(&mut conn),
-            None => redis::cmd("SET").arg(key).arg(bytes).query(&mut conn),
-        };
-        result.ok().map(|_| SetOutcome::Created)
-    }
-    fn delete(&self, key: &str, _args: &HashMap<&str, Tree>) -> bool {
-        let client = self.client.lock().unwrap();
-        let mut conn = match client.get_connection() { Ok(c) => c, Err(_) => return false };
-        let result: Result<i64, _> = redis::cmd("DEL").arg(key).query(&mut conn);
-        result.map(|n| n > 0).unwrap_or(false)
+    fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option<Tree> {
+        let bytes = self.data.lock().unwrap().get(key).cloned()?;
+        // In real impl: deserialize wire bytes → Tree
+        Some(bytes)
+    }
+    fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option<SetOutcome> {
+        let value = 
args.get("value")?.clone(); + // args["ttl"] ignored in mock + let mut data = self.data.lock().unwrap(); + let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; + data.insert(key.to_string(), value); + Some(outcome) + } + fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + self.data.lock().unwrap().remove(key).is_some() } } // ── Env ─────────────────────────────────────────────────────────────────────── +// +// args contains map.* values as env var names (in order). +// Returns a Mapping of { env_var_name → env_var_value }. pub struct EnvClient; impl StoreClient for EnvClient { - fn get(&self, key: &str, _args: &HashMap<&str, Tree>) -> Option { - std::env::var(key).ok().map(|s| Tree::Scalar(s.into_bytes())) + fn get(&self, _key: &str, args: &BTreeMap<&str, Tree>) -> Option { + let pairs: Vec<(Vec, Tree)> = args.iter() + .filter_map(|(&k, v)| { + let env_key = match v { + Tree::Scalar(b) => std::str::from_utf8(b).ok()?, + _ => return None, + }; + let value = std::env::var(env_key).ok() + .map(|s| Tree::Scalar(s.into_bytes())) + .unwrap_or(Tree::Null); + Some((k.as_bytes().to_vec(), value)) + }) + .collect(); + if pairs.is_empty() { None } else { Some(Tree::Mapping(pairs)) } + } + fn set(&self, _key: &str, _args: &BTreeMap<&str, Tree>) -> Option { None } + fn delete(&self, _key: &str, _args: &BTreeMap<&str, Tree>) -> bool { false } +} + +// ── CommonDb (mock) ─────────────────────────────────────────────────────────── + +pub struct CommonDbClient { + data: Mutex>, +} + +impl CommonDbClient { + pub fn new() -> Self { + Self { data: Mutex::new(HashMap::new()) } + } +} + +impl StoreClient for CommonDbClient { + fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + self.data.lock().unwrap().get(key).cloned() + } + fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + let value = args.get("value")?.clone(); + let mut data = self.data.lock().unwrap(); + let outcome = if 
data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; + data.insert(key.to_string(), value); + Some(outcome) + } + fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + self.data.lock().unwrap().remove(key).is_some() + } +} + +// ── TenantDb (mock) ─────────────────────────────────────────────────────────── + +pub struct TenantDbClient { + data: Mutex>, +} + +impl TenantDbClient { + pub fn new() -> Self { + Self { data: Mutex::new(HashMap::new()) } + } +} + +impl StoreClient for TenantDbClient { + fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + self.data.lock().unwrap().get(key).cloned() + } + fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + let value = args.get("value")?.clone(); + let mut data = self.data.lock().unwrap(); + let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; + data.insert(key.to_string(), value); + Some(outcome) + } + fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + self.data.lock().unwrap().remove(key).is_some() } - fn set(&self, _key: &str, _args: &HashMap<&str, Tree>) -> Option { None } - fn delete(&self, _key: &str, _args: &HashMap<&str, Tree>) -> bool { false } } // ── StoreRegistry ───────────────────────────────────────────────────────────── pub struct MyRegistry { - memory: Arc, - kvs: Arc, - env: Arc, + memory: Arc, + kvs: Arc, + env: Arc, + common_db: Arc, + tenant_db: Arc, +} + +impl MyRegistry { + pub fn new() -> Self { + Self { + memory: Arc::new(MemoryClient::new()), + kvs: Arc::new(KvsClient::new()), + env: Arc::new(EnvClient), + common_db: Arc::new(CommonDbClient::new()), + tenant_db: Arc::new(TenantDbClient::new()), + } + } } impl StoreRegistry for MyRegistry { @@ -108,8 +181,8 @@ impl StoreRegistry for MyRegistry { "Memory" => Some(self.memory.as_ref()), "Kvs" => Some(self.kvs.as_ref()), "Env" => Some(self.env.as_ref()), - "CommonDb" => todo!("implement CommonDb"), - "TenantDb" => 
todo!("implement TenantDb"), + "CommonDb" => Some(self.common_db.as_ref()), + "TenantDb" => Some(self.tenant_db.as_ref()), _ => None, } } diff --git a/src/error.rs b/src/debug_log.rs similarity index 95% rename from src/error.rs rename to src/debug_log.rs index 33238d6..125be92 100644 --- a/src/error.rs +++ b/src/debug_log.rs @@ -69,10 +69,10 @@ macro_rules! debug_log { #[cfg(feature = "logging")] { let formatted: alloc::vec::Vec = alloc::vec![ - $( $crate::error::format_str_arg($arg), )* + $( $crate::debug_log::format_str_arg($arg), )* ]; let refs: alloc::vec::Vec<&str> = formatted.iter().map(|s| s.as_str()).collect(); - log::debug!("{}", $crate::error::message($class, $fun, &refs)); + log::debug!("{}", $crate::debug_log::message($class, $fun, &refs)); } }}; } @@ -80,6 +80,7 @@ macro_rules! debug_log { #[cfg(test)] mod tests { use super::*; + use alloc::vec; #[test] fn test_message_multiple_args() { diff --git a/src/dsl.rs b/src/dsl.rs index 27d156c..78f69df 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -3,34 +3,36 @@ use alloc::vec::Vec; use crate::ports::provided::Tree; -// ── client_idx constants (4bit, stored in leaves) ───────────────────────────── +// ── meta_key keywords ───────────────────────────────────────────────────────── -pub const CLIENT_NULL: u8 = 0b0000; -pub const CLIENT_STATE: u8 = 0b0001; +pub const META_LOAD: &[u8] = b"_load"; +pub const META_STORE: &[u8] = b"_store"; +pub const META_STATE: &[u8] = b"_state"; -// ── prop constants ──────────────────────────────────────────────────────────── +// ── prop keywords (within _load / _store) ──────────────────────────────────── -pub const PROP_NULL: u8 = 0b00; -pub const PROP_KEY: u8 = 0b01; -pub const PROP_MAP: u8 = 0b10; +pub const PROP_CLIENT: &[u8] = b"client"; +pub const PROP_KEY: &[u8] = b"key"; +pub const PROP_MAP: &[u8] = b"map"; -// ── path field masks (u64) ──────────────────────────────────────────────────── +// ── path field layout (u64) ─────────────────────────────────────────────────── 
 //
-// | field   | bits  |
-// |---------|-------|
-// | is_leaf | 1     |
-// | offset  | 32    |
-// | count   | 8     | // is_leaf=0: [3:0]=子path数, [7:4]=unused
-// |         |       | // is_leaf=1: [7:4]=load_args count, [3:0]=store_args count
-// | padding | 23    |
+// | field      | bits |
+// |------------|------|
+// | is_leaf    | 1    | bit 63
+// | offset     | 32   | bits 54..23
+// | count      | 8    | bits 22..15
+// |            |      | is_leaf=0: [3:0]=子path数(1~16), [7:4]=unused
+// |            |      | is_leaf=1: [7:4]=load_args count, [3:0]=store_args count (各最大15)
+// | (reserved) | 23   | bits 62..55, 14..0

 pub const PATH_IS_LEAF_SHIFT: u64 = 63;
 pub const PATH_OFFSET_SHIFT: u64 = 23;
 pub const PATH_COUNT_SHIFT: u64 = 15;

-pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT;
-pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT;
-pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT;
+pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT;
+pub const PATH_OFFSET_MASK:  u64 = 0xffff_ffff << PATH_OFFSET_SHIFT;
+pub const PATH_COUNT_MASK:   u64 = 0xff << PATH_COUNT_SHIFT;

 // ── Dsl ───────────────────────────────────────────────────────────────────────

@@ -61,9 +63,45 @@ impl Dsl {
         Box<[u64]>,
     ) {
         let mut compiler = Compiler::new();
-        compiler.walk(tree);
+        if let Tree::Mapping(pairs) = tree {
+            compiler.walk_mapping(pairs, None, None);
+        }
         compiler.finish()
     }
+
+    /// Parse YAML source, compile, and write static Rust data to `out_path`. 
+ #[cfg(feature = "precompile")] + pub fn write(src: &[u8], out_path: &str) -> Result<(), alloc::string::String> { + extern crate std; + use std::string::{String, ToString}; + use std::format; + + let tree = parse_yaml(src)?; + let (paths, children, leaves, interning, interning_idx) = Self::compile(&tree); + + let mut out = String::new(); + out.push_str("// @generated — do not edit by hand\n\n"); + emit_u64_slice(&mut out, "PATHS", &paths); + emit_u32_slice(&mut out, "CHILDREN", &children); + emit_u8_slice (&mut out, "LEAVES", &leaves); + emit_u8_slice (&mut out, "INTERNING", &interning); + emit_u64_slice(&mut out, "INTERNING_IDX", &interning_idx); + + std::fs::write(out_path, out) + .map_err(|e| format!("write error: {e}")) + } +} + +// ── MetaBlock ───────────────────────────────────────────────────────────────── +// +// Intermediate representation of a resolved _load or _store block. +// Carried down the recursion for inheritance. + +#[derive(Clone)] +struct MetaBlock { + client_idx: u32, // interning_idx of client yaml_name + key_idx: u32, // interning_idx of key value + args: Vec<(u32, u32)>, // (key_interning_idx, value_interning_idx) } // ── Compiler (internal) ─────────────────────────────────────────────────────── @@ -87,13 +125,224 @@ impl Compiler { } } - fn walk(&mut self, _tree: &Tree) { - todo!("compile Tree into flat lists") + // ── walk ────────────────────────────────────────────────────────────────── + + /// Walk a mapping's field_key entries, building path/children/leaves. + /// `inh_load` / `inh_store`: inherited MetaBlock from parent (carried down). + fn walk_mapping( + &mut self, + pairs: &[(Vec, Tree)], + inh_load: Option<&MetaBlock>, + inh_store: Option<&MetaBlock>, + ) { + for (k, v) in pairs { + if k.first() != Some(&b'_') { + self.walk_field_key(k, v, inh_load, inh_store); + } + } + } + + /// Process a single field_key node. 
+ fn walk_field_key( + &mut self, + keyword: &[u8], + value: &Tree, + inh_load: Option<&MetaBlock>, + inh_store: Option<&MetaBlock>, + ) { + let path_idx = self.paths.len() as u32; + self.paths.push(0u64); // placeholder, filled below + + let keyword_idx = self.intern(keyword); + + match value { + Tree::Mapping(pairs) => { + // Extract _load / _store from this node, merging with inherited. + let load = self.resolve_meta(pairs, META_LOAD, inh_load); + let store = self.resolve_meta(pairs, META_STORE, inh_store); + + // Collect child field_keys. + let children_offset = self.children.len() as u32; + let mut child_count = 0u32; + + for (k, v) in pairs { + if k.first() == Some(&b'_') { continue; } + let child_idx = self.paths.len() as u32; + self.walk_field_key(k, v, load.as_ref(), store.as_ref()); + self.children.push(child_idx); + child_count += 1; + } + + if child_count == 0 { + // No child field_keys → treat as leaf. + self.write_leaf(path_idx, keyword_idx, None, load.as_ref(), store.as_ref()); + } else { + let count_bits = (child_count as u64) & 0xf; + self.paths[path_idx as usize] = + (children_offset as u64) << PATH_OFFSET_SHIFT + | count_bits << PATH_COUNT_SHIFT; + } + } + // Scalar or Null → leaf with optional hardcoded value. + _ => { + let value_idx = self.intern_tree_scalar(value); + self.write_leaf(path_idx, keyword_idx, Some(value_idx), inh_load, inh_store); + } + } + } + + // ── meta resolution ─────────────────────────────────────────────────────── + + /// Resolve a _load or _store block from this node's pairs, merging with inherited. + /// Returns None if neither this node nor ancestors define the block. 
+ fn resolve_meta( + &mut self, + pairs: &[(Vec, Tree)], + meta_key: &[u8], + inherited: Option<&MetaBlock>, + ) -> Option { + let local = pairs.iter().find(|(k, _)| k.as_slice() == meta_key); + match (local, inherited) { + (None, None) => None, + (None, Some(inh)) => Some(inh.clone()), + (Some((_, Tree::Mapping(meta_pairs))), inh) => { + // Start from inherited, overwrite with local fields. + let mut client_idx = inh.map(|b| b.client_idx).unwrap_or(0); + let mut key_idx = inh.map(|b| b.key_idx).unwrap_or(0); + let mut args: Vec<(u32, u32)> = inh.map(|b| b.args.clone()).unwrap_or_default(); + + for (k, v) in meta_pairs { + if k.as_slice() == PROP_CLIENT { + if let Tree::Scalar(b) = v { + client_idx = self.intern(b); + } + } else if k.as_slice() == PROP_KEY { + key_idx = self.intern_tree_scalar(v); + } else if k.as_slice() == PROP_MAP { + // map entries: each value is a string (store column name etc.) + // stored as (dst_path_interning_idx, src_value_interning_idx) + if let Tree::Mapping(map_pairs) = v { + args.clear(); // local map overrides inherited + for (mk, mv) in map_pairs { + let mk_idx = self.intern(mk); + let mv_idx = self.intern_tree_scalar(mv); + args.push((mk_idx, mv_idx)); + } + } + } else if k.as_slice() != META_LOAD + && k.as_slice() != META_STORE + && k.as_slice() != META_STATE { + // arbitrary implementor arg + let ak = self.intern(k); + let av = self.intern_tree_scalar(v); + // overwrite if key already present, otherwise append + if let Some(entry) = args.iter_mut().find(|(ek, _)| *ek == ak) { + entry.1 = av; + } else { + args.push((ak, av)); + } + } + } + Some(MetaBlock { client_idx, key_idx, args }) + } + _ => inherited.cloned(), + } } + // ── leaf serialization ──────────────────────────────────────────────────── + + /// Write leaf data to `leaves` and update `paths[path_idx]`. 
+ /// + /// Leaf layout: + /// keyword_idx (u32le) + /// value_idx (u32le) // 0 = null/absent + /// _load client_idx (u32le) | key_idx (u32le) + /// _store client_idx (u32le) | key_idx (u32le) + /// _load.args × load_args_count : key_idx(u32le) | value_idx(u32le) + /// _store.args × store_args_count : key_idx(u32le) | value_idx(u32le) + fn write_leaf( + &mut self, + path_idx: u32, + keyword_idx: u32, + value_idx: Option, + load: Option<&MetaBlock>, + store: Option<&MetaBlock>, + ) { + let leaf_offset = self.leaves.len() as u32; + + let load_args_count = load.map(|b| b.args.len()).unwrap_or(0); + let store_args_count = store.map(|b| b.args.len()).unwrap_or(0); + + // keyword + value + self.push_u32(keyword_idx); + self.push_u32(value_idx.unwrap_or(0)); + + // _load header + self.push_u32(load.map(|b| b.client_idx).unwrap_or(0)); + self.push_u32(load.map(|b| b.key_idx).unwrap_or(0)); + + // _store header + self.push_u32(store.map(|b| b.client_idx).unwrap_or(0)); + self.push_u32(store.map(|b| b.key_idx).unwrap_or(0)); + + // _load.args + if let Some(b) = load { + for &(ak, av) in &b.args { + self.push_u32(ak); + self.push_u32(av); + } + } + + // _store.args + if let Some(b) = store { + for &(ak, av) in &b.args { + self.push_u32(ak); + self.push_u32(av); + } + } + + // Update path entry: is_leaf=1, offset=leaf_offset, count=load_args<<4|store_args + let count = ((load_args_count as u64) & 0xf) << 4 + | ((store_args_count as u64) & 0xf); + self.paths[path_idx as usize] = + PATH_IS_LEAF_MASK + | (leaf_offset as u64) << PATH_OFFSET_SHIFT + | count << PATH_COUNT_SHIFT; + } + + // ── interning ───────────────────────────────────────────────────────────── + /// Intern a byte string, returning its interning_idx index. - fn intern(&mut self, _s: &[u8]) -> u32 { - todo!() + /// Deduplicates: if already interned, returns existing index. + fn intern(&mut self, s: &[u8]) -> u32 { + // Linear scan for dedup (DSL strings are small in number). 
+ for (i, entry) in self.interning_idx.iter().enumerate() { + let offset = (entry >> 32) as usize; + let len = (entry & 0xffff_ffff) as usize; + if self.interning.get(offset..offset + len) == Some(s) { + return i as u32; + } + } + let offset = self.interning.len(); + self.interning.extend_from_slice(s); + let idx = self.interning_idx.len() as u32; + self.interning_idx.push(((offset as u64) << 32) | s.len() as u64); + idx + } + + /// Intern a Tree scalar as bytes. Null → intern empty slice → index 0. + fn intern_tree_scalar(&mut self, v: &Tree) -> u32 { + match v { + Tree::Scalar(b) => self.intern(b), + Tree::Null => self.intern(b""), + _ => self.intern(b""), + } + } + + // ── helpers ─────────────────────────────────────────────────────────────── + + fn push_u32(&mut self, v: u32) { + self.leaves.extend_from_slice(&v.to_le_bytes()); } fn finish(self) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>) { @@ -106,3 +355,236 @@ impl Compiler { ) } } + +// ── precompile helpers ──────────────────────────────────────────────────────── + +#[cfg(feature = "precompile")] +fn parse_yaml(src: &[u8]) -> Result { + extern crate std; + use std::string::ToString; + use std::format; + + let s = std::str::from_utf8(src) + .map_err(|e| format!("UTF-8 error: {e}"))?; + let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(s) + .map_err(|e| format!("YAML parse error: {e}"))?; + Ok(yaml_value_to_tree(yaml)) +} + +#[cfg(feature = "precompile")] +fn yaml_value_to_tree(v: serde_yaml_ng::Value) -> Tree { + extern crate std; + use std::string::ToString; + + match v { + serde_yaml_ng::Value::Mapping(m) => Tree::Mapping( + m.into_iter() + .filter_map(|(k, v)| { + if let serde_yaml_ng::Value::String(s) = k { + Some((s.into_bytes(), yaml_value_to_tree(v))) + } else { + None + } + }) + .collect(), + ), + serde_yaml_ng::Value::Sequence(s) => { + Tree::Sequence(s.into_iter().map(yaml_value_to_tree).collect()) + } + serde_yaml_ng::Value::String(s) => Tree::Scalar(s.into_bytes()), + 
serde_yaml_ng::Value::Number(n) => Tree::Scalar(n.to_string().into_bytes()), + serde_yaml_ng::Value::Bool(b) => Tree::Scalar(b.to_string().into_bytes()), + serde_yaml_ng::Value::Null => Tree::Null, + _ => Tree::Null, + } +} + +#[cfg(feature = "precompile")] +fn emit_u64_slice(out: &mut alloc::string::String, name: &str, data: &[u64]) { + extern crate std; + use std::format; + out.push_str(&format!("pub static {name}: &[u64] = &[\n")); + for chunk in data.chunks(8) { + out.push_str(" "); + for v in chunk { out.push_str(&format!("0x{v:016x}, ")); } + out.push('\n'); + } + out.push_str("];\n\n"); +} + +#[cfg(feature = "precompile")] +fn emit_u32_slice(out: &mut alloc::string::String, name: &str, data: &[u32]) { + extern crate std; + use std::format; + out.push_str(&format!("pub static {name}: &[u32] = &[\n")); + for chunk in data.chunks(8) { + out.push_str(" "); + for v in chunk { out.push_str(&format!("0x{v:08x}, ")); } + out.push('\n'); + } + out.push_str("];\n\n"); +} + +#[cfg(feature = "precompile")] +fn emit_u8_slice(out: &mut alloc::string::String, name: &str, data: &[u8]) { + extern crate std; + use std::format; + out.push_str(&format!("pub static {name}: &[u8] = &[\n")); + for chunk in data.chunks(16) { + out.push_str(" "); + for v in chunk { out.push_str(&format!("0x{v:02x}, ")); } + out.push('\n'); + } + out.push_str("];\n\n"); +} + +// ── tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + + fn scalar(s: &str) -> Tree { Tree::Scalar(s.as_bytes().to_vec()) } + fn mapping(pairs: Vec<(&str, Tree)>) -> Tree { + Tree::Mapping(pairs.into_iter().map(|(k, v)| (k.as_bytes().to_vec(), v)).collect()) + } + + fn compile(tree: &Tree) -> (Vec, Vec, Vec, Vec, Vec) { + let (p, c, l, i, ii) = Dsl::compile(tree); + (p.into_vec(), c.into_vec(), l.into_vec(), i.into_vec(), ii.into_vec()) + } + + #[test] + fn test_single_leaf() { + let tree = mapping(vec![ + ("name", Tree::Null), + ]); + 
let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); + assert_eq!(paths.len(), 1); + assert!(paths[0] & PATH_IS_LEAF_MASK != 0); + } + + #[test] + fn test_nested_field_keys() { + let tree = mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ("name", Tree::Null), + ])), + ]); + let (paths, children, _leaves, _interning, _interning_idx) = compile(&tree); + // user(0) + id(1) + name(2) + assert_eq!(paths.len(), 3); + // user is not a leaf (has children) + assert!(paths[0] & PATH_IS_LEAF_MASK == 0); + // id and name are leaves + assert!(paths[1] & PATH_IS_LEAF_MASK != 0); + assert!(paths[2] & PATH_IS_LEAF_MASK != 0); + // children slice has 2 entries + assert_eq!(children.len(), 2); + } + + #[test] + fn test_meta_key_excluded_from_paths() { + let tree = mapping(vec![ + ("user", mapping(vec![ + ("_load", mapping(vec![ + ("client", scalar("Memory")), + ("key", scalar("user:1")), + ])), + ("id", Tree::Null), + ])), + ]); + let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); + // user(0) + id(1) — _load must not appear as a path + assert_eq!(paths.len(), 2); + } + + #[test] + fn test_load_stored_in_leaf() { + let tree = mapping(vec![ + ("user", mapping(vec![ + ("_load", mapping(vec![ + ("client", scalar("Memory")), + ("key", scalar("user:1")), + ])), + ("id", Tree::Null), + ])), + ]); + let (paths, _children, leaves, interning, interning_idx) = compile(&tree); + + // id is a leaf + let id_path = paths[1]; + assert!(id_path & PATH_IS_LEAF_MASK != 0); + let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + + // keyword_idx(4) + value_idx(4) + load_client(4) + load_key(4) + store_client(4) + store_key(4) = 24 bytes + assert!(leaves.len() >= leaf_offset + 24); + + // load client_idx points to "Memory" + let load_client_idx = u32::from_le_bytes(leaves[leaf_offset+8..leaf_offset+12].try_into().unwrap()) as usize; + let offset = (interning_idx[load_client_idx] >> 32) as usize; + let len = 
(interning_idx[load_client_idx] & 0xffff_ffff) as usize; + assert_eq!(&interning[offset..offset+len], b"Memory"); + } + + #[test] + fn test_store_inheritance() { + let tree = mapping(vec![ + ("session", mapping(vec![ + ("_store", mapping(vec![ + ("client", scalar("Kvs")), + ("key", scalar("session:1")), + ])), + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])), + ]); + let (paths, _children, leaves, interning, interning_idx) = compile(&tree); + + // session(0), user(1), id(2) + let id_path = paths[2]; + assert!(id_path & PATH_IS_LEAF_MASK != 0); + let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + + // store client_idx (offset 16) should point to "Kvs" + let store_client_idx = u32::from_le_bytes(leaves[leaf_offset+16..leaf_offset+20].try_into().unwrap()) as usize; + let offset = (interning_idx[store_client_idx] >> 32) as usize; + let len = (interning_idx[store_client_idx] & 0xffff_ffff) as usize; + assert_eq!(&interning[offset..offset+len], b"Kvs"); + } + + #[test] + fn test_intern_dedup() { + let tree = mapping(vec![ + ("a", scalar("hello")), + ("b", scalar("hello")), + ]); + let (_paths, _children, _leaves, _interning, interning_idx) = compile(&tree); + // "hello" should be interned only once + let hello_count = (0..interning_idx.len()).filter(|&i| { + let offset = (interning_idx[i] >> 32) as usize; + let len = (interning_idx[i] & 0xffff_ffff) as usize; + &_interning[offset..offset+len] == b"hello" + }).count(); + assert_eq!(hello_count, 1); + } + + #[cfg(feature = "precompile")] + #[test] + fn test_write_tenant_yml() { + extern crate std; + let src = std::include_bytes!("../examples/tenant.yml"); + let out = std::env::temp_dir().join("tenant_compiled.rs"); + std::fs::remove_file(&out).ok(); // idempotency + Dsl::write(src, out.to_str().unwrap()).expect("write failed"); + let content = std::fs::read_to_string(&out).expect("output not written"); + assert!(content.contains("pub static PATHS:")); + 
assert!(content.contains("pub static LEAVES:")); + assert!(content.contains("pub static INTERNING:")); + assert!(content.contains("// @generated")); + // output intentionally left for inspection + } +} diff --git a/src/lib.rs b/src/lib.rs index f9db508..894e9de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -6,7 +6,7 @@ extern crate alloc; #[cfg(feature = "precompile")] extern crate std; -pub(crate) mod error; +pub(crate) mod debug_log; pub mod ports; pub mod context; pub mod tree; diff --git a/src/tree.rs b/src/tree.rs index 2fd2028..bc73392 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -104,6 +104,7 @@ fn split_at(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])> { #[cfg(test)] mod tests { use super::*; + use alloc::vec; fn rt(v: &Tree) -> Tree { Tree::deserialize(&v.serialize()).unwrap() @@ -174,112 +175,3 @@ mod tests { } } -#[cfg(feature = "precompile")] -mod inner { - // precompile requires std: file I/O (std::fs::write) and UTF-8 parsing - extern crate std; - use crate::ports::provided::Tree as Data; - - pub struct Tree { - paths: Box<[u64]>, - children: Box<[u32]>, - leaves: Box<[u8]>, - interning: Box<[u8]>, - interning_idx: Box<[u64]>, - } - - impl Tree { - pub fn new( - paths: Box<[u64]>, - children: Box<[u32]>, - leaves: Box<[u8]>, - interning: Box<[u8]>, - interning_idx: Box<[u64]>, - ) -> Self { - Self { paths, children, leaves, interning, interning_idx } - } - - pub fn write(&self, path: &str) -> std::io::Result<()> { - let mut out = String::new(); - out.push_str("// @generated — do not edit by hand\n\n"); - push_u64_slice(&mut out, "PATHS", &self.paths); - push_u32_slice(&mut out, "CHILDREN", &self.children); - push_u8_slice (&mut out, "LEAVES", &self.leaves); - push_u8_slice (&mut out, "INTERNING", &self.interning); - push_u64_slice(&mut out, "INTERNING_IDX", &self.interning_idx); - std::fs::write(path, out) - } - - /// Parse a YAML byte slice into a `Tree` tree. 
- pub fn parse(src: &[u8]) -> Result { - let s = std::str::from_utf8(src) - .map_err(|e| format!("UTF-8 error: {e}"))?; - let yaml: serde_yaml_ng::Value = serde_yaml_ng::from_str(s) - .map_err(|e| format!("YAML parse error: {e}"))?; - Ok(yaml_to_tree(yaml)) - } - } - - fn yaml_to_tree(v: serde_yaml_ng::Value) -> Tree { - match v { - serde_yaml_ng::Value::Mapping(m) => Tree::Mapping( - m.into_iter() - .filter_map(|(k, v)| { - if let serde_yaml_ng::Value::String(s) = k { - Some((s.into_bytes(), yaml_to_tree(v))) - } else { - None - } - }) - .collect(), - ), - serde_yaml_ng::Value::Sequence(s) => { - Tree::Sequence(s.into_iter().map(yaml_to_tree).collect()) - } - serde_yaml_ng::Value::String(s) => Data::Scalar(s.into_bytes()), - serde_yaml_ng::Value::Number(n) => Data::Scalar(n.to_string().into_bytes()), - serde_yaml_ng::Value::Bool(b) => Tree::Scalar(b.to_string().into_bytes()), - serde_yaml_ng::Value::Null => Data::Null, - _ => Data::Null, - } - } - - fn push_u64_slice(out: &mut String, name: &str, data: &[u64]) { - out.push_str(&format!("pub static {name}: &[u64] = &[\n")); - for chunk in data.chunks(8) { - out.push_str(" "); - for v in chunk { - out.push_str(&format!("0x{v:016x}, ")); - } - out.push('\n'); - } - out.push_str("];\n\n"); - } - - fn push_u32_slice(out: &mut String, name: &str, data: &[u32]) { - out.push_str(&format!("pub static {name}: &[u32] = &[\n")); - for chunk in data.chunks(8) { - out.push_str(" "); - for v in chunk { - out.push_str(&format!("0x{v:08x}, ")); - } - out.push('\n'); - } - out.push_str("];\n\n"); - } - - fn push_u8_slice(out: &mut String, name: &str, data: &[u8]) { - out.push_str(&format!("pub static {name}: &[u8] = &[\n")); - for chunk in data.chunks(16) { - out.push_str(" "); - for v in chunk { - out.push_str(&format!("0x{v:02x}, ")); - } - out.push('\n'); - } - out.push_str("];\n\n"); - } -} - -#[cfg(feature = "precompile")] -pub use inner::Tree; From f4976cfebd274b10f68c6796ebe871f18c624558 Mon Sep 17 00:00:00 2001 From: Andyou 
Date: Tue, 7 Apr 2026 00:48:43 +0900 Subject: [PATCH 28/41] update quick start --- README.md | 33 +++++++++++++++++++++++++-------- examples/precompile.rs | 12 ++++++++++++ src/lib.rs | 3 ++- 3 files changed, 39 insertions(+), 9 deletions(-) create mode 100644 examples/precompile.rs diff --git a/README.md b/README.md index a04c16e..172df4c 100644 --- a/README.md +++ b/README.md @@ -49,6 +49,7 @@ context-engine = "0.1" 1. Write a yaml file. ```yaml +# mine.yml session: user: id: @@ -69,20 +70,36 @@ session: | Trait | Description | Example | |-----------------|------------------------------------------|---------| -| `StoreClient` | `get()` `set()` `delete()` | [implements.rs](./examples/implements.rs) | -| `StoreRegistry` | maps YAML client names to `StoreClient`s | [implements.rs](./examples/implements.rs) | +| `StoreClient` | `get()` `set()` `delete()` | [DbClient](./examples/implements.rs) | +| `StoreRegistry` | maps YAML client names to `StoreClient`s | [MyRegistry](./examples/implements.rs) | -3. Initialize Context with your registry. +3. Precompile your yaml to a rs file. + +```bash +cargo run --example precompile --features precompile -- examples/mine.yml src/generated.rs +``` + +4. Initialize Context with your registry. 
 ```rust
-use context_engine::State;
+use context_engine::{Context, Index};
+use std::sync::Arc;
+
+// Include the precompiled static data
+include!("generated.rs");
 
-let stores = MyStores::new()?;
+let index = Arc::new(Index::new(
+    Box::from(PATHS),
+    Box::from(CHILDREN),
+    Box::from(LEAVES),
+    Box::from(INTERNING),
+    Box::from(INTERNING_IDX),
+));
 
-let mut state = State::new(stores);
+let registry = MyRegistry::new();
+let mut context = Context::new(index, &registry);
 
-// Use context-engine
-let user_name = state.get("session.user.name")?;
+let user_name = context.get("session.user.name")?;
 ```
 
 ## Architecture
diff --git a/examples/precompile.rs b/examples/precompile.rs
new file mode 100644
index 0000000..582ddf9
--- /dev/null
+++ b/examples/precompile.rs
@@ -0,0 +1,12 @@
+fn main() {
+    let args: std::vec::Vec<String> = std::env::args().collect();
+    if args.len() != 3 {
+        eprintln!("usage: precompile <input.yml> <output.rs>");
+        std::process::exit(1);
+    }
+    let src = std::fs::read(&args[1])
+        .unwrap_or_else(|e| { eprintln!("read error: {e}"); std::process::exit(1); });
+    context_engine::dsl::Dsl::write(&src, &args[2])
+        .unwrap_or_else(|e| { eprintln!("compile error: {e}"); std::process::exit(1); });
+    println!("written: {}", args[2]);
+}
diff --git a/src/lib.rs b/src/lib.rs
index 894e9de..67ade76 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,5 +21,6 @@ pub use ports::provided::{
 pub use ports::required::{
     StoreClient,
     StoreRegistry,
-    SetOutcome
+    SetOutcome,
 };
+pub use index::Index;

From 0cdb6900c1914071981415627197d3610345cb0b Mon Sep 17 00:00:00 2001
From: Andyou
Date: Tue, 7 Apr 2026 09:18:41 +0900
Subject: [PATCH 29/41] u

---
 src/context.rs |  12 +++--
 src/dsl.rs     | 113 +++++++++++++++++++++++--------------------
 src/index.rs   | 129 ++++++++++++++++++++++++++++++++++++++++++-------
 3 files changed, 178 insertions(+), 76 deletions(-)

diff --git a/src/context.rs b/src/context.rs
index 87ed4f1..7025627 100644
--- a/src/context.rs
+++ b/src/context.rs
@@ -106,7 +106,7 @@ impl<'r> ContextTrait 
for Context<'r> { } let leaf = &leaves[0]; - let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let (yaml_name, args) = self.index.store_args(leaf); let client = self.registry.client_for(yaml_name) .ok_or_else(|| ContextError::StoreFailed( StoreError::ClientNotFound(yaml_name.to_string()) @@ -138,7 +138,7 @@ impl<'r> ContextTrait for Context<'r> { } let leaf = &leaves[0]; - let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let (yaml_name, args) = self.index.store_args(leaf); let client = self.registry.client_for(yaml_name) .ok_or_else(|| ContextError::StoreFailed( StoreError::ClientNotFound(yaml_name.to_string()) @@ -172,7 +172,7 @@ impl<'r> ContextTrait for Context<'r> { return Ok(!matches!(v, Tree::Null)); } - let (yaml_name, args) = self.index.store_args(leaf.leaf_offset); + let (yaml_name, args) = self.index.store_args(leaf); let Some(client) = self.registry.client_for(yaml_name) else { return Ok(false); }; @@ -201,8 +201,10 @@ impl<'r> Context<'r> { return Ok(Some(v.clone())); } + let leaf_ref = crate::index::LeafRef { path_idx, leaf_offset }; + // 2. _store - let (store_name, store_args) = self.index.store_args(leaf_offset); + let (store_name, store_args) = self.index.store_args(&leaf_ref); if !store_name.is_empty() { if let Some(client) = self.registry.client_for(store_name) { let key = store_args.get("key").and_then(|v| { @@ -221,7 +223,7 @@ impl<'r> Context<'r> { } // 3. 
_load
-        let (load_name, load_args) = self.index.load_args(leaf_offset);
+        let (load_name, load_args) = self.index.load_args(&leaf_ref);
         if load_name.is_empty() {
             return Ok(None);
         }
diff --git a/src/dsl.rs b/src/dsl.rs
index 78f69df..42bd90f 100644
--- a/src/dsl.rs
+++ b/src/dsl.rs
@@ -17,22 +17,24 @@ pub const PROP_MAP: &[u8] = b"map";
 
 // ── path field layout (u64) ───────────────────────────────────────────────────
 //
-// | field      | bits |
-// |------------|------|
-// | is_leaf    | 1    | bit 63
-// | offset     | 32   | bits 54..23
-// | count      | 8    | bits 22..15
-// |            |      | is_leaf=0: [3:0]=子path数(1~16), [7:4]=unused
-// |            |      | is_leaf=1: [7:4]=load_args count, [3:0]=store_args count (各最大15)
-// | (reserved) | 23   | bits 14..0
-
-pub const PATH_IS_LEAF_SHIFT: u64 = 63;
-pub const PATH_OFFSET_SHIFT: u64 = 23;
-pub const PATH_COUNT_SHIFT: u64 = 15;
-
-pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT;
-pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT;
-pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT;
+// | field       | bits |
+// |-------------|------|
+// | is_leaf     | 1    | bit 63
+// | offset      | 32   | bits 54..23
+// | count       | 8    | bits 22..15
+// |             |      | is_leaf=0: [3:0]=子path数(1~16), [7:4]=unused
+// |             |      | is_leaf=1: [7:4]=load_args count, [3:0]=store_args count (各最大15)
+// | keyword_idx | 15   | bits 14..0  interning_idx of this node's keyword
+
+pub const PATH_IS_LEAF_SHIFT: u64 = 63;
+pub const PATH_OFFSET_SHIFT: u64 = 23;
+pub const PATH_COUNT_SHIFT: u64 = 15;
+pub const PATH_KEYWORD_IDX_SHIFT: u64 = 0;
+
+pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT;
+pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT;
+pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT;
+pub const PATH_KEYWORD_IDX_MASK: u64 = 0x7fff; // bits 14..0
 
 // ── Dsl ───────────────────────────────────────────────────────────────────────
@@ -63,8 +65,25 @@ impl Dsl {
         Box<[u64]>,
     ) {
         let mut compiler = Compiler::new();
+        // paths[0] = virtual root 
(keyword_idx=0 = empty string) + compiler.intern(b""); // interning[0] = "" + compiler.paths.push(0u64); // placeholder, filled after walking top-level if let Tree::Mapping(pairs) = tree { - compiler.walk_mapping(pairs, None, None); + let children_offset = compiler.children.len() as u32; + let mut child_count = 0u32; + for (k, v) in pairs { + if k.first() != Some(&b'_') { + let child_idx = compiler.paths.len() as u32; + compiler.walk_field_key(k, v, None, None); + compiler.children.push(child_idx); + child_count += 1; + } + } + let count_bits = (child_count as u64) & 0xf; + compiler.paths[0] = + (children_offset as u64) << PATH_OFFSET_SHIFT + | count_bits << PATH_COUNT_SHIFT + | 0u64; // keyword_idx=0 (empty) } compiler.finish() } @@ -127,21 +146,6 @@ impl Compiler { // ── walk ────────────────────────────────────────────────────────────────── - /// Walk a mapping's field_key entries, building path/children/leaves. - /// `inh_load` / `inh_store`: inherited MetaBlock from parent (carried down). - fn walk_mapping( - &mut self, - pairs: &[(Vec, Tree)], - inh_load: Option<&MetaBlock>, - inh_store: Option<&MetaBlock>, - ) { - for (k, v) in pairs { - if k.first() != Some(&b'_') { - self.walk_field_key(k, v, inh_load, inh_store); - } - } - } - /// Process a single field_key node. fn walk_field_key( &mut self, @@ -179,8 +183,9 @@ impl Compiler { } else { let count_bits = (child_count as u64) & 0xf; self.paths[path_idx as usize] = - (children_offset as u64) << PATH_OFFSET_SHIFT - | count_bits << PATH_COUNT_SHIFT; + (children_offset as u64) << PATH_OFFSET_SHIFT + | count_bits << PATH_COUNT_SHIFT + | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; } } // Scalar or Null → leaf with optional hardcoded value. 
@@ -306,8 +311,9 @@ impl Compiler { | ((store_args_count as u64) & 0xf); self.paths[path_idx as usize] = PATH_IS_LEAF_MASK - | (leaf_offset as u64) << PATH_OFFSET_SHIFT - | count << PATH_COUNT_SHIFT; + | (leaf_offset as u64) << PATH_OFFSET_SHIFT + | count << PATH_COUNT_SHIFT + | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; } // ── interning ───────────────────────────────────────────────────────────── @@ -461,8 +467,10 @@ mod tests { ("name", Tree::Null), ]); let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); - assert_eq!(paths.len(), 1); - assert!(paths[0] & PATH_IS_LEAF_MASK != 0); + // root(0) + name(1) + assert_eq!(paths.len(), 2); + assert!(paths[0] & PATH_IS_LEAF_MASK == 0); // root is not a leaf + assert!(paths[1] & PATH_IS_LEAF_MASK != 0); } #[test] @@ -474,15 +482,14 @@ mod tests { ])), ]); let (paths, children, _leaves, _interning, _interning_idx) = compile(&tree); - // user(0) + id(1) + name(2) - assert_eq!(paths.len(), 3); - // user is not a leaf (has children) - assert!(paths[0] & PATH_IS_LEAF_MASK == 0); - // id and name are leaves - assert!(paths[1] & PATH_IS_LEAF_MASK != 0); - assert!(paths[2] & PATH_IS_LEAF_MASK != 0); - // children slice has 2 entries - assert_eq!(children.len(), 2); + // root(0) + user(1) + id(2) + name(3) + assert_eq!(paths.len(), 4); + assert!(paths[0] & PATH_IS_LEAF_MASK == 0); // root + assert!(paths[1] & PATH_IS_LEAF_MASK == 0); // user is not a leaf + assert!(paths[2] & PATH_IS_LEAF_MASK != 0); // id + assert!(paths[3] & PATH_IS_LEAF_MASK != 0); // name + // root→user(1 child) + user→id,name(2 children) = 3 entries + assert_eq!(children.len(), 3); } #[test] @@ -497,8 +504,8 @@ mod tests { ])), ]); let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); - // user(0) + id(1) — _load must not appear as a path - assert_eq!(paths.len(), 2); + // root(0) + user(1) + id(2) — _load must not appear as a path + assert_eq!(paths.len(), 3); } #[test] @@ -514,8 +521,8 @@ mod tests { ]); 
let (paths, _children, leaves, interning, interning_idx) = compile(&tree); - // id is a leaf - let id_path = paths[1]; + // root(0), user(1), id(2) + let id_path = paths[2]; assert!(id_path & PATH_IS_LEAF_MASK != 0); let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; @@ -544,8 +551,8 @@ mod tests { ]); let (paths, _children, leaves, interning, interning_idx) = compile(&tree); - // session(0), user(1), id(2) - let id_path = paths[2]; + // root(0), session(1), user(2), id(3) + let id_path = paths[3]; assert!(id_path & PATH_IS_LEAF_MASK != 0); let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; diff --git a/src/index.rs b/src/index.rs index 96f4ccc..0cbba71 100644 --- a/src/index.rs +++ b/src/index.rs @@ -2,9 +2,13 @@ use alloc::boxed::Box; use alloc::collections::BTreeMap; use alloc::string::String; use alloc::vec::Vec; +use core::str::from_utf8; use crate::dsl::{ - PATH_IS_LEAF_MASK, PATH_OFFSET_SHIFT, PATH_OFFSET_MASK, PATH_COUNT_SHIFT, PATH_COUNT_MASK, + PATH_IS_LEAF_MASK, + PATH_OFFSET_SHIFT, PATH_OFFSET_MASK, + PATH_COUNT_SHIFT, PATH_COUNT_MASK, + PATH_KEYWORD_IDX_MASK, }; use crate::ports::provided::Tree; @@ -47,18 +51,43 @@ impl Index { result.into_boxed_slice() } - /// Walk the interning list to find the path_idx matching the dot-separated `path`. + /// Resolve the keyword bytes of a path node from the interning list. + pub fn keyword_of(&self, path_idx: u32) -> &[u8] { + let path = self.paths[path_idx as usize]; + let interning_idx = (path & PATH_KEYWORD_IDX_MASK) as usize; + self.interning_str(interning_idx) + } + + /// Extract _load client yaml_name and args for the given leaf. + /// Returns ("", empty) if no _load is configured. + pub fn load_args(&self, leaf: &LeafRef) -> (&str, BTreeMap) { + self.decode_meta(leaf.path_idx, leaf.leaf_offset, MetaKind::Load) + } + + /// Extract _store client yaml_name and args for the given leaf. + /// Returns ("", empty) if no _store is configured. 
+ pub fn store_args(&self, leaf: &LeafRef) -> (&str, BTreeMap) { + self.decode_meta(leaf.path_idx, leaf.leaf_offset, MetaKind::Store) + } +} + +// ── private ─────────────────────────────────────────────────────────────────── + +enum MetaKind { Load, Store } + +impl Index { + /// Walk dot-separated `path` from the virtual root (paths[0]). fn find(&self, path: &str) -> Option { - let mut current: u32 = 0; // root - for keyword in path.split('.') { - current = self.find_child(current, keyword.as_bytes())?; + let mut current: u32 = 0; // paths[0] = virtual root + for segment in path.split('.') { + current = self.find_child(current, segment.as_bytes())?; } Some(current) } - /// Among the children of `path_idx`, find the one whose interning keyword matches `keyword`. + /// Among the children of `path_idx`, find the one whose keyword matches. fn find_child(&self, path_idx: u32, keyword: &[u8]) -> Option { - let path = self.paths[path_idx as usize]; + let path = self.paths[path_idx as usize]; let offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; let count = (((path & PATH_COUNT_MASK) >> PATH_COUNT_SHIFT) & 0xf) as usize; @@ -86,20 +115,84 @@ impl Index { } } - /// Resolve the keyword bytes of a path node from the interning list. - pub fn keyword_of(&self, path_idx: u32) -> &[u8] { - todo!("resolve keyword from interning via path_idx") + /// Decode _load or _store from `leaves` at `leaf_offset`. 
+ /// + /// Leaf layout (u32le each): + /// +0 keyword_idx + /// +4 value_idx + /// +8 load_client_idx + /// +12 load_key_idx + /// +16 store_client_idx + /// +20 store_key_idx + /// +24 load.args × load_args_count : key_idx | value_idx + /// +24+N store.args × store_args_count : key_idx | value_idx + /// + /// args counts are in path.count: [7:4]=load, [3:0]=store + fn decode_meta(&self, path_idx: u32, leaf_offset: u32, kind: MetaKind) -> (&str, BTreeMap) { + let base = leaf_offset as usize; + let empty = BTreeMap::new(); + + if path_idx as usize >= self.paths.len() { return ("", empty); } + let path_entry = self.paths[path_idx as usize]; + + let count_byte = ((path_entry & PATH_COUNT_MASK) >> PATH_COUNT_SHIFT) as u8; + let load_count = ((count_byte >> 4) & 0xf) as usize; + let store_count = (count_byte & 0xf) as usize; + + let (client_offset, key_offset, args_count, args_start) = match kind { + MetaKind::Load => (8, 12, load_count, 24), + MetaKind::Store => (16, 20, store_count, 24 + load_count * 8), + }; + + let client_idx = self.read_u32(base + client_offset) as usize; + let key_idx = self.read_u32(base + key_offset) as usize; + + let client_name = from_utf8(self.interning_str(client_idx)).unwrap_or(""); + if client_name.is_empty() { + return ("", empty); + } + + let mut args: BTreeMap = BTreeMap::new(); + + // key arg + let key_str = from_utf8(self.interning_str(key_idx)).unwrap_or(""); + if !key_str.is_empty() { + args.insert( + String::from("key"), + Tree::Scalar(key_str.as_bytes().to_vec()), + ); + } + + // additional args + for i in 0..args_count { + let off = base + args_start + i * 8; + let ak = self.read_u32(off) as usize; + let av = self.read_u32(off + 4) as usize; + let k = from_utf8(self.interning_str(ak)).unwrap_or(""); + let v = self.interning_str(av); + if !k.is_empty() { + args.insert( + String::from(k), + Tree::Scalar(v.to_vec()), + ); + } + } + + (client_name, args) } - /// Extract _load client yaml_name and args from leaves at `leaf_offset`. 
- /// Returns ("", empty) if no _load is configured. - pub fn load_args(&self, leaf_offset: u32) -> (&str, BTreeMap) { - todo!("decode _load from leaves[leaf_offset..]") + /// Read a u32le from `leaves` at byte offset `off`. + fn read_u32(&self, off: usize) -> u32 { + let b = &self.leaves[off..off + 4]; + u32::from_le_bytes(b.try_into().unwrap()) } - /// Extract _store client yaml_name and args from leaves at `leaf_offset`. - /// Returns ("", empty) if no _store is configured. - pub fn store_args(&self, leaf_offset: u32) -> (&str, BTreeMap) { - todo!("decode _store from leaves[leaf_offset..]") + /// Resolve interning bytes by interning_idx index. + fn interning_str(&self, idx: usize) -> &[u8] { + if idx >= self.interning_idx.len() { return b""; } + let entry = self.interning_idx[idx]; + let offset = (entry >> 32) as usize; + let len = (entry & 0xffff_ffff) as usize; + self.interning.get(offset..offset + len).unwrap_or(b"") } } From c53687e49b7e86706847a348a9ce82efb0231a0b Mon Sep 17 00:00:00 2001 From: Andyou Date: Tue, 7 Apr 2026 09:41:46 +0900 Subject: [PATCH 30/41] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 172df4c..2890d74 100644 --- a/README.md +++ b/README.md @@ -182,7 +182,7 @@ computer: "Network-capable nodes in the system." 
## Original Text (ja) -webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[## background](#background)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 +webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[### 背景](#背景)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 ### 背景 From 127f7f9b169dcb082afc0d29cfb9835a8a61b26a Mon Sep 17 00:00:00 2001 From: Qebju <207797756+Qebju007@users.noreply.github.com> Date: Tue, 7 Apr 2026 15:12:39 +0900 Subject: [PATCH 31/41] u md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2890d74..5f9134d 100644 --- a/README.md +++ b/README.md @@ -161,7 +161,8 @@ cargo test --features=logging -- --nocapture ## License -Apache-2.0 +SPDX-License-Identifier: Apache-2.0 +Copyright (c) 2026 Andyou ## Background From 083377bdaf2d8497a5d19f583be417a05bd2b265 Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 00:38:16 +0900 Subject: [PATCH 32/41] update --- README.md | 3 ++- docs/Architecture.md | 54 +++++++++++++++++++++--------------------- examples/implements.rs | 2 +- examples/precompile.rs | 9 +++---- src/lib.rs | 1 - src/tree.rs | 14 +++++------ 6 files changed, 42 insertions(+), 41 deletions(-) diff --git a/README.md b/README.md index 5f9134d..660f5ab 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,8 @@ session: 3. Precompile your yaml to a rs file. 
```bash -cargo run --example precompile --features precompile -- examples/mine.yml src/generated.rs +cargo run --example precompile --features precompile -- examples/mine.yml src/dsl_compiled.rs +# -- ``` 4. Initialize Context with your registry. diff --git a/docs/Architecture.md b/docs/Architecture.md index 010d364..626bfbd 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -11,33 +11,33 @@ - traversal: 上記データ群を保持し、トラバーサルによってメモリ位置群を取得する - addressing & operation: Manifestに対応した1層mapを保持し、アプリケーションからの呼び出しに応じて値の操作を行う。リクエスト処理スコープインスタンス。 -## モジュール構成 - -### 実体部 - -| Mod | Description | Ports | Filename | -|-----|-------------|-------|----------| -| tree | `enum Tree` の wire format serialize / deserialize | serialize, deserialize | tree.rs | -| Dsl | `Tree` のDSLを読み込み、固定長メモリ位置群のトラバーサルに落とし込むための静的データ群を生成する。`feature=precompile` 時は `Dsl::write()` でYAML→静的Rustファイル出力 | compile, write(precompile) | dsl.rs | -| Index | `Dsl::compile` の出力を保持し、トラバーサルによってleaf参照群を取得する | new, traverse | index.rs | -| Context | コンテクストデータの操作を行うリクエスト処理スコープの実行インスタンス | new, get, set, delete, exists | context.rs | - -* Portsはpub fnのこと -* new()であっても、引数はVec等の標準型依存を明示するべき。construct状態は避ける - -### Portモジュール - -| Mod | Description | Filename | -|-----|-------------|----------| -| Context | Contextのtrait、Tree型、各Error型 | ports/provided.rs | -| StoreClient | 単一ストアのadapter trait | ports/required.rs | -| StoreRegistry | client名称→StoreClientのdispatch trait | ports/required.rs | - -### 開発用モジュール - -| Mod | Description | Filename | -|-----|-------------|----------| -| debug_log | `feature=logging` 限定のデバッグログマクロ・ユーティリティ | debug_log.rs | +## ポート構成 + +| Port | Module | Signature | Description | Filename | +|------|--------|-----------|-------------|----------| +| `debug_log!` | debug_log | `(class, fn $(, arg)*)` | `feature=logging` ログマクロ | debug_log.rs | +| `Tree` | provided | — | n次元scalar map型 | provided.rs | +| `wire` | Tree | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | +| `dewire` | Tree | `(bytes: &[u8]) -> 
Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | +| `compile` | Dsl | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を生成 | dsl.rs | +| `write` | Dsl | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAML→.rs出力[precompile] | dsl.rs | +| `Index::new` | Index | `(paths, children, leaves, interning, interning_idx) -> Index` | `Dsl::compile` 出力を保持するIndexを構築 | index.rs | +| `Index::traverse` | Index | `(&self, path: &str) -> Box<[LeafRef]>` | ドット区切りパスを辿り、leaf参照リストを返す | index.rs | +| `Index::keyword_of` | Index | `(&self, path_idx: u32) -> &[u8]` | path_idxに対応するkeywordバイト列を返す | index.rs | +| `Index::load_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_load` client名とargsを返す | index.rs | +| `Index::store_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_store` client名とargsを返す | index.rs | +| `Context::new` | Context | `(index: Arc, registry: &'r dyn StoreRegistry) -> Context` | リクエストスコープのContextインスタンスを構築 | context.rs | +| `Context::get` | Context | `(&mut self, key: &str) -> Result, ContextError>` | cache→_store→_load の順で値を解決 | context.rs | +| `Context::set` | Context | `(&mut self, key: &str, value: Tree) -> Result` | _storeに値を書き込み、cacheを更新 | context.rs | +| `Context::delete` | Context | `(&mut self, key: &str) -> Result` | _storeから値を削除し、cacheを無効化 | context.rs | +| `Context::exists` | Context | `(&mut self, key: &str) -> Result` | cache or _storeに値が存在するか確認(_loadは起動しない) | context.rs | +| `ParseError` | provided | — | DSL解析エラー | provided.rs | +| `LoadError` | provided | — | _loadクライアント呼び出しエラー | provided.rs | +| `StoreError` | provided | — | _storeクライアント呼び出しエラー | provided.rs | +| `ContextError` | provided | — | Context操作の最上位エラー | provided.rs | +| `StoreClient` | required | `get / set / delete` | 単一ストアのadapter trait。利用者がimplする | required.rs | +| `StoreRegistry` | required | `client_for(&str) -> Option<&dyn StoreClient>` | 
yaml_name→StoreClientのdispatch trait。利用者がimplする | required.rs | +| `SetOutcome` | required | — | `StoreClient::set` の戻り値 | required.rs | ## 用語 diff --git a/examples/implements.rs b/examples/implements.rs index 404363e..d261cff 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -54,7 +54,7 @@ impl KvsClient { impl StoreClient for KvsClient { fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { let bytes = self.data.lock().unwrap().get(key).cloned()?; - // In real impl: deserialize wire bytes → Tree + // In real impl: unwire bytes → Tree Some(bytes) } fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { diff --git a/examples/precompile.rs b/examples/precompile.rs index 582ddf9..e9666de 100644 --- a/examples/precompile.rs +++ b/examples/precompile.rs @@ -1,12 +1,13 @@ fn main() { let args: std::vec::Vec = std::env::args().collect(); - if args.len() != 3 { - eprintln!("usage: precompile "); + if args.len() < 2 { + eprintln!("usage: precompile [output.rs]"); std::process::exit(1); } + let out = if args.len() >= 3 { args[2].clone() } else { "src/dsl_compiled.rs".to_string() }; let src = std::fs::read(&args[1]) .unwrap_or_else(|e| { eprintln!("read error: {e}"); std::process::exit(1); }); - context_engine::dsl::Dsl::write(&src, &args[2]) + context_engine::dsl::Dsl::write(&src, &out) .unwrap_or_else(|e| { eprintln!("compile error: {e}"); std::process::exit(1); }); - println!("written: {}", args[2]); + println!("written: {}", out); } diff --git a/src/lib.rs b/src/lib.rs index 67ade76..0eb9368 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,7 +2,6 @@ extern crate core; extern crate alloc; -// precompile feature requires std (file I/O, serde) #[cfg(feature = "precompile")] extern crate std; diff --git a/src/tree.rs b/src/tree.rs index bc73392..2ad7c23 100644 --- a/src/tree.rs +++ b/src/tree.rs @@ -15,13 +15,13 @@ const TAG_SEQUENCE: u8 = 0x02; const TAG_MAPPING: u8 = 0x03; impl Tree { - pub fn serialize(&self) -> Vec { + pub fn 
wire(&self) -> Vec { let mut buf = Vec::new(); write_value(self, &mut buf); buf } - pub fn deserialize(bytes: &[u8]) -> Option { + pub fn unwire(bytes: &[u8]) -> Option { let (value, _) = read_value(bytes)?; Some(value) } @@ -107,7 +107,7 @@ mod tests { use alloc::vec; fn rt(v: &Tree) -> Tree { - Tree::deserialize(&v.serialize()).unwrap() + Tree::unwire(&v.wire()).unwrap() } #[test] @@ -160,9 +160,9 @@ mod tests { } #[test] - fn test_deserialize_invalid_returns_none() { - assert_eq!(Tree::deserialize(&[0xFF]), None); - assert_eq!(Tree::deserialize(&[TAG_SCALAR, 0x05, 0x00, 0x00, 0x00]), None); + fn test_unwire_invalid_returns_none() { + assert_eq!(Tree::unwire(&[0xFF]), None); + assert_eq!(Tree::unwire(&[TAG_SCALAR, 0x05, 0x00, 0x00, 0x00]), None); } #[test] @@ -171,7 +171,7 @@ mod tests { (b"id".to_vec(), Tree::Scalar(b"1".to_vec())), (b"deleted_at".to_vec(), Tree::Null), ]); - assert_eq!(Tree::deserialize(&v.serialize()).unwrap(), v); + assert_eq!(Tree::unwire(&v.wire()).unwrap(), v); } } From 3e3d21f2c02e5a378e4e881bd1d9d4fa18a21e5f Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 06:47:14 +0900 Subject: [PATCH 33/41] rename yaml_name to keyword --- docs/Architecture.md | 48 +++++++++++++++++++++++------------------- docs/Dsl_guide.md | 2 +- examples/implements.rs | 4 ++-- src/context.rs | 16 +++++++------- src/dsl.rs | 2 +- src/index.rs | 4 ++-- src/ports/provided.rs | 4 ++-- src/ports/required.rs | 4 ++-- 8 files changed, 44 insertions(+), 40 deletions(-) diff --git a/docs/Architecture.md b/docs/Architecture.md index 626bfbd..1b238ab 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -15,29 +15,33 @@ | Port | Module | Signature | Description | Filename | |------|--------|-----------|-------------|----------| -| `debug_log!` | debug_log | `(class, fn $(, arg)*)` | `feature=logging` ログマクロ | debug_log.rs | -| `Tree` | provided | — | n次元scalar map型 | provided.rs | +| `debug_log!` | - | `(class, fn $(, arg)*)` | `feature=logging` ログマクロ | 
debug_log.rs | +| `Tree` | - | - | n次元scalar map型 | provided.rs | | `wire` | Tree | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | | `dewire` | Tree | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | -| `compile` | Dsl | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を生成 | dsl.rs | +| `compile` | Dsl | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を返す | dsl.rs | | `write` | Dsl | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAML→.rs出力[precompile] | dsl.rs | -| `Index::new` | Index | `(paths, children, leaves, interning, interning_idx) -> Index` | `Dsl::compile` 出力を保持するIndexを構築 | index.rs | -| `Index::traverse` | Index | `(&self, path: &str) -> Box<[LeafRef]>` | ドット区切りパスを辿り、leaf参照リストを返す | index.rs | -| `Index::keyword_of` | Index | `(&self, path_idx: u32) -> &[u8]` | path_idxに対応するkeywordバイト列を返す | index.rs | -| `Index::load_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_load` client名とargsを返す | index.rs | -| `Index::store_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_store` client名とargsを返す | index.rs | -| `Context::new` | Context | `(index: Arc, registry: &'r dyn StoreRegistry) -> Context` | リクエストスコープのContextインスタンスを構築 | context.rs | -| `Context::get` | Context | `(&mut self, key: &str) -> Result, ContextError>` | cache→_store→_load の順で値を解決 | context.rs | -| `Context::set` | Context | `(&mut self, key: &str, value: Tree) -> Result` | _storeに値を書き込み、cacheを更新 | context.rs | -| `Context::delete` | Context | `(&mut self, key: &str) -> Result` | _storeから値を削除し、cacheを無効化 | context.rs | -| `Context::exists` | Context | `(&mut self, key: &str) -> Result` | cache or _storeに値が存在するか確認(_loadは起動しない) | context.rs | -| `ParseError` | provided | — | DSL解析エラー | provided.rs | -| `LoadError` | provided | — | _loadクライアント呼び出しエラー | 
provided.rs | -| `StoreError` | provided | — | _storeクライアント呼び出しエラー | provided.rs | -| `ContextError` | provided | — | Context操作の最上位エラー | provided.rs | -| `StoreClient` | required | `get / set / delete` | 単一ストアのadapter trait。利用者がimplする | required.rs | -| `StoreRegistry` | required | `client_for(&str) -> Option<&dyn StoreClient>` | yaml_name→StoreClientのdispatch trait。利用者がimplする | required.rs | -| `SetOutcome` | required | — | `StoreClient::set` の戻り値 | required.rs | +| `new` | Index | `(paths, children, leaves, interning, interning_idx) -> Index` | compile済みdslからIndex構築 | index.rs | +| `traverse` | Index | `(&self, path: &str) -> Box<[LeafRef]>` | パス文字列からleaf参照リストを返す | index.rs | +| `keyword_of` | Index | `(&self, path_idx: u32) -> &[u8]` | path_idxからkeywordバイト列を返す | index.rs | +| `load_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_load` client名とargsを返す | index.rs | +| `store_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_store` client名とargsを返す | index.rs | +| `new` | Context | `(index: Arc, registry: &'r dyn StoreRegistry) -> Context` | IndexとStoreRegistoryからContextを構築 | context.rs | +| `get` | Context | `(&mut self, key: &str) -> Result, ContextError>` | パス文字列から値(cache→_store→_load)を取得して返す | context.rs | +| `set` | Context | `(&mut self, key: &str, value: Tree) -> Result` | 値から_storeに書き込み、cacheも更新 | context.rs | +| `delete` | Context | `(&mut self, key: &str) -> Result` | パスから_storeの値を削除し、cacheもnullで更新 | context.rs | +| `exists` | Context | `(&mut self, key: &str) -> Result` | パスからcacheか_storeに値が存在するか確認し、cacheを更新 | context.rs | +| `ParseError` | - | - | ※エラー | provided.rs | +| `LoadError` | - | - | _loadクライアント呼び出しエラー | provided.rs | +| `StoreError` | - | - | _storeクライアント呼び出しエラー | provided.rs | +| `ContextError` | - | - | Contextの出力するエラー | provided.rs | +| `SetOutcome` | - | - | `StoreClient::set`が返すCreatedとUpdated | required.rs | +| `get` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | 
keyとdsl記載のmapから値をlistで返す | required.rs | +| `set` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のマップから値を保存しSetOutcomeを返す | required.rs | +| `delete` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> bool` | keyとdsl記載のマップから値を削除し成否を返す | required.rs | +| `client_for` | StoreRegistry | `&self, keyword: &str -> Option<&dyn StoreClient>` | StoreClientのkeywordからStoreClientを返す | required.rs | + +※ dslコンパイル時検出エラーとしたいがTreeパースエラーが混在 ## 用語 @@ -57,7 +61,7 @@ template: placeholderと静的な文字列を混合した動的生成文 called_path: Context.get()等に渡されるパス文字列 ``` -## mod:fn詳細仕様 +## モジュール仕様 ### StoreClient @@ -81,7 +85,7 @@ YAMLの`client:`名称とStoreClientの対応を管理するtrait。利用者が ```rust pub trait StoreRegistry { - fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient>; + fn client_for(&self, keyword: &str) -> Option<&dyn StoreClient>; } ``` diff --git a/docs/Dsl_guide.md b/docs/Dsl_guide.md index 7e1181b..adb20a5 100644 --- a/docs/Dsl_guide.md +++ b/docs/Dsl_guide.md @@ -68,7 +68,7 @@ user: | `_load` | meta_key | load source definition | | `_store` | meta_key | store destination definition | | `_state` | meta_key | reserved | -| `client` | _load / _store prop | StoreRegistry yaml_name | +| `client` | _load / _store prop | StoreRegistry keyword | | `key` | _load / _store prop | reserved arg passed to StoreClient | | `map` | _load / _store prop | field mapping definition | diff --git a/examples/implements.rs b/examples/implements.rs index d261cff..efe934f 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -176,8 +176,8 @@ impl MyRegistry { } impl StoreRegistry for MyRegistry { - fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient> { - match yaml_name { + fn client_for(&self, keyword: &str) -> Option<&dyn StoreClient> { + match keyword { "Memory" => Some(self.memory.as_ref()), "Kvs" => Some(self.kvs.as_ref()), "Env" => Some(self.env.as_ref()), diff --git a/src/context.rs b/src/context.rs index 7025627..af460c7 100644 --- 
a/src/context.rs +++ b/src/context.rs @@ -106,10 +106,10 @@ impl<'r> ContextTrait for Context<'r> { } let leaf = &leaves[0]; - let (yaml_name, args) = self.index.store_args(leaf); - let client = self.registry.client_for(yaml_name) + let (keyword, args) = self.index.store_args(leaf); + let client = self.registry.client_for(keyword) .ok_or_else(|| ContextError::StoreFailed( - StoreError::ClientNotFound(yaml_name.to_string()) + StoreError::ClientNotFound(keyword.to_string()) ))?; let store_key = args.get("key").and_then(|v| { @@ -138,10 +138,10 @@ impl<'r> ContextTrait for Context<'r> { } let leaf = &leaves[0]; - let (yaml_name, args) = self.index.store_args(leaf); - let client = self.registry.client_for(yaml_name) + let (keyword, args) = self.index.store_args(leaf); + let client = self.registry.client_for(keyword) .ok_or_else(|| ContextError::StoreFailed( - StoreError::ClientNotFound(yaml_name.to_string()) + StoreError::ClientNotFound(keyword.to_string()) ))?; let store_key = args.get("key").and_then(|v| { @@ -172,8 +172,8 @@ impl<'r> ContextTrait for Context<'r> { return Ok(!matches!(v, Tree::Null)); } - let (yaml_name, args) = self.index.store_args(leaf); - let Some(client) = self.registry.client_for(yaml_name) else { + let (keyword, args) = self.index.store_args(leaf); + let Some(client) = self.registry.client_for(keyword) else { return Ok(false); }; diff --git a/src/dsl.rs b/src/dsl.rs index 42bd90f..dde849b 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -118,7 +118,7 @@ impl Dsl { #[derive(Clone)] struct MetaBlock { - client_idx: u32, // interning_idx of client yaml_name + client_idx: u32, // interning_idx of client keyword key_idx: u32, // interning_idx of key value args: Vec<(u32, u32)>, // (key_interning_idx, value_interning_idx) } diff --git a/src/index.rs b/src/index.rs index 0cbba71..b3a0021 100644 --- a/src/index.rs +++ b/src/index.rs @@ -58,13 +58,13 @@ impl Index { self.interning_str(interning_idx) } - /// Extract _load client yaml_name and args for the 
given leaf. + /// Extract _load client keyword and args for the given leaf. /// Returns ("", empty) if no _load is configured. pub fn load_args(&self, leaf: &LeafRef) -> (&str, BTreeMap) { self.decode_meta(leaf.path_idx, leaf.leaf_offset, MetaKind::Load) } - /// Extract _store client yaml_name and args for the given leaf. + /// Extract _store client keyword and args for the given leaf. /// Returns ("", empty) if no _store is configured. pub fn store_args(&self, leaf: &LeafRef) -> (&str, BTreeMap) { self.decode_meta(leaf.path_idx, leaf.leaf_offset, MetaKind::Store) diff --git a/src/ports/provided.rs b/src/ports/provided.rs index 165cb76..7721c18 100644 --- a/src/ports/provided.rs +++ b/src/ports/provided.rs @@ -47,7 +47,7 @@ impl fmt::Display for ParseError { #[derive(Debug, PartialEq)] pub enum LoadError { - /// StoreRegistry::client_for() returned None for the given yaml_name. + /// StoreRegistry::client_for() returned None for the given keyword. ClientNotFound(String), /// A required config key is missing in the manifest. ConfigMissing(String), @@ -70,7 +70,7 @@ impl fmt::Display for LoadError { #[derive(Debug, PartialEq)] pub enum StoreError { - /// StoreRegistry::client_for() returned None for the given yaml_name. + /// StoreRegistry::client_for() returned None for the given keyword. ClientNotFound(String), /// A required config key is missing in the manifest. ConfigMissing(String), diff --git a/src/ports/required.rs b/src/ports/required.rs index c175328..a23e7ac 100644 --- a/src/ports/required.rs +++ b/src/ports/required.rs @@ -19,7 +19,7 @@ pub trait StoreClient: Send + Sync { fn delete(&self, key: &str, args: &BTreeMap<&str, Tree>) -> bool; } -/// Dispatches yaml_name → StoreClient. Implemented by the library user. +/// Dispatches keyword → StoreClient. Implemented by the library user. 
pub trait StoreRegistry { - fn client_for(&self, yaml_name: &str) -> Option<&dyn StoreClient>; + fn client_for(&self, keyword: &str) -> Option<&dyn StoreClient>; } From a17b97877ebdfdc3ba3871adb3919df787a8a1da Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 07:01:19 +0900 Subject: [PATCH 34/41] update md --- docs/Architecture.md | 55 ++++++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/docs/Architecture.md b/docs/Architecture.md index 1b238ab..65f147e 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -13,33 +13,34 @@ ## ポート構成 -| Port | Module | Signature | Description | Filename | -|------|--------|-----------|-------------|----------| -| `debug_log!` | - | `(class, fn $(, arg)*)` | `feature=logging` ログマクロ | debug_log.rs | -| `Tree` | - | - | n次元scalar map型 | provided.rs | -| `wire` | Tree | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | -| `dewire` | Tree | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | -| `compile` | Dsl | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を返す | dsl.rs | -| `write` | Dsl | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAML→.rs出力[precompile] | dsl.rs | -| `new` | Index | `(paths, children, leaves, interning, interning_idx) -> Index` | compile済みdslからIndex構築 | index.rs | -| `traverse` | Index | `(&self, path: &str) -> Box<[LeafRef]>` | パス文字列からleaf参照リストを返す | index.rs | -| `keyword_of` | Index | `(&self, path_idx: u32) -> &[u8]` | path_idxからkeywordバイト列を返す | index.rs | -| `load_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_load` client名とargsを返す | index.rs | -| `store_args` | Index | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_store` client名とargsを返す | index.rs | -| `new` | Context | `(index: Arc, registry: &'r dyn StoreRegistry) -> Context` | IndexとStoreRegistoryからContextを構築 | context.rs | -| `get` | Context | 
`(&mut self, key: &str) -> Result, ContextError>` | パス文字列から値(cache→_store→_load)を取得して返す | context.rs | -| `set` | Context | `(&mut self, key: &str, value: Tree) -> Result` | 値から_storeに書き込み、cacheも更新 | context.rs | -| `delete` | Context | `(&mut self, key: &str) -> Result` | パスから_storeの値を削除し、cacheもnullで更新 | context.rs | -| `exists` | Context | `(&mut self, key: &str) -> Result` | パスからcacheか_storeに値が存在するか確認し、cacheを更新 | context.rs | -| `ParseError` | - | - | ※エラー | provided.rs | -| `LoadError` | - | - | _loadクライアント呼び出しエラー | provided.rs | -| `StoreError` | - | - | _storeクライアント呼び出しエラー | provided.rs | -| `ContextError` | - | - | Contextの出力するエラー | provided.rs | -| `SetOutcome` | - | - | `StoreClient::set`が返すCreatedとUpdated | required.rs | -| `get` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のmapから値をlistで返す | required.rs | -| `set` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のマップから値を保存しSetOutcomeを返す | required.rs | -| `delete` | StoreClient | `&self, key: &str, args: &BTreeMap<&str, Tree> -> bool` | keyとdsl記載のマップから値を削除し成否を返す | required.rs | -| `client_for` | StoreRegistry | `&self, keyword: &str -> Option<&dyn StoreClient>` | StoreClientのkeywordからStoreClientを返す | required.rs | +| Module | Port | Signature | Description | Filename | +|--------|------|-----------|-------------|----------| +| - | `debug_log!` | `(class, fn $(, arg)*)` | `feature=logging` ログマクロ | debug_log.rs | +| - | `Tree` | - | n次元scalar map型 | provided.rs | +| - | `SetOutcome` | - | `StoreClient::set`が返すCreatedかUpdated | required.rs | +| Tree | `wire` | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | +| | `dewire` | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | +| Dsl | `compile` | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を返す | dsl.rs | +| | `write` | `(src: &[u8], out_path: &str) -> Result<(), String>` | 
YAML→.rs出力[precompile] | dsl.rs | +| Index | `new` | `(paths, children, leaves, interning, interning_idx) -> Index` | compile済みdslからIndex構築 | index.rs | +| | `traverse` | `(&self, path: &str) -> Box<[LeafRef]>` | パス文字列からleaf参照リストを返す | index.rs | +| | `keyword_of` | `(&self, path_idx: u32) -> &[u8]` | path_idxからkeywordバイト列を返す | index.rs | +| | `load_args` | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_load` client名とargsを返す | index.rs | +| | `store_args` | `(&self, leaf: &LeafRef) -> (&str, BTreeMap)` | leafの`_store` client名とargsを返す | index.rs | +| Context | `new` | `(index: Arc, registry: &'r dyn StoreRegistry) -> Context` | IndexとStoreRegistryからContextを構築 | context.rs | +| | `get` | `(&mut self, key: &str) -> Result, ContextError>` | パス文字列から値(cache→_store→_load)を取得して返す | context.rs | +| | `set` | `(&mut self, key: &str, value: Tree) -> Result` | 値から_storeに書き込み、cacheも更新 | context.rs | +| | `delete` | `(&mut self, key: &str) -> Result` | パスから_storeの値を削除し、cacheもnullで更新 | context.rs | +| | `exists` | `(&mut self, key: &str) -> Result` | パスからcacheか_storeに値が存在するか確認し、cacheを更新 | context.rs | +| StoreClient | `get` | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のmapから値をlistで返す | required.rs | +| | `set` | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のマップから値を保存しSetOutcomeを返す | required.rs | +| | `delete` | `&self, key: &str, args: &BTreeMap<&str, Tree> -> bool` | keyとdsl記載のマップから値を削除し成否を返す | required.rs | +| StoreRegistry | `client_for` | `&self, keyword: &str -> Option<&dyn StoreClient>` | StoreClientのkeywordからStoreClientを返す | required.rs | +| ParseError | `fmt` | `&self, f: &mut fmt::Formatter<'_> -> fmt::Result` | ※エラーを返す | provided.rs | +| LoadError | `fmt` | | _loadクライアント呼び出しエラーを返す | provided.rs | +| StoreError | `fmt` | | _storeクライアント呼び出しエラーを返す | provided.rs | +| ContextError | `fmt` | | Contextの出力するエラーを返す | provided.rs | + ※ dslコンパイル時検出エラーとしたいがTreeパースエラーが混在 From 2f273650a2bafd0359a9be10b7f74f8b7a5ad469 Mon Sep 
17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 07:06:29 +0900 Subject: [PATCH 35/41] update --- docs/Architecture.md | 4 ++-- src/lib.rs | 2 +- src/ports/provided.rs | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/Architecture.md b/docs/Architecture.md index 65f147e..340399f 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -21,7 +21,7 @@ | Tree | `wire` | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | | | `dewire` | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | | Dsl | `compile` | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を返す | dsl.rs | -| | `write` | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAML→.rs出力[precompile] | dsl.rs | +| | `write` | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAMLファイルパスから.rsを出力[precompile] | dsl.rs | | Index | `new` | `(paths, children, leaves, interning, interning_idx) -> Index` | compile済みdslからIndex構築 | index.rs | | | `traverse` | `(&self, path: &str) -> Box<[LeafRef]>` | パス文字列からleaf参照リストを返す | index.rs | | | `keyword_of` | `(&self, path_idx: u32) -> &[u8]` | path_idxからkeywordバイト列を返す | index.rs | @@ -36,7 +36,7 @@ | | `set` | `&self, key: &str, args: &BTreeMap<&str, Tree> -> Option` | keyとdsl記載のマップから値を保存しSetOutcomeを返す | required.rs | | | `delete` | `&self, key: &str, args: &BTreeMap<&str, Tree> -> bool` | keyとdsl記載のマップから値を削除し成否を返す | required.rs | | StoreRegistry | `client_for` | `&self, keyword: &str -> Option<&dyn StoreClient>` | StoreClientのkeywordからStoreClientを返す | required.rs | -| ParseError | `fmt` | `&self, f: &mut fmt::Formatter<'_> -> fmt::Result` | ※エラーを返す | provided.rs | +| DslError | `fmt` | `&self, f: &mut fmt::Formatter<'_> -> fmt::Result` | ※エラーを返す | provided.rs | | LoadError | `fmt` | | _loadクライアント呼び出しエラーを返す | provided.rs | | StoreError | `fmt` | | _storeクライアント呼び出しエラーを返す | provided.rs | | ContextError | `fmt` | | Contextの出力するエラーを返す | 
provided.rs | diff --git a/src/lib.rs b/src/lib.rs index 0eb9368..bca02f4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,7 +14,7 @@ pub mod index; pub use ports::provided::{ Tree, - ParseError, LoadError, StoreError, ContextError, + DslError, LoadError, StoreError, ContextError, Context, }; pub use ports::required::{ diff --git a/src/ports/provided.rs b/src/ports/provided.rs index 7721c18..9922534 100644 --- a/src/ports/provided.rs +++ b/src/ports/provided.rs @@ -29,18 +29,18 @@ pub enum Tree { // ── Errors ──────────────────────────────────────────────────────────────────── #[derive(Debug, PartialEq)] -pub enum ParseError { +pub enum DslError { FileNotFound(String), AmbiguousFile(String), ParseError(String), } -impl fmt::Display for ParseError { +impl fmt::Display for DslError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - ParseError::FileNotFound(msg) => write!(f, "FileNotFound: {}", msg), - ParseError::AmbiguousFile(msg) => write!(f, "AmbiguousFile: {}", msg), - ParseError::ParseError(msg) => write!(f, "ParseError: {}", msg), + DslError::FileNotFound(msg) => write!(f, "FileNotFound: {}", msg), + DslError::AmbiguousFile(msg) => write!(f, "AmbiguousFile: {}", msg), + DslError::ParseError(msg) => write!(f, "ParseError: {}", msg), } } } From 675fb9cee3cd47fe19dada38a9765a552f26dce1 Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 07:24:54 +0900 Subject: [PATCH 36/41] update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 7c73ba2..c5788ec 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # context-engine -Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., data should be accessible as system_context["session.user"] rather than users[session[user-id]]). 
context-engine processes for each label, the data retrieval methods that application developers define as a DSL in YAML files. This allows, for example, server/client differences in system_context["session.user.preference"] and multi-tenant differences in context[session.user.tenant] to be resolved appropriately through the data retrieval methods defined in YAML. This library is a foundational technology for the reconstructed web system architecture(see [## background](#background)). +Data labels used by a web system's runtime within a single processing cycle should have their session-context-dependent variations resolved outside of code (e.g., `system_context["session.user"]` rather than `users[session[user_id]]`). context-engine processes the data retrieval methods that application developers define as a DSL in YAML files, for each label. This allows server/client differences in `system_context["session.user.preference"]` and multi-tenant differences in `context["session.user.tenant"]` to be resolved appropriately through the methods defined in YAML. This library is a foundational technology for the reconstructed web system architecture (see [Background](#background)). - [See original text(ja)](#original-text-ja) @@ -200,13 +200,13 @@ computer: "Network-capable nodes in the system." 
## Original Text (ja) -webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[### 背景](#背景)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 +webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[背景](#背景)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 ### 背景 **webシステムの構成再定義** -人々の営みの動作の一部を、ネットワーク機能を持ったコンピューターのデータ処理で代替えすることで、その間の検証可能性の保証と、物理的制約の緩和などの恩恵を受けることができる。これを実現する、ハードウェアを通して電気信号として入力を受け取り、処理後、所定のハードウェア群に出力する仕組みのことを、webシステムと呼ぶ。webシステムの実現には、第一に、システムに必要な概念体系を、人間言語とコンピューターのビット列それぞれで定義することが必要である。 +人々の営みの一部を、ネットワーク機能を持ったコンピューターのデータ処理で代替することで、検証可能性の保証や物理的制約の緩和といった恩恵を受けます。これを実現する、ハードウェアを通して電気信号として入力を受け取り、処理後、所定のハードウェア群に出力する仕組みのことをwebシステムと呼びます。webシステムの実現には、まず、システムに必要な概念体系を、人間言語とコンピューターのビット列それぞれで定義する必要があります。 ```yaml # computers structure of web system From 55682a19a84352288ad1a2f419350e132bcff254 Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 07:25:56 +0900 Subject: [PATCH 37/41] u --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index c5788ec..7c6ae5b 100644 --- a/README.md +++ b/README.md @@ -200,6 +200,8 @@ computer: "Network-capable nodes in the system." 
## Original Text (ja) +### context-engine + webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[背景](#背景)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 ### 背景 From 5f90a0ea77f7fd3e5bdef5796115680ea1a33cdf Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 07:26:18 +0900 Subject: [PATCH 38/41] u --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7c6ae5b..239f99a 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,7 @@ computer: "Network-capable nodes in the system." ## Original Text (ja) -### context-engine +### 概要 webシステムのランタイムが1回の処理の中で使用するデータのラベルは、セッションコンテクストによる変動を、コード外で処理するべきです(例: users[session[user-id]]では無く、system_context["session.user"]で呼び出せるべき)。context-engineは、アプリ開発者がYAMLファイルにDSLとして定義したデータの取得方法を、ラベルごとに処理します。これにより、例えばsystem_context["session.user.preference"]のサーバー/クライアント差異が、context[session.user.tenant]のマルチテナント差異が、YAML内のデータ取得方法によって、適切に解決されます。このライブラリは、[背景](#背景)記載の、再構成されたwebシステムアーキテクチャの基盤技術に位置付けられています。 From bee5bc3b6a0b7a135d274cb7127aeec022a88307 Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 09:20:40 +0900 Subject: [PATCH 39/41] underway examples integrations_test --- .gitignore | 1 + Cargo.toml | 8 + docs/Architecture.md | 108 +++++++++++++- examples/implements.rs | 8 + examples/integration_tests.rs | 260 ++++++++++++++++++++++++++++++++ src/context.rs | 45 +++--- src/dsl.rs | 273 +++++++++++++++++++--------------- src/index.rs | 199 +++++++++++++++++++++++-- 8 files changed, 739 insertions(+), 163 deletions(-) create mode 100644 examples/integration_tests.rs diff --git a/.gitignore b/.gitignore index 2d1069b..ef3f377 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ **/target/** Cargo.lock 
**/Cargo.lock +**/dsl_compiled.rs **/.npmrc CLAUDE.md \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index 2c086de..dcd8927 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,3 +21,11 @@ serde_yaml_ng = { version = "0.10", optional = true } default = [] logging = ["dep:log"] precompile = ["dep:serde_yaml_ng"] + +[[example]] +name = "precompile" +required-features = ["precompile"] + +[[example]] +name = "integration_tests" +required-features = ["precompile"] diff --git a/docs/Architecture.md b/docs/Architecture.md index 340399f..ec49a29 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -19,7 +19,7 @@ | - | `Tree` | - | n次元scalar map型 | provided.rs | | - | `SetOutcome` | - | `StoreClient::set`が返すCreatedかUpdated | required.rs | | Tree | `wire` | `(&self) -> Vec` | Treeをワイヤフォーマットに変換 | tree.rs | -| | `dewire` | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | +| | `unwire` | `(bytes: &[u8]) -> Option` | ワイヤフォーマットからTreeへ変換 | tree.rs | | Dsl | `compile` | `(tree: &Tree) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | Treeから静的list(paths/children/leaves/interning/interning_idx) を返す | dsl.rs | | | `write` | `(src: &[u8], out_path: &str) -> Result<(), String>` | YAMLファイルパスから.rsを出力[precompile] | dsl.rs | | Index | `new` | `(paths, children, leaves, interning, interning_idx) -> Index` | compile済みdslからIndex構築 | index.rs | @@ -42,7 +42,38 @@ | ContextError | `fmt` | | Contextの出力するエラーを返す | provided.rs | -※ dslコンパイル時検出エラーとしたいがTreeパースエラーが混在 +## プライベートfn構成 + +| Module | fn | Signature | Description | Filename | +|--------|----|-----------|-------------|----------| +| Compiler | `new` | `() -> Compiler` | Compiler初期化 | dsl.rs | +| | `walk_field_key` | `(&mut self, keyword: &[u8], value: &Tree, inh_load: Option<&MetaBlock>, inh_store: Option<&MetaBlock>)` | field_keyノードを再帰処理しpaths/children/leavesを構築 | dsl.rs | +| | `resolve_meta` | `(&mut self, pairs: &[(Vec, Tree)], meta_key: &[u8], inherited: Option<&MetaBlock>) -> Option` | 
`_load`/`_store`ブロックを親から継承しつつ現ノードで上書きして返す | dsl.rs | +| | `write_leaf` | `(&mut self, path_idx: u32, keyword_idx: u32, value_idx: Option, load: Option<&MetaBlock>, store: Option<&MetaBlock>)` | leavesにleafデータを書き込みpaths[path_idx]をis_leaf=1で更新 | dsl.rs | +| | `intern` | `(&mut self, s: &[u8]) -> u32` | バイト列をinterningに追加しinterning_idxを返す(重複排除) | dsl.rs | +| | `intern_tree_scalar` | `(&mut self, v: &Tree) -> u32` | TreeスカラーまたはNullをinternしてindexを返す | dsl.rs | +| | `push_u32` | `(&mut self, v: u32)` | u32leをleavesに追記 | dsl.rs | +| | `finish` | `(self) -> (Box<[u64]>, Box<[u32]>, Box<[u8]>, Box<[u8]>, Box<[u64]>)` | 各Vecをboxed sliceに変換して返す | dsl.rs | +| Compiler (precompile) | `parse_yaml` | `(src: &[u8]) -> Result` | YAMLバイト列をTreeにパース | dsl.rs | +| | `yaml_value_to_tree` | `(v: serde_yaml_ng::Value) -> Tree` | serde_yaml_ng::ValueをTreeに変換 | dsl.rs | +| | `emit_u64_slice` | `(out: &mut String, name: &str, data: &[u64])` | `&[u64]`をRustスタティック宣言として出力 | dsl.rs | +| | `emit_u32_slice` | `(out: &mut String, name: &str, data: &[u32])` | `&[u32]`をRustスタティック宣言として出力 | dsl.rs | +| | `emit_u8_slice` | `(out: &mut String, name: &str, data: &[u8])` | `&[u8]`をRustスタティック宣言として出力 | dsl.rs | +| Index | `find` | `(&self, path: &str) -> Option` | '.'区切りパスをルートからたどりpath_idxを返す | index.rs | +| | `find_child` | `(&self, path_idx: u32, keyword: &[u8]) -> Option` | path_idxの子の中からkeywordに一致するpath_idxを返す | index.rs | +| | `collect_leaves` | `(&self, path_idx: u32, out: &mut Vec)` | path_idx以下の全leafをoutに再帰収集 | index.rs | +| | `decode_meta` | `(&self, path_idx: u32, leaf_offset: u32, kind: MetaKind) -> (&str, BTreeMap)` | leavesから`_load`または`_store`のclient名とargsを読み出す | index.rs | +| | `read_u32` | `(&self, off: usize) -> u32` | leavesのoffからu32leを読む | index.rs | +| | `interning_str` | `(&self, idx: usize) -> &[u8]` | interning_idxのidxからinterningのバイト列スライスを返す | index.rs | +| Tree | `write_value` | `(value: &Tree, buf: &mut Vec)` | Treeをワイヤフォーマットにシリアライズしbufに追記 | tree.rs | +| | `read_value` | `(bytes: 
&[u8]) -> Option<(Tree, &[u8])>` | bytesの先頭からTreeをデシリアライズし残バイトと返す | tree.rs | +| | `read_u32` | `(bytes: &[u8]) -> Option<(usize, &[u8])>` | bytesの先頭4バイトをu32leとして読みusizeで返す | tree.rs | +| | `split_at` | `(bytes: &[u8], n: usize) -> Option<(&[u8], &[u8])>` | bytesをn位置で分割しNoneを安全に返す | tree.rs | +| Context | `cache_get` | `(&self, path_idx: u32) -> Option<&Tree>` | インスタンスキャッシュからpath_idxの値を返す | context.rs | +| | `cache_set` | `(&mut self, path_idx: u32, value: Tree)` | インスタンスキャッシュにpath_idxの値を書き込む(上書き) | context.rs | +| | `cache_remove` | `(&mut self, path_idx: u32)` | インスタンスキャッシュのpath_idxエントリをNullで無効化 | context.rs | +| | `guard_recursion` | `(&self, path_idx: u32) -> Result<(), ContextError>` | called_keysの重複・上限超過を検出しエラーを返す | context.rs | +| | `resolve_leaf` | `(&mut self, path_idx: u32, leaf_offset: u32) -> Result, ContextError>` | cache→_store→_loadの順で値を解決しwrite-throughする | context.rs | ## 用語 @@ -115,13 +146,78 @@ Contextインスタンス固有のキャッシュ。StoreClientとは独立。 5. miss時、`_load` client で自動ロード → write-through to `_store` 6. 
`Ok(Some(value))` / `Ok(None)` / `Err(ContextError)` を返却 +## データ構造仕様 + +### 静的データ配列 + +`Dsl::compile`が返す5配列。アプリ起動時に一度だけ構築し、`Index`が保持する。 + +``` +paths: Box<[u64]> // pathノード一覧。[0]がvirtual root +children: Box<[u32]> // 各pathの子path_idxをフラットに連結 +leaves: Box<[u8]> // leafデータのバイト列 +interning: Box<[u8]> // 文字列バイト列をフラットに連結 +interning_idx: Box<[u64]> // interningのoffset(u32)+len(u32)エントリ一覧 +``` + +### path (u64) + +| field | bits | 範囲 | +|-------------|------|-----------| +| is_leaf | 1 | bit 63 | +| offset | 32 | bits 62..31 | +| count | 8 | bits 30..23 | +| keyword_idx | 23 | bits 22..0 | + +- `is_leaf=0`: 非leafノード。`children[offset..offset+count[3:0]]`に子path_idxが並ぶ +- `is_leaf=1`: leafノード。`leaves[offset..]`にleafデータが並ぶ。`count[7:4]=load_args数`, `count[3:0]=store_args数` +- `keyword_idx`: このノードのkeywordのinterning_idx + +### children ([u32]) + +各エントリはpath_idx。各pathノードが持つ子の範囲は`path.offset`と`path.count[3:0]`で決まる。 + +### leaves + +leafノード1つ分のバイト列レイアウト: + +| フィールド | サイズ | 説明 | +|---------------------|----------|------| +| keyword_idx | u32 | このleafのkeywordのinterning_idx | +| value_token_count | u32 | valueトークン数。0=null | +| token_type[0] | u8 | 0=static(interning_idx), 1=placeholder(path_idx) | +| token_idx[0] | u32 | interning_idxまたはpath_idx | +| ... × value_token_count | | | +| load_client_idx | u32 | interning_idx | +| load_key_idx | u32 | interning_idx | +| store_client_idx | u32 | interning_idx | +| store_key_idx | u32 | interning_idx | +| load.args × N | u32+u32 | key_idx + value_idx (interning) | +| store.args × N | u32+u32 | key_idx + value_idx (interning) | + +**valueの解釈:** +- `token_count=0`: null +- `token_count=1, type=static`: 静的文字列 +- `token_count=1, type=placeholder`: 単独`${path}` → `Context.get(path_idx)`の値をそのままコピー(型保持) +- `token_count≥2` または混在: template → 各tokenを解決しstring結合 + +**placeholder解決はcompile時に2パスで行う:** +1. 1パス目: path構造を確定(全path_idxを決定) +2. 
2パス目: value内の`${path}`文字列をpath_idxに解決してleavesに書き込む + +### interning_idx ([u64]) + +各エントリ: `offset(u32, bits63..32) | len(u32, bits31..0)` + +インデックス0は空文字列(virtual rootのkeyword)。 + ## Placeholder Resolution Rules -`${}` paths are always treated as absolute paths. +`${}`内のパスは常に絶対パスとして扱う。 -**Placeholder resolution at runtime:** -- `is_template=false`(単独 `${path}`): `Context.get(path)` の値をそのままコピー(string化しない) -- `is_template=true`(文字列混在): 各placeholderを `Context.get()` で解決しstringとして結合 +**実行時の解決:** +- `token_count=1, type=placeholder`: `Context.get(path_idx)`の値をそのままコピー(string化しない) +- template: 各tokenを`Context.get()`で解決しstringとして結合 ## Error Types diff --git a/examples/implements.rs b/examples/implements.rs index efe934f..ec2c39a 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -173,6 +173,14 @@ impl MyRegistry { tenant_db: Arc::new(TenantDbClient::new()), } } + + pub fn memory_set(&self, key: &str, value: context_engine::Tree) { + self.memory.data.lock().unwrap().insert(key.to_string(), value); + } + + pub fn memory_clear(&self) { + self.memory.data.lock().unwrap().clear(); + } } impl StoreRegistry for MyRegistry { diff --git a/examples/integration_tests.rs b/examples/integration_tests.rs new file mode 100644 index 0000000..0395b19 --- /dev/null +++ b/examples/integration_tests.rs @@ -0,0 +1,260 @@ +extern crate std; + +#[path = "implements.rs"] +mod implements; + +use implements::MyRegistry; +use context_engine::context::Context; +use context_engine::ports::provided::{Context as ContextTrait, ContextError}; +use context_engine::{Index, Tree}; +use std::sync::Arc; + +// ── fixture ─────────────────────────────────────────────────────────────────── + +fn make_context<'r>(registry: &'r MyRegistry) -> Context<'r> { + let src = include_bytes!("tenant.yml"); + let tree = context_engine::dsl::parse_yaml(src).expect("parse failed"); + let (paths, children, leaves, interning, interning_idx) = context_engine::dsl::Dsl::compile(&tree); + let index = 
Arc::new(Index::new(paths, children, leaves, interning, interning_idx)); + Context::new(index, registry) +} + +fn scalar(s: &str) -> Tree { + Tree::Scalar(s.as_bytes().to_vec()) +} + +// ── test runner ─────────────────────────────────────────────────────────────── + +fn main() { + let mut passed = 0usize; + let mut failed = 0usize; + + macro_rules! test { + ($name:expr, $body:block) => {{ + std::print!(" {} ... ", $name); + let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| $body)); + match result { + Ok(()) => { std::println!("ok"); passed += 1; } + Err(_) => { std::println!("FAILED"); failed += 1; } + } + }}; + } + + // ========================================================================= + // session.user.id + // _load: Memory(key="request.authorization.user"), _store: Kvs(inherited) + // ========================================================================= + std::println!("\n[session.user.id]"); + + test!("get loads from Memory when key is preset", { + let registry = MyRegistry::new(); + registry.memory_set("request.authorization.user", scalar("42")); + let mut ctx = make_context(®istry); + let got = ctx.get("session.user.id").unwrap(); + assert_eq!(got, Some(scalar("42"))); + }); + + test!("get returns None when Memory has no key", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + // _load returns None → LoadFailed(NotFound) + let result = ctx.get("session.user.id"); + assert!(matches!(result, Err(ContextError::LoadFailed(_)))); + }); + + test!("get cache hit on second call", { + let registry = MyRegistry::new(); + registry.memory_set("request.authorization.user", scalar("42")); + let mut ctx = make_context(®istry); + ctx.get("session.user.id").unwrap(); + // Memory cleared — second get must come from cache + registry.memory_clear(); + let got = ctx.get("session.user.id").unwrap(); + assert_eq!(got, Some(scalar("42"))); + }); + + test!("set writes to Kvs and cache", { + let registry = MyRegistry::new(); + 
let mut ctx = make_context(®istry); + assert!(ctx.set("session.user.id", scalar("99")).unwrap()); + let got = ctx.get("session.user.id").unwrap(); + assert_eq!(got, Some(scalar("99"))); + }); + + test!("exists true after set", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + ctx.set("session.user.id", scalar("1")).unwrap(); + assert!(ctx.exists("session.user.id").unwrap()); + }); + + test!("exists false before set", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + assert!(!ctx.exists("session.user.id").unwrap()); + }); + + test!("delete removes from Kvs and cache", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + ctx.set("session.user.id", scalar("1")).unwrap(); + assert!(ctx.delete("session.user.id").unwrap()); + assert!(!ctx.exists("session.user.id").unwrap()); + }); + + // ========================================================================= + // session.user.name + // _load: TenantDb(key="users.id.${session.user.id}") — placeholder依存 + // _store: Kvs(inherited) + // ========================================================================= + std::println!("\n[session.user.name — placeholder in key]"); + + test!("set and get without placeholder resolution", { + // key contains ${session.user.id} but set bypasses _load + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + assert!(ctx.set("session.user.name", scalar("alice")).unwrap()); + let got = ctx.get("session.user.name").unwrap(); + assert_eq!(got, Some(scalar("alice"))); + }); + + // ========================================================================= + // session.user.name_copy + // value: ${session.user.name} — single placeholder, type-preserving copy + // ========================================================================= + std::println!("\n[session.user.name_copy — placeholder value]"); + + test!("get resolves placeholder to session.user.name value", { + let registry = 
MyRegistry::new(); + let mut ctx = make_context(®istry); + ctx.set("session.user.name", scalar("alice")).unwrap(); + let got = ctx.get("session.user.name_copy").unwrap(); + assert_eq!(got, Some(scalar("alice"))); + }); + + test!("get returns LoadFailed when referenced path has no value", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + // session.user.name not set, _load will fail + let result = ctx.get("session.user.name_copy"); + assert!(matches!(result, Err(ContextError::LoadFailed(_)))); + }); + + // ========================================================================= + // session.user.tenant.id + // _load: Memory(key="request.authorization.tenant"), _store: Kvs(inherited from session.user) + // ========================================================================= + std::println!("\n[session.user.tenant.id]"); + + test!("get loads from Memory", { + let registry = MyRegistry::new(); + registry.memory_set("request.authorization.tenant", scalar("10")); + let mut ctx = make_context(®istry); + let got = ctx.get("session.user.tenant.id").unwrap(); + assert_eq!(got, Some(scalar("10"))); + }); + + test!("set and get", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + assert!(ctx.set("session.user.tenant.id", scalar("10")).unwrap()); + let got = ctx.get("session.user.tenant.id").unwrap(); + assert_eq!(got, Some(scalar("10"))); + }); + + // ========================================================================= + // connection.common_db — static leaf values + // _load: Env, static values: driver="postgres", charset="UTF8" + // ========================================================================= + std::println!("\n[connection.common_db — static values]"); + + test!("get driver returns static value postgres", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let got = ctx.get("connection.common_db.driver").unwrap(); + assert_eq!(got, Some(scalar("postgres"))); + }); + + 
test!("get charset returns static value UTF8", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let got = ctx.get("connection.common_db.charset").unwrap(); + assert_eq!(got, Some(scalar("UTF8"))); + }); + + test!("get host returns None when Env not set", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + // Env has no _load client registered for connection.common_db.host + let result = ctx.get("connection.common_db.host"); + assert!(matches!(result, Err(ContextError::LoadFailed(_)))); + }); + + // ========================================================================= + // connection.tenant_db — static leaf values + // ========================================================================= + std::println!("\n[connection.tenant_db — static values]"); + + test!("get driver returns static value postgres", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let got = ctx.get("connection.tenant_db.driver").unwrap(); + assert_eq!(got, Some(scalar("postgres"))); + }); + + // ========================================================================= + // recursion guard + // ========================================================================= + std::println!("\n[recursion]"); + + test!("get same path twice in sequence does not recurse", { + let registry = MyRegistry::new(); + registry.memory_set("request.authorization.user", scalar("1")); + let mut ctx = make_context(®istry); + ctx.get("session.user.id").unwrap(); + // second independent call — called_paths should be cleared between calls + let got = ctx.get("session.user.id").unwrap(); + assert_eq!(got, Some(scalar("1"))); + }); + + // ========================================================================= + // KeyNotFound + // ========================================================================= + std::println!("\n[KeyNotFound]"); + + test!("get nonexistent path", { + let registry = MyRegistry::new(); + let mut ctx = 
make_context(®istry); + let result = ctx.get("session.user.nonexistent"); + assert!(matches!(result, Err(ContextError::KeyNotFound(_)))); + }); + + test!("set nonexistent path", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let result = ctx.set("session.user.nonexistent", scalar("x")); + assert!(matches!(result, Err(ContextError::KeyNotFound(_)))); + }); + + test!("delete nonexistent path", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let result = ctx.delete("session.nonexistent"); + assert!(matches!(result, Err(ContextError::KeyNotFound(_)))); + }); + + test!("exists nonexistent path", { + let registry = MyRegistry::new(); + let mut ctx = make_context(®istry); + let result = ctx.exists("session.nonexistent"); + assert!(matches!(result, Err(ContextError::KeyNotFound(_)))); + }); + + // ========================================================================= + // results + // ========================================================================= + std::println!("\n{} passed, {} failed", passed, failed); + if failed > 0 { + std::process::exit(1); + } +} diff --git a/src/context.rs b/src/context.rs index af460c7..6449879 100644 --- a/src/context.rs +++ b/src/context.rs @@ -15,7 +15,7 @@ pub struct Context<'r> { registry: &'r dyn StoreRegistry, cache_keys: Vec, // path_idx cache_vals: Vec, // parallel to cache_keys - called_keys: BTreeSet, + called_paths: BTreeSet, max_recursion: usize, } @@ -26,7 +26,7 @@ impl<'r> Context<'r> { registry, cache_keys: Vec::new(), cache_vals: Vec::new(), - called_keys: BTreeSet::new(), + called_paths: BTreeSet::new(), max_recursion: 20, } } @@ -54,7 +54,7 @@ impl<'r> Context<'r> { } fn guard_recursion(&self, path_idx: u32) -> Result<(), ContextError> { - if self.called_keys.len() >= self.max_recursion || self.called_keys.contains(&path_idx) { + if self.called_paths.len() >= self.max_recursion || self.called_paths.contains(&path_idx) { return 
Err(ContextError::RecursionLimitExceeded); } Ok(()) @@ -75,21 +75,21 @@ impl<'r> ContextTrait for Context<'r> { if leaves.len() == 1 { let leaf = &leaves[0]; self.guard_recursion(leaf.path_idx)?; - self.called_keys.insert(leaf.path_idx); + self.called_paths.insert(leaf.path_idx); let result = self.resolve_leaf(leaf.path_idx, leaf.leaf_offset); - self.called_keys.remove(&leaf.path_idx); + self.called_paths.remove(&leaf.path_idx); result } else { let mut pairs: Vec<(Vec, Tree)> = Vec::new(); for leaf in leaves.iter() { self.guard_recursion(leaf.path_idx)?; - self.called_keys.insert(leaf.path_idx); + self.called_paths.insert(leaf.path_idx); let value = self.resolve_leaf(leaf.path_idx, leaf.leaf_offset)?; - self.called_keys.remove(&leaf.path_idx); + self.called_paths.remove(&leaf.path_idx); if let Some(v) = value { let keyword = self.index.keyword_of(leaf.path_idx).to_vec(); pairs.push((keyword, v)); @@ -112,15 +112,14 @@ impl<'r> ContextTrait for Context<'r> { StoreError::ClientNotFound(keyword.to_string()) ))?; - let store_key = args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } - }).ok_or_else(|| ContextError::StoreFailed( - StoreError::ConfigMissing("key".to_string()) - ))?; + let store_key = args.get("key") + .and_then(|v| if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None }) + .unwrap_or(key); - let args_ref: BTreeMap<&str, Tree> = args.iter() + let mut args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); + args_ref.insert("value", value.clone()); match client.set(store_key, &args_ref) { Some(SetOutcome::Created) | Some(SetOutcome::Updated) => { @@ -144,11 +143,9 @@ impl<'r> ContextTrait for Context<'r> { StoreError::ClientNotFound(keyword.to_string()) ))?; - let store_key = args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } - }).ok_or_else(|| ContextError::StoreFailed( - StoreError::ConfigMissing("key".to_string()) - ))?; 
+ let store_key = args.get("key") + .and_then(|v| if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None }) + .unwrap_or(key); let args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) @@ -177,12 +174,9 @@ impl<'r> ContextTrait for Context<'r> { return Ok(false); }; - let store_key = match args.get("key").and_then(|v| { - if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } - }) { - Some(k) => k, - None => return Ok(false), - }; + let store_key = args.get("key") + .and_then(|v| if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None }) + .unwrap_or(key); let args_ref: BTreeMap<&str, Tree> = args.iter() .map(|(k, v)| (k.as_str(), v.clone())) @@ -251,9 +245,10 @@ impl<'r> Context<'r> { if let Tree::Scalar(b) = v { from_utf8(b).ok() } else { None } }); if let Some(sk) = store_key { - let sargs: BTreeMap<&str, Tree> = store_args.iter() + let mut sargs: BTreeMap<&str, Tree> = store_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); + sargs.insert("value", value.clone()); store_client.set(sk, &sargs); } } diff --git a/src/dsl.rs b/src/dsl.rs index dde849b..87a3e98 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -20,21 +20,21 @@ pub const PROP_MAP: &[u8] = b"map"; // | field | bits | // |-------------|------| // | is_leaf | 1 | bit 63 -// | offset | 32 | bits 54..23 -// | count | 8 | bits 22..15 +// | offset | 32 | bits 62..31 +// | count | 8 | bits 30..23 // | | | is_leaf=0: [3:0]=子path数(1~16), [7:4]=unused // | | | is_leaf=1: [7:4]=load_args count, [3:0]=store_args count (各最大15) -// | keyword_idx | 23 | bits 14..0 interning_idx of this node's keyword +// | keyword_idx | 23 | bits 22..0 interning_idx of this node's keyword -pub const PATH_IS_LEAF_SHIFT: u64 = 63; -pub const PATH_OFFSET_SHIFT: u64 = 23; -pub const PATH_COUNT_SHIFT: u64 = 15; +pub const PATH_IS_LEAF_SHIFT: u64 = 63; +pub const PATH_OFFSET_SHIFT: u64 = 31; +pub const PATH_COUNT_SHIFT: u64 = 23; pub const PATH_KEYWORD_IDX_SHIFT: u64 = 0; pub const 
PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT; pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT; pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT; -pub const PATH_KEYWORD_IDX_MASK: u64 = 0x7fff; // bits 14..0 +pub const PATH_KEYWORD_IDX_MASK: u64 = 0x7f_ffff; // bits 22..0 // ── Dsl ─────────────────────────────────────────────────────────────────────── @@ -70,15 +70,20 @@ impl Dsl { compiler.paths.push(0u64); // placeholder, filled after walking top-level if let Tree::Mapping(pairs) = tree { let children_offset = compiler.children.len() as u32; - let mut child_count = 0u32; - for (k, v) in pairs { - if k.first() != Some(&b'_') { - let child_idx = compiler.paths.len() as u32; - compiler.walk_field_key(k, v, None, None); - compiler.children.push(child_idx); - child_count += 1; - } + let field_pairs: Vec<_> = pairs.iter() + .filter(|(k, _)| k.first() != Some(&b'_')) + .collect(); + let child_count = field_pairs.len() as u32; + + for _ in 0..child_count { + compiler.children.push(0); // placeholder } + for (i, (k, v)) in field_pairs.iter().enumerate() { + let child_idx = compiler.paths.len() as u32; + compiler.children[children_offset as usize + i] = child_idx; + compiler.walk_field_key(k, v, None, None); + } + let count_bits = (child_count as u64) & 0xf; compiler.paths[0] = (children_offset as u64) << PATH_OFFSET_SHIFT @@ -166,20 +171,29 @@ impl Compiler { let store = self.resolve_meta(pairs, META_STORE, inh_store); // Collect child field_keys. + // Reserve children slots first so they are contiguous, then walk. let children_offset = self.children.len() as u32; - let mut child_count = 0u32; + let field_pairs: Vec<_> = pairs.iter() + .filter(|(k, _)| k.first() != Some(&b'_')) + .collect(); + let child_count = field_pairs.len() as u32; + + // Reserve placeholder slots for each child's path_idx. 
+ let first_child_path_idx = self.paths.len() as u32; + for i in 0..child_count { + self.children.push(first_child_path_idx + i); // will be correct after walk + } - for (k, v) in pairs { - if k.first() == Some(&b'_') { continue; } + // Now walk each child; paths are pushed in order so indices are sequential. + for (i, (k, v)) in field_pairs.iter().enumerate() { let child_idx = self.paths.len() as u32; + self.children[children_offset as usize + i] = child_idx; self.walk_field_key(k, v, load.as_ref(), store.as_ref()); - self.children.push(child_idx); - child_count += 1; } if child_count == 0 { // No child field_keys → treat as leaf. - self.write_leaf(path_idx, keyword_idx, None, load.as_ref(), store.as_ref()); + self.write_leaf(path_idx, keyword_idx, &Tree::Null, load.as_ref(), store.as_ref()); } else { let count_bits = (child_count as u64) & 0xf; self.paths[path_idx as usize] = @@ -190,8 +204,7 @@ impl Compiler { } // Scalar or Null → leaf with optional hardcoded value. _ => { - let value_idx = self.intern_tree_scalar(value); - self.write_leaf(path_idx, keyword_idx, Some(value_idx), inh_load, inh_store); + self.write_leaf(path_idx, keyword_idx, value, inh_load, inh_store); } } } @@ -222,7 +235,7 @@ impl Compiler { client_idx = self.intern(b); } } else if k.as_slice() == PROP_KEY { - key_idx = self.intern_tree_scalar(v); + key_idx = if let Tree::Scalar(b) = v { self.intern(b) } else { 0 }; } else if k.as_slice() == PROP_MAP { // map entries: each value is a string (store column name etc.) 
// stored as (dst_path_interning_idx, src_value_interning_idx) @@ -230,7 +243,7 @@ impl Compiler { args.clear(); // local map overrides inherited for (mk, mv) in map_pairs { let mk_idx = self.intern(mk); - let mv_idx = self.intern_tree_scalar(mv); + let mv_idx = if let Tree::Scalar(b) = mv { self.intern(b) } else { 0 }; args.push((mk_idx, mv_idx)); } } @@ -239,7 +252,7 @@ impl Compiler { && k.as_slice() != META_STATE { // arbitrary implementor arg let ak = self.intern(k); - let av = self.intern_tree_scalar(v); + let av = if let Tree::Scalar(b) = v { self.intern(b) } else { 0 }; // overwrite if key already present, otherwise append if let Some(entry) = args.iter_mut().find(|(ek, _)| *ek == ak) { entry.1 = av; @@ -258,29 +271,72 @@ impl Compiler { /// Write leaf data to `leaves` and update `paths[path_idx]`. /// - /// Leaf layout: - /// keyword_idx (u32le) - /// value_idx (u32le) // 0 = null/absent + /// Leaf layout (Architecture.md #データ構造仕様 参照): + /// keyword_idx (u32le) + /// value_token_count (u32le) + /// token_type[i] (u8) 0=static(interning_idx), 1=placeholder(path文字列のinterning_idx) + /// token_idx[i] (u32le) + /// ... 
× value_token_count /// _load client_idx (u32le) | key_idx (u32le) /// _store client_idx (u32le) | key_idx (u32le) /// _load.args × load_args_count : key_idx(u32le) | value_idx(u32le) /// _store.args × store_args_count : key_idx(u32le) | value_idx(u32le) fn write_leaf( &mut self, - path_idx: u32, + path_idx: u32, keyword_idx: u32, - value_idx: Option, - load: Option<&MetaBlock>, - store: Option<&MetaBlock>, + value: &Tree, + load: Option<&MetaBlock>, + store: Option<&MetaBlock>, ) { let leaf_offset = self.leaves.len() as u32; let load_args_count = load.map(|b| b.args.len()).unwrap_or(0); let store_args_count = store.map(|b| b.args.len()).unwrap_or(0); - // keyword + value + // keyword self.push_u32(keyword_idx); - self.push_u32(value_idx.unwrap_or(0)); + + // value tokens: tokenize scalar by ${}, Null → 0 tokens + if let Tree::Scalar(b) = value { + // split by ${...} into (type, bytes) pairs + let mut tokens: Vec<(u8, u32)> = Vec::new(); + let mut rest = b.as_slice(); + while !rest.is_empty() { + if let Some(start) = rest.windows(2).position(|w| w == b"${") { + if start > 0 { + // static prefix + let idx = self.intern(&rest[..start]); + tokens.push((0, idx)); + } + rest = &rest[start + 2..]; + if let Some(end) = rest.iter().position(|&c| c == b'}') { + // placeholder path string + let idx = self.intern(&rest[..end]); + tokens.push((1, idx)); + rest = &rest[end + 1..]; + } else { + // malformed: treat remainder as static + let idx = self.intern(rest); + tokens.push((0, idx)); + break; + } + } else { + // no more placeholders + let idx = self.intern(rest); + tokens.push((0, idx)); + break; + } + } + self.push_u32(tokens.len() as u32); + for (t, idx) in tokens { + self.leaves.push(t); + self.push_u32(idx); + } + } else { + // Null → 0 tokens + self.push_u32(0); + } // _load header self.push_u32(load.map(|b| b.client_idx).unwrap_or(0)); @@ -311,9 +367,9 @@ impl Compiler { | ((store_args_count as u64) & 0xf); self.paths[path_idx as usize] = PATH_IS_LEAF_MASK - | 
(leaf_offset as u64) << PATH_OFFSET_SHIFT - | count << PATH_COUNT_SHIFT - | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; + | (leaf_offset as u64) << PATH_OFFSET_SHIFT + | count << PATH_COUNT_SHIFT + | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; } // ── interning ───────────────────────────────────────────────────────────── @@ -336,15 +392,6 @@ impl Compiler { idx } - /// Intern a Tree scalar as bytes. Null → intern empty slice → index 0. - fn intern_tree_scalar(&mut self, v: &Tree) -> u32 { - match v { - Tree::Scalar(b) => self.intern(b), - Tree::Null => self.intern(b""), - _ => self.intern(b""), - } - } - // ── helpers ─────────────────────────────────────────────────────────────── fn push_u32(&mut self, v: u32) { @@ -365,7 +412,7 @@ impl Compiler { // ── precompile helpers ──────────────────────────────────────────────────────── #[cfg(feature = "precompile")] -fn parse_yaml(src: &[u8]) -> Result { +pub fn parse_yaml(src: &[u8]) -> Result { extern crate std; use std::string::ToString; use std::format; @@ -461,40 +508,38 @@ mod tests { (p.into_vec(), c.into_vec(), l.into_vec(), i.into_vec(), ii.into_vec()) } + // --- single_leaf --- + #[test] - fn test_single_leaf() { - let tree = mapping(vec![ + fn single_leaf() { + let (paths, ..) = compile(&mapping(vec![ ("name", Tree::Null), - ]); - let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); - // root(0) + name(1) - assert_eq!(paths.len(), 2); - assert!(paths[0] & PATH_IS_LEAF_MASK == 0); // root is not a leaf - assert!(paths[1] & PATH_IS_LEAF_MASK != 0); + ])); + assert_eq!(paths.len(), 2); // root(0) + name(1) + assert!(paths[0] & PATH_IS_LEAF_MASK == 0); // root is not a leaf + assert!(paths[1] & PATH_IS_LEAF_MASK != 0); // name is a leaf } + // --- nested --- + #[test] - fn test_nested_field_keys() { - let tree = mapping(vec![ + fn nested() { + let (paths, children, ..) 
= compile(&mapping(vec![ ("user", mapping(vec![ ("id", Tree::Null), ("name", Tree::Null), ])), - ]); - let (paths, children, _leaves, _interning, _interning_idx) = compile(&tree); - // root(0) + user(1) + id(2) + name(3) - assert_eq!(paths.len(), 4); - assert!(paths[0] & PATH_IS_LEAF_MASK == 0); // root - assert!(paths[1] & PATH_IS_LEAF_MASK == 0); // user is not a leaf - assert!(paths[2] & PATH_IS_LEAF_MASK != 0); // id - assert!(paths[3] & PATH_IS_LEAF_MASK != 0); // name - // root→user(1 child) + user→id,name(2 children) = 3 entries - assert_eq!(children.len(), 3); + ])); + assert_eq!(paths.len(), 4); // root(0) + user(1) + id(2) + name(3) + assert!(paths[1] & PATH_IS_LEAF_MASK == 0); // user is not a leaf + assert_eq!(children.len(), 3); // root→user(1) + user→id,name(2) } + // --- meta_key --- + #[test] - fn test_meta_key_excluded_from_paths() { - let tree = mapping(vec![ + fn meta_key_excluded_from_paths() { + let (paths, ..) = compile(&mapping(vec![ ("user", mapping(vec![ ("_load", mapping(vec![ ("client", scalar("Memory")), @@ -502,15 +547,16 @@ mod tests { ])), ("id", Tree::Null), ])), - ]); - let (paths, _children, _leaves, _interning, _interning_idx) = compile(&tree); - // root(0) + user(1) + id(2) — _load must not appear as a path + ])); + // root(0) + user(1) + id(2) — _load must not appear assert_eq!(paths.len(), 3); } + // --- load in leaf --- + #[test] - fn test_load_stored_in_leaf() { - let tree = mapping(vec![ + fn load_client_stored_in_leaf() { + let (paths, _, leaves, interning, interning_idx) = compile(&mapping(vec![ ("user", mapping(vec![ ("_load", mapping(vec![ ("client", scalar("Memory")), @@ -518,27 +564,23 @@ mod tests { ])), ("id", Tree::Null), ])), - ]); - let (paths, _children, leaves, interning, interning_idx) = compile(&tree); - + ])); // root(0), user(1), id(2) - let id_path = paths[2]; - assert!(id_path & PATH_IS_LEAF_MASK != 0); - let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; - - // keyword_idx(4) 
+ value_idx(4) + load_client(4) + load_key(4) + store_client(4) + store_key(4) = 24 bytes - assert!(leaves.len() >= leaf_offset + 24); - - // load client_idx points to "Memory" - let load_client_idx = u32::from_le_bytes(leaves[leaf_offset+8..leaf_offset+12].try_into().unwrap()) as usize; - let offset = (interning_idx[load_client_idx] >> 32) as usize; - let len = (interning_idx[load_client_idx] & 0xffff_ffff) as usize; - assert_eq!(&interning[offset..offset+len], b"Memory"); + // leaf: keyword(4) + token_count(4) + tokens(0×5) + load_client(4) + let leaf_offset = ((paths[2] & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + let token_count = u32::from_le_bytes(leaves[leaf_offset+4..leaf_offset+8].try_into().unwrap()) as usize; + let meta_base = leaf_offset + 8 + token_count * 5; + let client_idx = u32::from_le_bytes(leaves[meta_base..meta_base+4].try_into().unwrap()) as usize; + let off = (interning_idx[client_idx] >> 32) as usize; + let len = (interning_idx[client_idx] & 0xffff_ffff) as usize; + assert_eq!(&interning[off..off+len], b"Memory"); } + // --- store inheritance --- + #[test] - fn test_store_inheritance() { - let tree = mapping(vec![ + fn store_inherited_to_child_leaf() { + let (paths, _, leaves, interning, interning_idx) = compile(&mapping(vec![ ("session", mapping(vec![ ("_store", mapping(vec![ ("client", scalar("Kvs")), @@ -548,50 +590,45 @@ mod tests { ("id", Tree::Null), ])), ])), - ]); - let (paths, _children, leaves, interning, interning_idx) = compile(&tree); - + ])); // root(0), session(1), user(2), id(3) - let id_path = paths[3]; - assert!(id_path & PATH_IS_LEAF_MASK != 0); - let leaf_offset = ((id_path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; - - // store client_idx (offset 16) should point to "Kvs" - let store_client_idx = u32::from_le_bytes(leaves[leaf_offset+16..leaf_offset+20].try_into().unwrap()) as usize; - let offset = (interning_idx[store_client_idx] >> 32) as usize; - let len = (interning_idx[store_client_idx] & 
0xffff_ffff) as usize; - assert_eq!(&interning[offset..offset+len], b"Kvs"); + // leaf: keyword(4) + token_count(4) + tokens(0×5) + load_client(4) + load_key(4) + store_client(4) + let leaf_offset = ((paths[3] & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; + let token_count = u32::from_le_bytes(leaves[leaf_offset+4..leaf_offset+8].try_into().unwrap()) as usize; + let meta_base = leaf_offset + 8 + token_count * 5; + let client_idx = u32::from_le_bytes(leaves[meta_base+8..meta_base+12].try_into().unwrap()) as usize; + let off = (interning_idx[client_idx] >> 32) as usize; + let len = (interning_idx[client_idx] & 0xffff_ffff) as usize; + assert_eq!(&interning[off..off+len], b"Kvs"); } + // --- intern --- + #[test] - fn test_intern_dedup() { - let tree = mapping(vec![ + fn intern_dedup() { + let (_, _, _, interning, interning_idx) = compile(&mapping(vec![ ("a", scalar("hello")), ("b", scalar("hello")), - ]); - let (_paths, _children, _leaves, _interning, interning_idx) = compile(&tree); - // "hello" should be interned only once + ])); let hello_count = (0..interning_idx.len()).filter(|&i| { - let offset = (interning_idx[i] >> 32) as usize; - let len = (interning_idx[i] & 0xffff_ffff) as usize; - &_interning[offset..offset+len] == b"hello" + let off = (interning_idx[i] >> 32) as usize; + let len = (interning_idx[i] & 0xffff_ffff) as usize; + interning.get(off..off+len) == Some(b"hello" as &[u8]) }).count(); assert_eq!(hello_count, 1); } + // --- precompile --- + #[cfg(feature = "precompile")] #[test] - fn test_write_tenant_yml() { + fn write_tenant_yml() { extern crate std; let src = std::include_bytes!("../examples/tenant.yml"); let out = std::env::temp_dir().join("tenant_compiled.rs"); - std::fs::remove_file(&out).ok(); // idempotency + std::fs::remove_file(&out).ok(); Dsl::write(src, out.to_str().unwrap()).expect("write failed"); let content = std::fs::read_to_string(&out).expect("output not written"); assert!(content.contains("pub static PATHS:")); - 
assert!(content.contains("pub static LEAVES:")); - assert!(content.contains("pub static INTERNING:")); - assert!(content.contains("// @generated")); - // output intentionally left for inspection } } diff --git a/src/index.rs b/src/index.rs index b3a0021..e06c09e 100644 --- a/src/index.rs +++ b/src/index.rs @@ -117,15 +117,16 @@ impl Index { /// Decode _load or _store from `leaves` at `leaf_offset`. /// - /// Leaf layout (u32le each): - /// +0 keyword_idx - /// +4 value_idx - /// +8 load_client_idx - /// +12 load_key_idx - /// +16 store_client_idx - /// +20 store_key_idx - /// +24 load.args × load_args_count : key_idx | value_idx - /// +24+N store.args × store_args_count : key_idx | value_idx + /// Leaf layout (Architecture.md #データ構造仕様 参照): + /// +0 keyword_idx (u32) + /// +4 value_token_count (u32) + /// +8 token[i]: type(u8) + idx(u32) × value_token_count + /// +8+N load_client_idx (u32) N = token_count × 5 + /// +8+N+4 load_key_idx (u32) + /// +8+N+8 store_client_idx (u32) + /// +8+N+12 store_key_idx (u32) + /// +8+N+16 load.args × load_count : key_idx(u32) | value_idx(u32) + /// +8+N+16+M store.args × store_count : key_idx(u32) | value_idx(u32) /// /// args counts are in path.count: [7:4]=load, [3:0]=store fn decode_meta(&self, path_idx: u32, leaf_offset: u32, kind: MetaKind) -> (&str, BTreeMap) { @@ -139,13 +140,17 @@ impl Index { let load_count = ((count_byte >> 4) & 0xf) as usize; let store_count = (count_byte & 0xf) as usize; + // skip keyword(4) + value_token_count(4) + tokens(token_count × 5) + let token_count = self.read_u32(base + 4) as usize; + let meta_base = base + 8 + token_count * 5; + let (client_offset, key_offset, args_count, args_start) = match kind { - MetaKind::Load => (8, 12, load_count, 24), - MetaKind::Store => (16, 20, store_count, 24 + load_count * 8), + MetaKind::Load => (0, 4, load_count, 16), + MetaKind::Store => (8, 12, store_count, 16 + load_count * 8), }; - let client_idx = self.read_u32(base + client_offset) as usize; - let key_idx 
= self.read_u32(base + key_offset) as usize; + let client_idx = self.read_u32(meta_base + client_offset) as usize; + let key_idx = self.read_u32(meta_base + key_offset) as usize; let client_name = from_utf8(self.interning_str(client_idx)).unwrap_or(""); if client_name.is_empty() { @@ -165,7 +170,7 @@ impl Index { // additional args for i in 0..args_count { - let off = base + args_start + i * 8; + let off = meta_base + args_start + i * 8; let ak = self.read_u32(off) as usize; let av = self.read_u32(off + 4) as usize; let k = from_utf8(self.interning_str(ak)).unwrap_or(""); @@ -196,3 +201,169 @@ impl Index { self.interning.get(offset..offset + len).unwrap_or(b"") } } + +// ── tests ───────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec; + use crate::dsl::Dsl; + + fn scalar(s: &str) -> Tree { Tree::Scalar(s.as_bytes().to_vec()) } + fn mapping(pairs: Vec<(&str, Tree)>) -> Tree { + Tree::Mapping(pairs.into_iter().map(|(k, v)| (k.as_bytes().to_vec(), v)).collect()) + } + + fn make_index(tree: &Tree) -> Index { + let (paths, children, leaves, interning, interning_idx) = Dsl::compile(tree); + Index::new(paths, children, leaves, interning, interning_idx) + } + + // --- traverse --- + + #[test] + fn traverse_leaf_path() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])), + ])); + let leaves = idx.traverse("session.user.id"); + assert_eq!(leaves.len(), 1); + } + + #[test] + fn traverse_intermediate_collects_all_leaves() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ("name", Tree::Null), + ])), + ])), + ])); + let leaves = idx.traverse("session.user"); + assert_eq!(leaves.len(), 2); + } + + #[test] + fn traverse_nonexistent_returns_empty() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + 
])), + ])), + ])); + let leaves = idx.traverse("session.user.missing"); + assert!(leaves.is_empty()); + } + + // --- keyword_of --- + + #[test] + fn keyword_of_leaf() { + let idx = make_index(&mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])); + // root(0), user(1), id(2) + assert_eq!(idx.keyword_of(2), b"id"); + } + + #[test] + fn keyword_of_intermediate() { + let idx = make_index(&mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])); + assert_eq!(idx.keyword_of(1), b"user"); + } + + // --- load_args --- + + #[test] + fn load_args_client_name() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("_load", mapping(vec![ + ("client", scalar("Memory")), + ("key", scalar("session:1")), + ])), + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])), + ])); + let leaves = idx.traverse("session.user.id"); + let (client, _) = idx.load_args(&leaves[0]); + assert_eq!(client, "Memory"); + } + + #[test] + fn load_args_key() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("_load", mapping(vec![ + ("client", scalar("Memory")), + ("key", scalar("session:1")), + ])), + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])), + ])); + let leaves = idx.traverse("session.user.id"); + let (_, args) = idx.load_args(&leaves[0]); + assert_eq!(args.get("key"), Some(&Tree::Scalar(b"session:1".to_vec()))); + } + + #[test] + fn load_args_no_load_returns_empty() { + let idx = make_index(&mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])); + let leaves = idx.traverse("user.id"); + let (client, args) = idx.load_args(&leaves[0]); + assert!(client.is_empty() && args.is_empty()); + } + + // --- store_args --- + + #[test] + fn store_args_client_name() { + let idx = make_index(&mapping(vec![ + ("session", mapping(vec![ + ("_store", mapping(vec![ + ("client", scalar("Kvs")), + ("key", scalar("session:1")), + ])), + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])), + ])); + let 
leaves = idx.traverse("session.user.id"); + let (client, _) = idx.store_args(&leaves[0]); + assert_eq!(client, "Kvs"); + } + + #[test] + fn store_args_no_store_returns_empty() { + let idx = make_index(&mapping(vec![ + ("user", mapping(vec![ + ("id", Tree::Null), + ])), + ])); + let leaves = idx.traverse("user.id"); + let (client, args) = idx.store_args(&leaves[0]); + assert!(client.is_empty() && args.is_empty()); + } +} From d74ff99136891645a9c3127a3668120566513918 Mon Sep 17 00:00:00 2001 From: Andyou Date: Wed, 8 Apr 2026 16:46:27 +0900 Subject: [PATCH 40/41] del unused --- docs/Architecture.md | 2 +- src/unused/codec.rs | 102 ------ src/unused/fixed_bits.rs | 170 ---------- src/unused/manifest.rs | 668 --------------------------------------- src/unused/mod.rs | 6 - src/unused/parser.rs | 593 ---------------------------------- src/unused/pool.rs | 64 ---- src/unused/state.rs | 659 -------------------------------------- 8 files changed, 1 insertion(+), 2263 deletions(-) delete mode 100644 src/unused/codec.rs delete mode 100644 src/unused/fixed_bits.rs delete mode 100644 src/unused/manifest.rs delete mode 100644 src/unused/mod.rs delete mode 100644 src/unused/parser.rs delete mode 100644 src/unused/pool.rs delete mode 100644 src/unused/state.rs diff --git a/docs/Architecture.md b/docs/Architecture.md index ec49a29..23ef97f 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -72,7 +72,7 @@ | Context | `cache_get` | `(&self, path_idx: u32) -> Option<&Tree>` | インスタンスキャッシュからpath_idxの値を返す | context.rs | | | `cache_set` | `(&mut self, path_idx: u32, value: Tree)` | インスタンスキャッシュにpath_idxの値を書き込む(上書き) | context.rs | | | `cache_remove` | `(&mut self, path_idx: u32)` | インスタンスキャッシュのpath_idxエントリをNullで無効化 | context.rs | -| | `guard_recursion` | `(&self, path_idx: u32) -> Result<(), ContextError>` | called_keysの重複・上限超過を検出しエラーを返す | context.rs | +| | `guard_recursion` | `(&self, path_idx: u32) -> Result<(), ContextError>` | called_pathsの重複・上限超過を検出しエラーを返す | context.rs | | 
| `resolve_leaf` | `(&mut self, path_idx: u32, leaf_offset: u32) -> Result, ContextError>` | cache→_store→_loadの順で値を解決しwrite-throughする | context.rs | ## 用語 diff --git a/src/unused/codec.rs b/src/unused/codec.rs deleted file mode 100644 index f255479..0000000 --- a/src/unused/codec.rs +++ /dev/null @@ -1,102 +0,0 @@ -use super::fixed_bits; - -pub const ROOT_NAMES: &[(&[u8], u64)] = &[ - (b"_load", fixed_bits::ROOT_LOAD), - (b"_store", fixed_bits::ROOT_STORE), - (b"_state", fixed_bits::ROOT_STATE), -]; - -pub fn root_encode(s: &[u8]) -> u64 { - ROOT_NAMES.iter() - .find(|(name, _)| *name == s) - .map(|(_, v)| *v) - .unwrap_or(fixed_bits::ROOT_NULL) -} - -pub const CLIENT_NAMES: &[(&[u8], u64)] = &[ - (b"State", fixed_bits::CLIENT_STATE), - (b"InMemory", fixed_bits::CLIENT_IN_MEMORY), - (b"Env", fixed_bits::CLIENT_ENV), - (b"KVS", fixed_bits::CLIENT_KVS), - (b"Db", fixed_bits::CLIENT_DB), - (b"HTTP", fixed_bits::CLIENT_HTTP), - (b"File", fixed_bits::CLIENT_FILE), -]; - -pub fn client_encode(s: &[u8]) -> u64 { - CLIENT_NAMES.iter() - .find(|(name, _)| *name == s) - .map(|(_, v)| *v) - .unwrap_or(fixed_bits::CLIENT_NULL) -} - -pub const PROP_NAMES: &[(&[u8], u64)] = &[ - (b"type", fixed_bits::PROP_TYPE), - (b"key", fixed_bits::PROP_KEY), - (b"connection", fixed_bits::PROP_CONNECTION), - (b"map", fixed_bits::PROP_MAP), - (b"ttl", fixed_bits::PROP_TTL), - (b"table", fixed_bits::PROP_TABLE), - (b"where", fixed_bits::PROP_WHERE), - (b"url", fixed_bits::PROP_URL), - (b"headers", fixed_bits::PROP_HEADERS), -]; - -pub fn prop_encode(s: &[u8]) -> u64 { - PROP_NAMES.iter() - .find(|(name, _)| *name == s) - .map(|(_, v)| *v) - .unwrap_or(fixed_bits::PROP_NULL) -} - -pub const TYPE_NAMES: &[(&[u8], u64)] = &[ - (b"integer", fixed_bits::TYPE_I64), - (b"string", fixed_bits::TYPE_UTF8), - (b"float", fixed_bits::TYPE_F64), - (b"boolean", fixed_bits::TYPE_BOOLEAN), - (b"datetime", fixed_bits::TYPE_DATETIME), -]; - -pub fn type_encode(s: &[u8]) -> u64 { - TYPE_NAMES.iter() - 
.find(|(name, _)| *name == s) - .map(|(_, v)| *v) - .unwrap_or(fixed_bits::TYPE_NULL) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_client_encode() { - for &(name, val) in CLIENT_NAMES { - assert_eq!(client_encode(name), val); - } - assert_eq!(client_encode(b"Unknown"), fixed_bits::CLIENT_NULL); - } - - #[test] - fn test_root_encode() { - for &(name, val) in ROOT_NAMES { - assert_eq!(root_encode(name), val); - } - assert_eq!(root_encode(b"_unknown"), fixed_bits::ROOT_NULL); - } - - #[test] - fn test_prop_encode() { - for &(name, val) in PROP_NAMES { - assert_eq!(prop_encode(name), val); - } - assert_eq!(prop_encode(b"unknown"), fixed_bits::PROP_NULL); - } - - #[test] - fn test_type_encode() { - for &(name, val) in TYPE_NAMES { - assert_eq!(type_encode(name), val); - } - assert_eq!(type_encode(b"unknown"), fixed_bits::TYPE_NULL); - } -} diff --git a/src/unused/fixed_bits.rs b/src/unused/fixed_bits.rs deleted file mode 100644 index 4ef3bf4..0000000 --- a/src/unused/fixed_bits.rs +++ /dev/null @@ -1,170 +0,0 @@ -// - keyword: a string used as a key in DSL -// - key: fixed bits of flags, reserved value or keyword index -// - path: keyword indices -// - value: indices composed of -// - arg -// -// note: -// - Value 0 means null in each field. 
- -// key record (64 bits) -// note: -// - is_path: is child keys of "_load.map" or has "{}" (placeholder) -// - has_children: has multiple child keys -// - is_leaf: has value(= has no child keys) -// -// | category | field | bits | offset | -// |-------------|---------------|------|--------| -// | flags | is_path | 1 | 63 | -// | flags | has_children | 1 | 62 | -// | flags | is_leaf | 1 | 61 | -// | key index | root index | 2 | 59 | -// | key index | client index | 4 | 55 | -// | key index | prop index | 4 | 51 | -// | key index | type index | 5 | 46 | -// | key index | dynamic index | 16 | 30 | -// | child index | child index | 16 | 14 | -// | padding | - | 14 | 0 | - -// value record (128 bits) -// note: -// - is_template: contains both static and placeholder (handled as string concat) -// - is_path: is a segment of placeholder (Value parser removes "{}") -// -// | category | field | bits | offset | -// |----------|---------------|------|--------| -// | flags | is_template | 1 | 63 | -// | token[0] | is_path | 1 | 62 | -// | token[0] | dynamic index | 16 | 46 | -// | token[1] | is_path | 1 | 45 | -// | token[1] | dynamic index | 16 | 29 | -// | token[2] | is_path | 1 | 28 | -// | token[2] | dynamic index | 16 | 12 | -// | padding | - | 12 | 0 | -// | token[3] | is_path | 1 | 63 | -// | token[3] | dynamic index | 16 | 47 | -// | token[4] | is_path | 1 | 46 | -// | token[4] | dynamic index | 16 | 30 | -// | token[5] | is_path | 1 | 29 | -// | token[5] | dynamic index | 16 | 13 | -// | padding | - | 13 | 0 | - -// --- key record offsets --- - -pub const K_OFFSET_IS_PATH: u32 = 63; -pub const K_OFFSET_HAS_CHILDREN: u32 = 62; -pub const K_OFFSET_IS_LEAF: u32 = 61; -pub const K_OFFSET_ROOT: u32 = 59; -pub const K_OFFSET_CLIENT: u32 = 55; -pub const K_OFFSET_PROP: u32 = 51; -pub const K_OFFSET_TYPE: u32 = 46; -pub const K_OFFSET_DYNAMIC: u32 = 30; -pub const K_OFFSET_CHILD: u32 = 14; - -// --- key record masks --- - -pub const K_MASK_IS_PATH: u64 = 0x1; -pub const 
K_MASK_HAS_CHILDREN: u64 = 0x1; -pub const K_MASK_IS_LEAF: u64 = 0x1; -pub const K_MASK_ROOT: u64 = 0x3; -pub const K_MASK_CLIENT: u64 = 0xF; -pub const K_MASK_PROP: u64 = 0xF; -pub const K_MASK_TYPE: u64 = 0x1F; -pub const K_MASK_DYNAMIC: u64 = 0xFFFF; -pub const K_MASK_CHILD: u64 = 0xFFFF; - -// --- value record offsets --- - -pub const V_OFFSET_IS_TEMPLATE: u32 = 63; -pub const V_OFFSET_T0_IS_PATH: u32 = 62; -pub const V_OFFSET_T0_DYNAMIC: u32 = 46; -pub const V_OFFSET_T1_IS_PATH: u32 = 45; -pub const V_OFFSET_T1_DYNAMIC: u32 = 29; -pub const V_OFFSET_T2_IS_PATH: u32 = 28; -pub const V_OFFSET_T2_DYNAMIC: u32 = 12; - -pub const V_OFFSET_T3_IS_PATH: u32 = 63; -pub const V_OFFSET_T3_DYNAMIC: u32 = 47; -pub const V_OFFSET_T4_IS_PATH: u32 = 46; -pub const V_OFFSET_T4_DYNAMIC: u32 = 30; -pub const V_OFFSET_T5_IS_PATH: u32 = 29; -pub const V_OFFSET_T5_DYNAMIC: u32 = 13; - -// --- value record masks --- - -pub const V_MASK_IS_TEMPLATE: u64 = 0x1; -pub const V_MASK_IS_PATH: u64 = 0x1; -pub const V_MASK_DYNAMIC: u64 = 0xFFFF; - -// --- static --- - -pub const ROOT_NULL: u64 = 0b00; // means field key -pub const ROOT_LOAD: u64 = 0b01; -pub const ROOT_STORE: u64 = 0b10; -pub const ROOT_STATE: u64 = 0b11; - -pub const CLIENT_NULL: u64 = 0b0000; -pub const CLIENT_STATE: u64 = 0b0001; -pub const CLIENT_IN_MEMORY: u64 = 0b0010; -pub const CLIENT_ENV: u64 = 0b0011; -pub const CLIENT_KVS: u64 = 0b0100; -pub const CLIENT_DB: u64 = 0b0101; -pub const CLIENT_HTTP: u64 = 0b0110; -pub const CLIENT_FILE: u64 = 0b0111; - -pub const PROP_NULL: u64 = 0b0000; -pub const PROP_TYPE: u64 = 0b0001; -pub const PROP_KEY: u64 = 0b0010; -pub const PROP_CONNECTION: u64 = 0b0011; -pub const PROP_MAP: u64 = 0b0100; -pub const PROP_TTL: u64 = 0b0101; -pub const PROP_TABLE: u64 = 0b0110; -pub const PROP_WHERE: u64 = 0b0111; -pub const PROP_URL: u64 = 0b1000; -pub const PROP_HEADERS: u64 = 0b1001; - -pub const TYPE_NULL: u64 = 0b00000; -pub const TYPE_I32: u64 = 0b00100; -pub const TYPE_I64: u64 = 
0b00101; // "integer" -pub const TYPE_U32: u64 = 0b00110; -pub const TYPE_U64: u64 = 0b00111; -pub const TYPE_UTF8: u64 = 0b01000; // "string" -pub const TYPE_ASCII: u64 = 0b01001; -pub const TYPE_DATETIME: u64 = 0b01010; -pub const TYPE_F32: u64 = 0b01100; -pub const TYPE_F64: u64 = 0b01101; // "float" -pub const TYPE_BOOLEAN: u64 = 0b11100; - -pub fn new() -> u64 { - 0 -} - -pub fn get(ko: u64, offset: u32, mask: u64) -> u64 { - (ko >> offset) & mask -} - -pub fn set(ko: u64, offset: u32, mask: u64, value: u64) -> u64 { - (ko & !(mask << offset)) | ((value & mask) << offset) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_set_get_independent_fields() { - let ko = new(); - let ko = set(ko, K_OFFSET_ROOT, K_MASK_ROOT, 0b01); - assert_eq!(get(ko, K_OFFSET_ROOT, K_MASK_ROOT), 0b01); - - let ko = set(ko, K_OFFSET_CLIENT, K_MASK_CLIENT, 0b0101); - assert_eq!(get(ko, K_OFFSET_CLIENT, K_MASK_CLIENT), 0b0101); - assert_eq!(get(ko, K_OFFSET_ROOT, K_MASK_ROOT), 0b01); - } - - #[test] - fn test_set_clamps_to_field_width() { - let ko = set(new(), K_OFFSET_ROOT, K_MASK_ROOT, 0xFF); - assert_eq!(get(ko, K_OFFSET_ROOT, K_MASK_ROOT), K_MASK_ROOT); - } -} diff --git a/src/unused/manifest.rs b/src/unused/manifest.rs deleted file mode 100644 index 65703eb..0000000 --- a/src/unused/manifest.rs +++ /dev/null @@ -1,668 +0,0 @@ -extern crate alloc; -use alloc::string::String; -use alloc::vec::Vec; -use alloc::collections::BTreeMap; - -use super::codec; -use super::fixed_bits; -use super::pool::DynamicPool; -use super::parser::ParsedManifest; - -/// A single token in a template value. -#[derive(Debug)] -pub enum TemplateToken { - /// Literal byte sequence. - Literal(Vec), - /// A path placeholder — intern-index sequence (file segment + field segments). - Path(Vec), -} - -/// A resolved or unresolved config value produced by `build_config`. -/// State layer is responsible for resolving `Path` and `Template` variants. 
-#[derive(Debug)] -pub enum ConfigValue { - /// Static string value (no placeholder resolution needed). - Str(String), - /// A single path reference — intern-index sequence to resolve via State. - Path(Vec), - /// A mixed literal+path template — tokens to resolve and concatenate. - Template(Vec), - /// A map of (yaml_key → db_column) pairs. - Map(Vec<(String, String)>), - /// Numeric client id. - Client(u64), -} - -/// Owns all parsed manifest data and provides decode queries. -/// Pure logic — no I/O, no std, no serde_json. -pub struct Manifest { - pub files: BTreeMap, - pub dynamic: DynamicPool, - pub keys: Vec, - pub values: Vec<[u64; 2]>, - pub path_map: Vec>, - pub children_map: Vec>, -} - -impl Manifest { - pub fn new() -> Self { - Self { - files: BTreeMap::new(), - dynamic: DynamicPool::new(), - keys: alloc::vec![0], - values: alloc::vec![[0, 0]], - path_map: alloc::vec![alloc::vec![]], - children_map: alloc::vec![alloc::vec![]], - } - } - - pub fn is_loaded(&self, file: &str) -> bool { - self.files.contains_key(file) - } - - pub fn insert(&mut self, file: String, pm: ParsedManifest) { - self.files.insert(file, pm); - } - - /// Returns the direct field-key and meta-key children indices of a record. - pub fn children_of(&self, record: u64) -> Vec { - let child_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as usize; - if child_idx == 0 { - return alloc::vec![]; - } - let has_children = fixed_bits::get(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN); - if has_children == 1 { - self.children_map.get(child_idx) - .map(|s| s.to_vec()) - .unwrap_or_default() - } else { - alloc::vec![child_idx as u16] - } - } - - /// Looks up a key record index by dot-separated path within a file. 
- pub fn find(&self, file: &str, path: &str) -> Option { - let file_idx = self.files.get(file)?.file_key_idx; - let file_record = self.keys.get(file_idx as usize).copied()?; - - if path.is_empty() { - return Some(file_idx); - } - - let segments: Vec<&str> = path.split('.').collect(); - let top_level = self.children_of(file_record); - self.find_in(&segments, &top_level) - } - - fn find_in(&self, segments: &[&str], candidates: &[u16]) -> Option { - let target = segments[0].as_bytes(); - let rest = &segments[1..]; - - for &idx in candidates { - let record = self.keys.get(idx as usize).copied()?; - if fixed_bits::get(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT) != fixed_bits::ROOT_NULL { - continue; - } - let dyn_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - if self.dynamic.get(dyn_idx)? != target { - continue; - } - if rest.is_empty() { - return Some(idx); - } - let next = self.children_of(record); - if next.is_empty() { - return None; - } - return self.find_in(rest, &next); - } - None - } - - /// Returns meta record indices (_load/_store/_state) for a dot-path node. - /// Collects from root to node; child overrides parent. 
- pub fn get_meta(&self, file: &str, path: &str) -> MetaIndices { - let file_idx = match self.files.get(file) { - Some(pm) => pm.file_key_idx, - None => return MetaIndices::default(), - }; - let file_record = match self.keys.get(file_idx as usize).copied() { - Some(r) => r, - None => return MetaIndices::default(), - }; - - let segments: Vec<&str> = if path.is_empty() { alloc::vec![] } else { path.split('.').collect() }; - let mut meta = MetaIndices::default(); - self.collect_meta(file_record, file_idx, &mut meta); - - let mut candidates = self.children_of(file_record); - for segment in &segments { - let mut found_idx = None; - for &idx in &candidates { - let record = match self.keys.get(idx as usize).copied() { - Some(r) => r, - None => continue, - }; - if fixed_bits::get(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT) != fixed_bits::ROOT_NULL { - continue; - } - let dyn_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - if self.dynamic.get(dyn_idx) == Some(segment.as_bytes()) { - self.collect_meta(record, idx, &mut meta); - found_idx = Some(idx); - break; - } - } - match found_idx { - Some(idx) => { - let record = self.keys[idx as usize]; - candidates = self.children_of(record); - } - None => return MetaIndices::default(), - } - } - meta - } - - fn collect_meta(&self, record: u64, node_idx: u16, meta: &mut MetaIndices) { - for &idx in &self.children_of(record) { - let child = match self.keys.get(idx as usize).copied() { - Some(r) => r, - None => continue, - }; - let root = fixed_bits::get(child, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT); - match root { - fixed_bits::ROOT_LOAD => { meta.load = Some(idx); meta.load_owner = node_idx; } - fixed_bits::ROOT_STORE => { meta.store = Some(idx); meta.store_owner = node_idx; } - fixed_bits::ROOT_STATE => { meta.state = Some(idx); } - _ => {} - } - } - } - - /// Returns the client id encoded in a meta record (e.g. _load or _store). 
- pub fn get_client(&self, meta_idx: u16) -> u64 { - let record = match self.keys.get(meta_idx as usize).copied() { - Some(r) => r, - None => return fixed_bits::CLIENT_NULL, - }; - // client is stored directly on the meta record's children - for &child_idx in &self.children_of(record) { - let child = match self.keys.get(child_idx as usize).copied() { - Some(r) => r, - None => continue, - }; - let client = fixed_bits::get(child, fixed_bits::K_OFFSET_CLIENT, fixed_bits::K_MASK_CLIENT); - if client != fixed_bits::CLIENT_NULL { - return client; - } - } - fixed_bits::CLIENT_NULL - } - - /// Decodes a meta record into a list of (prop_name, ConfigValue) pairs. - /// The caller (State) is responsible for resolving any `ConfigValue::Placeholder` entries. - pub fn build_config(&self, meta_idx: u16) -> Option> { - let record = self.keys.get(meta_idx as usize).copied()?; - let child_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as usize; - if child_idx == 0 { - return None; - } - let children = if fixed_bits::get(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN) == 1 { - self.children_map.get(child_idx)?.to_vec() - } else { - alloc::vec![child_idx as u16] - }; - - let mut entries: Vec<(String, ConfigValue)> = alloc::vec![]; - - for &child_idx in &children { - let child_record = match self.keys.get(child_idx as usize).copied() { - Some(r) => r, - None => continue, - }; - let prop = fixed_bits::get(child_record, fixed_bits::K_OFFSET_PROP, fixed_bits::K_MASK_PROP) as u8; - let client = fixed_bits::get(child_record, fixed_bits::K_OFFSET_CLIENT, fixed_bits::K_MASK_CLIENT); - let is_leaf = fixed_bits::get(child_record, fixed_bits::K_OFFSET_IS_LEAF, fixed_bits::K_MASK_IS_LEAF) == 1; - let value_idx = if is_leaf { - fixed_bits::get(child_record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as u16 - } else { 0 }; - - if client != fixed_bits::CLIENT_NULL { - entries.push(("client".into(), 
ConfigValue::Client(client))); - continue; - } - - if prop as u64 == fixed_bits::PROP_NULL { continue; } - - if prop as u64 == fixed_bits::PROP_MAP { - if let Some(pairs) = self.decode_map(child_idx) { - entries.push(("map".into(), ConfigValue::Map(pairs))); - } - } else if prop as u64 == fixed_bits::PROP_CONNECTION { - if value_idx != 0 { - if let Some(cv) = self.decode_value(value_idx) { - entries.push(("connection".into(), cv)); - } - } - } else if value_idx != 0 { - if let Some(cv) = self.decode_value(value_idx) { - if let Some((name_bytes, _)) = codec::PROP_NAMES.iter().find(|(_, v)| *v == prop as u64) { - let name = String::from_utf8_lossy(name_bytes).into_owned(); - entries.push((name, cv)); - } - } - } - } - - Some(entries) - } - - /// Decodes a map prop's children into (yaml_key, db_column) pairs. - pub fn decode_map(&self, map_idx: u16) -> Option> { - let record = self.keys.get(map_idx as usize).copied()?; - let child_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as usize; - if child_idx == 0 { - return Some(alloc::vec![]); - } - let children = if fixed_bits::get(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN) == 1 { - self.children_map.get(child_idx)?.to_vec() - } else { - alloc::vec![child_idx as u16] - }; - - let mut pairs = alloc::vec![]; - for &c in &children { - let child = self.keys.get(c as usize).copied()?; - let dyn_idx = fixed_bits::get(child, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - let value_idx = fixed_bits::get(child, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as usize; - let is_path = fixed_bits::get(child, fixed_bits::K_OFFSET_IS_PATH, fixed_bits::K_MASK_IS_PATH) == 1; - - let key_str = if is_path { - let segs = self.path_map.get(dyn_idx as usize)?; - let parts: Vec<&str> = segs.iter() - .filter_map(|&s| self.dynamic.get(s).and_then(|b| core::str::from_utf8(b).ok())) - .collect(); - parts.join(".") - } else { - let b = 
self.dynamic.get(dyn_idx)?; - String::from_utf8_lossy(b).into_owned() - }; - - let val_vo = self.values.get(value_idx).copied()?; - let col_dyn = fixed_bits::get(val_vo[0], fixed_bits::V_OFFSET_T0_DYNAMIC, fixed_bits::V_MASK_DYNAMIC) as u16; - let col_b = self.dynamic.get(col_dyn)?; - let col_str = String::from_utf8_lossy(col_b).into_owned(); - - pairs.push((key_str, col_str)); - } - Some(pairs) - } - - /// Decodes a value record into a ConfigValue. - /// - Single path token (non-template) → `Path(Vec)` (intern-index segments) - /// - Mixed literal+path (template) → `Template(Vec)` - /// - Pure literal (no paths) → `Str(String)` - pub fn decode_value(&self, value_idx: u16) -> Option { - const TOKEN_OFFSETS: [(u32, u32); 6] = [ - (fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_OFFSET_T0_DYNAMIC), - (fixed_bits::V_OFFSET_T1_IS_PATH, fixed_bits::V_OFFSET_T1_DYNAMIC), - (fixed_bits::V_OFFSET_T2_IS_PATH, fixed_bits::V_OFFSET_T2_DYNAMIC), - (fixed_bits::V_OFFSET_T3_IS_PATH, fixed_bits::V_OFFSET_T3_DYNAMIC), - (fixed_bits::V_OFFSET_T4_IS_PATH, fixed_bits::V_OFFSET_T4_DYNAMIC), - (fixed_bits::V_OFFSET_T5_IS_PATH, fixed_bits::V_OFFSET_T5_DYNAMIC), - ]; - - let vo = self.values.get(value_idx as usize).copied()?; - let is_template = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_IS_TEMPLATE, fixed_bits::V_MASK_IS_TEMPLATE) == 1; - let is_path0 = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_MASK_IS_PATH) == 1; - let dyn_idx0 = fixed_bits::get(vo[0], fixed_bits::V_OFFSET_T0_DYNAMIC, fixed_bits::V_MASK_DYNAMIC) as u16; - - // single pure path reference (non-template) → Path - if is_path0 && dyn_idx0 != 0 && !is_template { - let segs = self.path_map.get(dyn_idx0 as usize)?.clone(); - return Some(ConfigValue::Path(segs)); - } - - // template: collect tokens as TemplateToken list - if is_template { - let mut tokens = Vec::new(); - for (i, (off_is_path, off_dynamic)) in TOKEN_OFFSETS.iter().enumerate() { - let word = if i < 3 { 0 } else { 1 }; - let is_path = 
fixed_bits::get(vo[word], *off_is_path, fixed_bits::V_MASK_IS_PATH) == 1; - let dyn_idx = fixed_bits::get(vo[word], *off_dynamic, fixed_bits::V_MASK_DYNAMIC) as u16; - if dyn_idx == 0 { break; } - if is_path { - let segs = self.path_map.get(dyn_idx as usize)?.clone(); - tokens.push(TemplateToken::Path(segs)); - } else { - let b = self.dynamic.get(dyn_idx)?.to_vec(); - tokens.push(TemplateToken::Literal(b)); - } - } - return Some(ConfigValue::Template(tokens)); - } - - // pure literal - let b = self.dynamic.get(dyn_idx0)?.to_vec(); - Some(ConfigValue::Str(String::from_utf8_lossy(&b).into_owned())) - } - - /// Reconstructs a dot-joined key string from intern-index segments. - /// Returns an error string if any segment index is invalid. - pub fn segs_to_key(&self, segs: &[u16]) -> Result { - let mut parts = Vec::with_capacity(segs.len()); - for &s in segs { - let b = self.dynamic.get(s) - .ok_or_else(|| crate::ports::provided::StateError::KeyNotFound(format!("invalid segment index {}", s)))?; - parts.push(String::from_utf8_lossy(b).into_owned()); - } - Ok(parts.join(".")) - } - - /// Finds a field-key record by intern-index segment list. - /// `segs[0]` must be the file name segment, `segs[1..]` are the field path. 
- pub fn find_by_segs(&self, segs: &[u16]) -> Option { - if segs.is_empty() { return None; } - let file_name = self.dynamic.get(segs[0])?; - let file_str = core::str::from_utf8(file_name).ok()?; - let file_idx = self.files.get(file_str)?.file_key_idx; - if segs.len() == 1 { - return Some(file_idx); - } - let file_record = self.keys.get(file_idx as usize).copied()?; - let top_level = self.children_of(file_record); - self.find_in_by_segs(&segs[1..], &top_level) - } - - fn find_in_by_segs(&self, segs: &[u16], candidates: &[u16]) -> Option { - let target_idx = segs[0]; - let rest = &segs[1..]; - for &idx in candidates { - let record = self.keys.get(idx as usize).copied()?; - if fixed_bits::get(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT) != fixed_bits::ROOT_NULL { - continue; - } - let dyn_idx = fixed_bits::get(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - if dyn_idx != target_idx { - continue; - } - if rest.is_empty() { - return Some(idx); - } - let next = self.children_of(record); - if next.is_empty() { - return None; - } - return self.find_in_by_segs(rest, &next); - } - None - } -} - -impl Default for Manifest { - fn default() -> Self { - Self::new() - } -} - -/// Indices of meta records for a given node, collected from root to node (child overrides parent). -/// `load_owner` / `store_owner` are the key_idx of the node that directly defines `_load` / `_store`. -#[derive(Debug, Default)] -pub struct MetaIndices { - pub load: Option, - pub load_owner: u16, - pub store: Option, - pub store_owner: u16, - pub state: Option, -} - -#[cfg(test)] -mod tests { - use super::*; - use super::super::parser::{Value, parse}; - - /// Builds a Manifest from a inline DSL mapping. - /// `entries` is the top-level key→subtree mapping for a single file. 
- fn make(filename: &str, entries: Vec<(&str, Value)>) -> Manifest { - let mut m = Manifest::new(); - let root = Value::Mapping(entries.into_iter().map(|(k, v)| (k.as_bytes().to_vec(), v)).collect()); - let pm = parse(filename, root, &mut m.dynamic, &mut m.keys, &mut m.values, &mut m.path_map, &mut m.children_map).unwrap(); - m.insert(filename.to_string(), pm); - m - } - - fn scalar(s: &str) -> Value { Value::Scalar(s.as_bytes().to_vec()) } - fn mapping(entries: Vec<(&str, Value)>) -> Value { - Value::Mapping(entries.into_iter().map(|(k, v)| (k.as_bytes().to_vec(), v)).collect()) - } - - fn cache_manifest() -> Manifest { - make("cache", vec![ - ("user", mapping(vec![ - ("_store", mapping(vec![ - ("client", scalar("KVS")), - ("key", scalar("user:${session.sso_user_id}")), - ])), - ("_load", mapping(vec![ - ("client", scalar("Db")), - ("connection", scalar("${connection.tenant}")), - ("table", scalar("users")), - ("map", mapping(vec![ - ("id", scalar("id")), - ("org_id", scalar("sso_org_id")), - ])), - ])), - ("id", mapping(vec![ - ("_state", mapping(vec![("type", scalar("integer"))])), - ])), - ("tenant_id", mapping(vec![ - ("_state", mapping(vec![("type", scalar("integer"))])), - ("_load", mapping(vec![ - ("client", scalar("State")), - ("key", scalar("${org_id}")), - ])), - ])), - ])), - ]) - } - - // --- find --- - - #[test] - fn test_find_file_not_loaded_returns_none() { - let m = Manifest::new(); - assert!(m.find("cache", "user").is_none()); - } - - #[test] - fn test_find_top_level() { - let m = cache_manifest(); - assert!(m.find("cache", "user").is_some()); - } - - #[test] - fn test_find_nested() { - let m = cache_manifest(); - assert!(m.find("cache", "user.id").is_some()); - } - - #[test] - fn test_find_unknown_returns_none() { - let m = cache_manifest(); - assert!(m.find("cache", "nonexistent").is_none()); - } - - #[test] - fn test_find_unknown_nested_returns_none() { - let m = cache_manifest(); - assert!(m.find("cache", "user.nonexistent").is_none()); - } - 
- #[test] - fn test_find_unique_indices_across_files() { - let mut m = cache_manifest(); - let root2 = Value::Mapping(vec![ - (b"common".to_vec(), Value::Mapping(vec![ - (b"_store".to_vec(), Value::Mapping(vec![ - (b"client".to_vec(), Value::Scalar(b"InMemory".to_vec())), - ])), - ])), - ]); - let pm2 = parse("connection", root2, &mut m.dynamic, &mut m.keys, &mut m.values, &mut m.path_map, &mut m.children_map).unwrap(); - m.insert("connection".to_string(), pm2); - - let cache_idx = m.find("cache", "user").unwrap(); - let conn_idx = m.find("connection", "common").unwrap(); - assert_ne!(cache_idx, conn_idx); - } - - // --- get_meta --- - - #[test] - fn test_get_meta_has_load_and_store() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - assert!(meta.load.is_some()); - assert!(meta.store.is_some()); - } - - #[test] - fn test_get_meta_leaf_has_state() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user.id"); - assert!(meta.state.is_some()); - } - - #[test] - fn test_get_meta_child_inherits_parent_store() { - let m = cache_manifest(); - let parent = m.get_meta("cache", "user"); - let child = m.get_meta("cache", "user.id"); - assert!(child.store.is_some()); - assert_eq!(child.store, parent.store); - } - - #[test] - fn test_get_meta_child_overrides_parent_load() { - let m = cache_manifest(); - let parent = m.get_meta("cache", "user"); - let child = m.get_meta("cache", "user.tenant_id"); - assert!(child.load.is_some()); - assert_ne!(child.load, parent.load); - } - - #[test] - fn test_get_meta_unknown_path_returns_default() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "nonexistent"); - assert!(meta.load.is_none()); - assert!(meta.store.is_none()); - assert!(meta.state.is_none()); - } - - #[test] - fn test_get_meta_file_not_loaded_returns_default() { - let m = Manifest::new(); - let meta = m.get_meta("cache", "user"); - assert!(meta.load.is_none()); - } - - // --- get_client --- - - #[test] - fn test_get_client_kvs() 
{ - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let client = m.get_client(meta.store.unwrap()); - assert_eq!(client, super::super::fixed_bits::CLIENT_KVS); - } - - #[test] - fn test_get_client_db() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let client = m.get_client(meta.load.unwrap()); - assert_eq!(client, super::super::fixed_bits::CLIENT_DB); - } - - #[test] - fn test_get_client_state() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user.tenant_id"); - let client = m.get_client(meta.load.unwrap()); - assert_eq!(client, super::super::fixed_bits::CLIENT_STATE); - } - - // --- build_config --- - - #[test] - fn test_build_config_contains_client() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.store.unwrap()).unwrap(); - assert!(entries.iter().any(|(k, _)| k == "client")); - } - - #[test] - fn test_build_config_connection_is_path() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.load.unwrap()).unwrap(); - let conn = entries.iter().find(|(k, _)| k == "connection"); - assert!(matches!(conn, Some((_, ConfigValue::Path(_))))); - } - - #[test] - fn test_build_config_map_is_map_variant() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.load.unwrap()).unwrap(); - let map = entries.iter().find(|(k, _)| k == "map"); - assert!(matches!(map, Some((_, ConfigValue::Map(_))))); - if let Some((_, ConfigValue::Map(pairs))) = map { - assert!(!pairs.is_empty()); - } - } - - #[test] - fn test_build_config_key_with_template_is_template() { - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.store.unwrap()).unwrap(); - let key = entries.iter().find(|(k, _)| k == "key"); - assert!(matches!(key, Some((_, ConfigValue::Template(_))))); - } - - // --- decode_value --- - - #[test] - fn 
test_decode_value_single_path() { - // connection: ${connection.tenant} → Path with segments ["connection", "tenant"] - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.load.unwrap()).unwrap(); - let conn = entries.iter().find(|(k, _)| k == "connection"); - if let Some((_, ConfigValue::Path(segs))) = conn { - let key = m.segs_to_key(segs).unwrap(); - assert_eq!(key, "connection.tenant"); - } else { - panic!("expected Path"); - } - } - - #[test] - fn test_decode_value_template_is_template_variant() { - // key: "user:${session.sso_user_id}" → Template with Literal + Path tokens - let m = cache_manifest(); - let meta = m.get_meta("cache", "user"); - let entries = m.build_config(meta.store.unwrap()).unwrap(); - let key = entries.iter().find(|(k, _)| k == "key"); - if let Some((_, ConfigValue::Template(tokens))) = key { - assert!(tokens.iter().any(|t| matches!(t, TemplateToken::Literal(_)))); - assert!(tokens.iter().any(|t| matches!(t, TemplateToken::Path(_)))); - } else { - panic!("expected Template"); - } - } -} diff --git a/src/unused/mod.rs b/src/unused/mod.rs deleted file mode 100644 index d1a13d0..0000000 --- a/src/unused/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -// #![no_std] -pub mod fixed_bits; -pub mod codec; -pub mod pool; -pub mod parser; -pub mod manifest; \ No newline at end of file diff --git a/src/unused/parser.rs b/src/unused/parser.rs deleted file mode 100644 index 85dcd87..0000000 --- a/src/unused/parser.rs +++ /dev/null @@ -1,593 +0,0 @@ -extern crate alloc; -use alloc::string::{String, ToString}; -use alloc::vec::Vec; -use alloc::format; - -use super::pool::DynamicPool; -use super::fixed_bits; -use super::codec; - -/// Re-export the public Value type for use in parsing. -pub use crate::ports::provided::Value; - -/// Thin record for a single loaded manifest file. -/// Stores only the key_idx of the file root record in the shared keys vec. 
-pub struct ParsedManifest { - pub file_key_idx: u16, -} - -/// Parses a manifest value tree, appending into caller-owned vecs. -/// Returns a `ParsedManifest` referencing the file root record's index. -/// -/// - `keys`: Vec — fixed-bits key records -/// - `values`: Vec<[u64; 2]> — fixed-bits value records -/// - `path_map`: Vec> — path segment index sequences -/// - `children_map`: Vec> — multi-child index lists -/// -/// Index 0 of each vec is reserved as null by the caller. -pub fn parse( - filename: &str, - root: Value, - dynamic: &mut DynamicPool, - keys: &mut Vec, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, - children_map: &mut Vec>, -) -> Result { - let Value::Mapping(mapping) = root else { - return Err("DSL root must be a mapping".to_string()); - }; - - - // filename root record (placeholder, child index filled below) - let dyn_idx = dynamic.intern(filename.as_bytes()); - let mut file_record = fixed_bits::new(); - file_record = fixed_bits::set(file_record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC, dyn_idx as u64); - let file_idx = keys.len() as u16; - keys.push(file_record); - - // traverse top-level keys - let mut child_indices: Vec = Vec::new(); - for (key_bytes, value) in &mapping { - let child_idx = traverse_field_key(key_bytes, value, filename, &[], dynamic, keys, values, path_map, children_map)?; - child_indices.push(child_idx); - } - - // update file record with children - let file_record = keys[file_idx as usize]; - let file_record = match child_indices.len() { - 0 => file_record, - 1 => fixed_bits::set(file_record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, child_indices[0] as u64), - _ => { - let children_idx = children_map.len() as u16; - children_map.push(child_indices); - let r = fixed_bits::set(file_record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN, 1); - fixed_bits::set(r, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, children_idx as u64) - } - }; - keys[file_idx as usize] 
= file_record; - - Ok(ParsedManifest { file_key_idx: file_idx }) -} - -/// Traverses a field key node (non-meta key). -/// `ancestors` excludes filename — only field key path segments (for qualify). -fn traverse_field_key( - key_bytes: &[u8], - value: &Value, - filename: &str, - ancestors: &[&[u8]], - dynamic: &mut DynamicPool, - keys: &mut Vec, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, - children_map: &mut Vec>, -) -> Result { - let dyn_idx = dynamic.intern(key_bytes); - let mut record = fixed_bits::new(); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT, fixed_bits::ROOT_NULL); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC, dyn_idx as u64); - - let key_idx = keys.len() as u16; - keys.push(record); - - let mut current: Vec<&[u8]> = ancestors.to_vec(); - current.push(key_bytes); - - if let Value::Mapping(mapping) = value { - let mut child_indices: Vec = Vec::new(); - let mut meta_indices: Vec = Vec::new(); - - for (k_bytes, v) in mapping { - if k_bytes.first() == Some(&b'_') { - let meta_idx = traverse_meta_key(k_bytes, v, filename, ancestors, dynamic, keys, values, path_map, children_map)?; - meta_indices.push(meta_idx); - } else { - let child_idx = traverse_field_key(k_bytes, v, filename, ¤t, dynamic, keys, values, path_map, children_map)?; - child_indices.push(child_idx); - } - } - - let all_children: Vec = child_indices.iter() - .chain(meta_indices.iter()) - .copied() - .collect(); - - let record = keys[key_idx as usize]; - let record = match all_children.len() { - 0 => record, - 1 => fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, all_children[0] as u64), - _ => { - let children_idx = children_map.len() as u16; - children_map.push(all_children); - let r = fixed_bits::set(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN, 1); - fixed_bits::set(r, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, children_idx as 
u64) - } - }; - keys[key_idx as usize] = record; - } else { - // scalar value → is_leaf - let val_idx = build_yaml_value(value, filename, ancestors, dynamic, values, path_map)?; - let record = keys[key_idx as usize]; - let record = fixed_bits::set(record, fixed_bits::K_OFFSET_IS_LEAF, fixed_bits::K_MASK_IS_LEAF, 1); - let record = fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, val_idx as u64); - keys[key_idx as usize] = record; - } - - Ok(key_idx) -} - -/// Traverses a meta key node (_load, _store, _state). -fn traverse_meta_key( - key_bytes: &[u8], - value: &Value, - filename: &str, - ancestors: &[&[u8]], - dynamic: &mut DynamicPool, - keys: &mut Vec, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, - children_map: &mut Vec>, -) -> Result { - let root_val = codec::root_encode(key_bytes); - - let mut record = fixed_bits::new(); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT, root_val); - - let key_idx = keys.len() as u16; - keys.push(record); - - if let Value::Mapping(mapping) = value { - let mut child_indices: Vec = Vec::new(); - - for (k_bytes, v) in mapping { - let child_idx = traverse_prop_key(k_bytes, v, filename, ancestors, dynamic, keys, values, path_map, children_map)?; - child_indices.push(child_idx); - } - - let record = keys[key_idx as usize]; - let record = match child_indices.len() { - 0 => record, - 1 => fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, child_indices[0] as u64), - _ => { - let children_idx = children_map.len() as u16; - children_map.push(child_indices); - let r = fixed_bits::set(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN, 1); - fixed_bits::set(r, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, children_idx as u64) - } - }; - keys[key_idx as usize] = record; - } - - Ok(key_idx) -} - -/// Traverses a prop key node (client, key, ttl, table, connection, where, map, type). 
-fn traverse_prop_key( - key_bytes: &[u8], - value: &Value, - filename: &str, - ancestors: &[&[u8]], - dynamic: &mut DynamicPool, - keys: &mut Vec, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, - children_map: &mut Vec>, -) -> Result { - let (prop_val, client_val) = if key_bytes == b"client" { - (fixed_bits::PROP_NULL, codec::client_encode( - match value { Value::Scalar(s) => s.as_slice(), _ => b"" } - )) - } else { - (codec::prop_encode(key_bytes), fixed_bits::CLIENT_NULL) - }; - - let mut record = fixed_bits::new(); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_PROP, fixed_bits::K_MASK_PROP, prop_val); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_CLIENT, fixed_bits::K_MASK_CLIENT, client_val); - - if key_bytes == b"type" { - let type_val = codec::type_encode( - match value { Value::Scalar(s) => s.as_slice(), _ => b"" } - ); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_TYPE, fixed_bits::K_MASK_TYPE, type_val); - } - - let key_idx = keys.len() as u16; - keys.push(record); - - if key_bytes == b"map" { - if let Value::Mapping(mapping) = value { - let mut child_indices: Vec = Vec::new(); - for (k_bytes, v) in mapping { - let child_idx = traverse_map_key(k_bytes, v, filename, ancestors, dynamic, keys, values, path_map)?; - child_indices.push(child_idx); - } - let record = keys[key_idx as usize]; - let record = match child_indices.len() { - 0 => record, - 1 => fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, child_indices[0] as u64), - _ => { - let children_idx = children_map.len() as u16; - children_map.push(child_indices); - let r = fixed_bits::set(record, fixed_bits::K_OFFSET_HAS_CHILDREN, fixed_bits::K_MASK_HAS_CHILDREN, 1); - fixed_bits::set(r, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, children_idx as u64) - } - }; - keys[key_idx as usize] = record; - } - } else if key_bytes != b"client" { - let val_idx = build_yaml_value(value, filename, ancestors, dynamic, values, path_map)?; - let 
record = keys[key_idx as usize]; - let record = fixed_bits::set(record, fixed_bits::K_OFFSET_IS_LEAF, fixed_bits::K_MASK_IS_LEAF, 1); - let record = fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, val_idx as u64); - keys[key_idx as usize] = record; - } - - Ok(key_idx) -} - -/// Traverses a map child key (is_path=true). -fn traverse_map_key( - key_bytes: &[u8], - value: &Value, - filename: &str, - ancestors: &[&[u8]], - dynamic: &mut DynamicPool, - keys: &mut Vec, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, -) -> Result { - let qualified = build_qualified_path(filename, ancestors, key_bytes); - let seg_indices: Vec = qualified.split(|&b| b == b'.') - .map(|seg| dynamic.intern(seg)) - .collect(); - let path_idx = path_map.len() as u16; - path_map.push(seg_indices); - - let mut record = fixed_bits::new(); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_IS_PATH, fixed_bits::K_MASK_IS_PATH, 1); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC, path_idx as u64); - - let val_idx = build_yaml_value(value, filename, ancestors, dynamic, values, path_map)?; - record = fixed_bits::set(record, fixed_bits::K_OFFSET_IS_LEAF, fixed_bits::K_MASK_IS_LEAF, 1); - record = fixed_bits::set(record, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD, val_idx as u64); - - let key_idx = keys.len() as u16; - keys.push(record); - Ok(key_idx) -} - -/// Builds a YAML value record ([u64; 2]) from a scalar or template string. 
-fn build_yaml_value( - value: &Value, - filename: &str, - ancestors: &[&[u8]], - dynamic: &mut DynamicPool, - values: &mut Vec<[u64; 2]>, - path_map: &mut Vec>, -) -> Result { - let s = match value { - Value::Scalar(s) => s.clone(), - Value::Null => return Ok(0), - Value::Mapping(_) => return Err("unexpected mapping as scalar value".to_string()), - Value::Sequence(_) => return Err("unexpected sequence as scalar value".to_string()), - }; - - let tokens = split_template(&s); - if tokens.len() > 6 { - return Err(format!("value has {} tokens, max 6", tokens.len())); - } - let is_template = tokens.len() > 1; - - let mut vo = [0u64; 2]; - - if is_template { - vo[0] = fixed_bits::set(vo[0], fixed_bits::V_OFFSET_IS_TEMPLATE, fixed_bits::V_MASK_IS_TEMPLATE, 1); - } - - const TOKEN_OFFSETS: [(u32, u32); 6] = [ - (fixed_bits::V_OFFSET_T0_IS_PATH, fixed_bits::V_OFFSET_T0_DYNAMIC), - (fixed_bits::V_OFFSET_T1_IS_PATH, fixed_bits::V_OFFSET_T1_DYNAMIC), - (fixed_bits::V_OFFSET_T2_IS_PATH, fixed_bits::V_OFFSET_T2_DYNAMIC), - (fixed_bits::V_OFFSET_T3_IS_PATH, fixed_bits::V_OFFSET_T3_DYNAMIC), - (fixed_bits::V_OFFSET_T4_IS_PATH, fixed_bits::V_OFFSET_T4_DYNAMIC), - (fixed_bits::V_OFFSET_T5_IS_PATH, fixed_bits::V_OFFSET_T5_DYNAMIC), - ]; - - for (i, token) in tokens.iter().enumerate().take(6) { - let dyn_idx = if token.is_path { - let qualified = qualify_path(&token.text, filename, ancestors); - let seg_indices: Vec = qualified.split(|&b| b == b'.') - .map(|seg| dynamic.intern(seg)) - .collect(); - let path_idx = path_map.len() as u16; - path_map.push(seg_indices); - path_idx - } else { - dynamic.intern(&token.text) - }; - - let word = if i < 3 { 0 } else { 1 }; - let (off_is_path, off_dynamic) = TOKEN_OFFSETS[i]; - vo[word] = fixed_bits::set(vo[word], off_is_path, fixed_bits::V_MASK_IS_PATH, token.is_path as u64); - vo[word] = fixed_bits::set(vo[word], off_dynamic, fixed_bits::V_MASK_DYNAMIC, dyn_idx as u64); - } - - let val_idx = values.len() as u16; - values.push(vo); - Ok(val_idx) 
-} - - -/// A single template token: either a literal byte sequence or a path placeholder. -struct Token { - text: Vec, - is_path: bool, -} - -/// Splits a byte slice by `${}` placeholders into tokens. -/// `b"user:${session.id}"` → [Token(b"user:", false), Token(b"session.id", true)] -fn split_template(s: &[u8]) -> Vec { - let mut tokens = Vec::new(); - let mut rest = s; - - loop { - if let Some(start) = find_bytes(rest, b"${") { - if start > 0 { - tokens.push(Token { text: rest[..start].to_vec(), is_path: false }); - } - rest = &rest[start + 2..]; - if let Some(end) = rest.iter().position(|&b| b == b'}') { - tokens.push(Token { text: rest[..end].to_vec(), is_path: true }); - rest = &rest[end + 1..]; - } else { - tokens.push(Token { text: rest.to_vec(), is_path: false }); - break; - } - } else { - if !rest.is_empty() { - tokens.push(Token { text: rest.to_vec(), is_path: false }); - } - break; - } - } - - if tokens.is_empty() { - tokens.push(Token { text: s.to_vec(), is_path: false }); - } - - tokens -} - -fn find_bytes(haystack: &[u8], needle: &[u8]) -> Option { - haystack.windows(needle.len()).position(|w| w == needle) -} - -/// Qualifies a placeholder path to an absolute path. 
-fn qualify_path(path: &[u8], filename: &str, ancestors: &[&[u8]]) -> Vec { - if path.contains(&b'.') { - return path.to_vec(); - } - let mut result = filename.as_bytes().to_vec(); - for ancestor in ancestors { - result.push(b'.'); - result.extend_from_slice(ancestor); - } - result.push(b'.'); - result.extend_from_slice(path); - result -} - -/// Builds a qualified path for map keys: `filename.ancestors.key` -fn build_qualified_path(filename: &str, ancestors: &[&[u8]], key: &[u8]) -> Vec { - let mut result = filename.as_bytes().to_vec(); - for ancestor in ancestors { - result.push(b'.'); - result.extend_from_slice(ancestor); - } - result.push(b'.'); - result.extend_from_slice(key); - result -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::core::fixed_bits; - use alloc::vec::Vec; - #[allow(unused_imports)] - use alloc::vec; - - fn make_vecs() -> (DynamicPool, Vec, Vec<[u64; 2]>, Vec>, Vec>) { - (DynamicPool::new(), vec![0], vec![[0, 0]], vec![vec![]], vec![vec![]]) - } - - fn s(v: &str) -> Value { Value::Scalar(v.as_bytes().to_vec()) } - fn m(pairs: Vec<(&str, Value)>) -> Value { - Value::Mapping(pairs.into_iter().map(|(k, v)| (k.as_bytes().to_vec(), v)).collect()) - } - - // --- split_template --- - - #[test] - fn test_split_template_static() { - let tokens = split_template(b"literal"); - assert_eq!(tokens.len(), 1); - assert!(!tokens[0].is_path); - assert_eq!(tokens[0].text, b"literal"); - } - - #[test] - fn test_split_template_path_only() { - let tokens = split_template(b"${connection.tenant}"); - assert_eq!(tokens.len(), 1); - assert!(tokens[0].is_path); - assert_eq!(tokens[0].text, b"connection.tenant"); - } - - #[test] - fn test_split_template_mixed() { - let tokens = split_template(b"user:${session.id}"); - assert_eq!(tokens.len(), 2); - assert!(!tokens[0].is_path); - assert_eq!(tokens[0].text, b"user:"); - assert!(tokens[1].is_path); - assert_eq!(tokens[1].text, b"session.id"); - } - - // --- qualify_path --- - - #[test] - fn 
test_qualify_path_absolute() { - assert_eq!(qualify_path(b"connection.common", "cache", &[b"user".as_slice()]), b"connection.common"); - } - - #[test] - fn test_qualify_path_relative() { - assert_eq!(qualify_path(b"org_id", "cache", &[b"user".as_slice()]), b"cache.user.org_id"); - } - - #[test] - fn test_qualify_path_relative_no_ancestors() { - assert_eq!(qualify_path(b"org_id", "cache", &[]), b"cache.org_id"); - } - - // --- parse: field key → ROOT_NULL --- - - #[test] - fn test_field_key_root_is_null() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let root = m(vec![("foo", m(vec![]))]); - let pm = parse("f", root, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - let file_rec = keys[pm.file_key_idx as usize]; - let child_idx = fixed_bits::get(file_rec, fixed_bits::K_OFFSET_CHILD, fixed_bits::K_MASK_CHILD) as usize; - assert_eq!(fixed_bits::get(keys[child_idx], fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT), fixed_bits::ROOT_NULL); - } - - // --- parse: meta key → ROOT bits --- - - #[test] - fn test_meta_key_root_bits() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let root = m(vec![("foo", m(vec![ - ("_state", m(vec![("type", s("integer"))])), - ("_load", m(vec![("client", s("InMemory")), ("key", s("k"))])), - ("_store", m(vec![("client", s("InMemory")), ("key", s("k"))])), - ]))]); - parse("f", root, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - let roots: Vec = keys.iter().map(|&r| fixed_bits::get(r, fixed_bits::K_OFFSET_ROOT, fixed_bits::K_MASK_ROOT)).collect(); - assert!(roots.contains(&fixed_bits::ROOT_STATE)); - assert!(roots.contains(&fixed_bits::ROOT_LOAD)); - assert!(roots.contains(&fixed_bits::ROOT_STORE)); - } - - // --- parse: type encoding --- - - #[test] - fn test_type_encoding() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let 
root = m(vec![("foo", m(vec![ - ("_state", m(vec![("type", s("integer"))])), - ]))]); - parse("f", root, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - let types: Vec = keys.iter().map(|&r| fixed_bits::get(r, fixed_bits::K_OFFSET_TYPE, fixed_bits::K_MASK_TYPE)).collect(); - assert!(types.contains(&fixed_bits::TYPE_I64)); - } - - // --- parse: client encoding --- - - #[test] - fn test_client_encoding() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let root = m(vec![("foo", m(vec![ - ("_store", m(vec![("client", s("KVS")), ("key", s("k")), ("ttl", s("3600"))])), - ]))]); - parse("f", root, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - let clients: Vec = keys.iter().map(|&r| fixed_bits::get(r, fixed_bits::K_OFFSET_CLIENT, fixed_bits::K_MASK_CLIENT)).collect(); - assert!(clients.contains(&fixed_bits::CLIENT_KVS)); - } - - // --- parse: template value → is_template flag + path_map --- - - #[test] - fn test_template_value() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let root = m(vec![("foo", m(vec![ - ("_store", m(vec![("client", s("KVS")), ("key", s("foo:${session.id}"))])), - ]))]); - parse("f", root, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - let has_template = values.iter().any(|&vo| fixed_bits::get(vo[0], fixed_bits::V_OFFSET_IS_TEMPLATE, fixed_bits::V_MASK_IS_TEMPLATE) == 1); - assert!(has_template); - assert!(path_map.len() > 1); - } - - // --- parse: map key → path_map expansion --- - - #[test] - fn test_map_key_path_expansion() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let root = m(vec![("foo", m(vec![ - ("_load", m(vec![ - ("client", s("Env")), - ("map", m(vec![("host", s("DB_HOST")), ("port", s("DB_PORT"))])), - ])), - ]))]); - parse("f", root, &mut dynamic, &mut keys, &mut values, &mut 
path_map, &mut children_map).unwrap(); - - // map keys produce is_path=1 records - let has_path = keys.iter().any(|&r| fixed_bits::get(r, fixed_bits::K_OFFSET_IS_PATH, fixed_bits::K_MASK_IS_PATH) == 1); - assert!(has_path); - } - - // --- parse: two files → globally unique key indices --- - - #[test] - fn test_two_files_unique_indices() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - let a = m(vec![("x", m(vec![]))]); - let b = m(vec![("y", m(vec![]))]); - let pm_a = parse("a", a, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - let pm_b = parse("b", b, &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).unwrap(); - - assert_ne!(pm_a.file_key_idx, pm_b.file_key_idx); - - let dyn_a = fixed_bits::get(keys[pm_a.file_key_idx as usize], fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - let dyn_b = fixed_bits::get(keys[pm_b.file_key_idx as usize], fixed_bits::K_OFFSET_DYNAMIC, fixed_bits::K_MASK_DYNAMIC) as u16; - assert_eq!(dynamic.get(dyn_a), Some(b"a".as_slice())); - assert_eq!(dynamic.get(dyn_b), Some(b"b".as_slice())); - } - - // --- parse: root must be Mapping --- - - #[test] - fn test_root_must_be_mapping() { - let (mut dynamic, mut keys, mut values, mut path_map, mut children_map) = make_vecs(); - assert!(parse("f", s("bad"), &mut dynamic, &mut keys, &mut values, &mut path_map, &mut children_map).is_err()); - } -} diff --git a/src/unused/pool.rs b/src/unused/pool.rs deleted file mode 100644 index 2f8939b..0000000 --- a/src/unused/pool.rs +++ /dev/null @@ -1,64 +0,0 @@ -extern crate alloc; -use alloc::vec::Vec; - -/// Interns unique byte slices and assigns each a u16 index. -/// Index 0 is reserved as null. 
-pub struct DynamicPool { - slots: Vec>, -} - -impl DynamicPool { - pub fn new() -> Self { - let mut slots = Vec::new(); - slots.push(Vec::new()); // index 0 = null - Self { slots } - } - - pub fn intern(&mut self, s: &[u8]) -> u16 { - if let Some(idx) = self.slots.iter().position(|x| x == s) { - return idx as u16; - } - let idx = self.slots.len() as u16; - self.slots.push(s.to_vec()); - idx - } - - pub fn get(&self, index: u16) -> Option<&[u8]> { - self.slots.get(index as usize).map(|s| s.as_slice()) - } - - /// Returns the index of an already-interned byte slice, or None if not present. - pub fn find(&self, s: &[u8]) -> Option { - self.slots.iter().position(|x| x.as_slice() == s).map(|i| i as u16) - } -} - -impl Default for DynamicPool { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_intern_dedup() { - let mut pool = DynamicPool::new(); - let i0 = pool.intern(b"foo"); - let i1 = pool.intern(b"bar"); - assert_eq!(pool.intern(b"foo"), i0); - assert_ne!(i0, i1); - assert_ne!(i0, 0); - } - - #[test] - fn test_get() { - let mut pool = DynamicPool::new(); - let i0 = pool.intern(b"foo"); - assert_eq!(pool.get(i0), Some(b"foo".as_slice())); - assert_eq!(pool.get(0), Some(b"".as_slice())); // null slot - assert_eq!(pool.get(999), None); - } -} diff --git a/src/unused/state.rs b/src/unused/state.rs deleted file mode 100644 index 7e9bb06..0000000 --- a/src/unused/state.rs +++ /dev/null @@ -1,659 +0,0 @@ -use std::collections::{HashMap, HashSet}; -use std::path::PathBuf; -use crate::core::fixed_bits; -use crate::core::manifest::{Manifest, ConfigValue, TemplateToken}; -use crate::core::parser::{Value as ParseValue, parse}; -use crate::ports::provided::{ManifestError, StateError, Value}; -use crate::ports::required::FileClient; -use crate::store::Store; -use crate::load::Load; - -use std::sync::Arc; - -pub struct State { - manifest_dir: PathBuf, - manifest_file: Box, - manifest: Manifest, - state_keys: Vec, - 
state_vals: Vec, - store: Store, - load: Load, - max_recursion: usize, - called_keys: HashSet, -} - -impl State { - /// Creates a new State with the given manifest directory. - /// - /// # Examples - /// - /// ``` - /// use state_engine::State; - /// - /// let state = State::new("./examples/manifest"); - /// ``` - pub fn new(manifest_dir: &str) -> Self { - Self { - manifest_dir: PathBuf::from(manifest_dir), - manifest_file: Box::new(crate::ports::default::DefaultFileClient), - manifest: Manifest::new(), - state_keys: vec![0], - state_vals: vec![Value::Null], - store: Store::new(), - load: Load::new(), - max_recursion: 20, - called_keys: HashSet::new(), - } - } - - pub fn with_in_memory(mut self, client: Arc) -> Self { - self.store = self.store.with_in_memory(Arc::clone(&client)); - self.load = self.load.with_in_memory(client); - self - } - - pub fn with_kvs(mut self, client: Arc) -> Self { - self.store = self.store.with_kvs(Arc::clone(&client)); - self.load = self.load.with_kvs(client); - self - } - - pub fn with_db(mut self, client: Arc) -> Self { - self.load = self.load.with_db(client); - self - } - - pub fn with_env(mut self, client: Arc) -> Self { - self.load = self.load.with_env(client); - self - } - - pub fn with_http(mut self, client: Arc) -> Self { - self.store = self.store.with_http(Arc::clone(&client)); - self.load = self.load.with_http(client); - self - } - - pub fn with_file(mut self, client: Arc) -> Self { - self.store = self.store.with_file(Arc::clone(&client)); - self.load = self.load.with_file(client); - self - } - - pub fn with_manifest_file(mut self, client: impl FileClient + 'static) -> Self { - self.manifest_file = Box::new(client); - self - } - - fn load_manifest(&mut self, file: &str) -> Result<(), ManifestError> { - crate::fn_log!("State", "load_manifest", file); - if self.manifest.is_loaded(file) { - return Ok(()); - } - - let yml_path = self.manifest_dir.join(format!("{}.yml", file)); - let yaml_path = 
self.manifest_dir.join(format!("{}.yaml", file)); - let yml_key = yml_path.to_string_lossy(); - let yaml_key = yaml_path.to_string_lossy(); - let yml_content = self.manifest_file.get(&yml_key); - let yaml_content = self.manifest_file.get(&yaml_key); - - let content = match (yml_content, yaml_content) { - (Some(_), Some(_)) => return Err(ManifestError::AmbiguousFile( - format!("both '{}.yml' and '{}.yaml' exist.", file, file) - )), - (Some(c), None) => c, - (None, Some(c)) => c, - (None, None) => return Err(ManifestError::FileNotFound( - format!("'{}.yml' or '{}.yaml'", file, file) - )), - }; - - let content_str = std::str::from_utf8(&content) - .map_err(|e| ManifestError::ParseError(format!("UTF-8 error: {}", e)))?; - - let yaml_root: serde_yaml_ng::Value = serde_yaml_ng::from_str(content_str) - .map_err(|e| ManifestError::ParseError(format!("YAML parse error: {}", e)))?; - - let pm = parse( - file, - yaml_to_parse_value(yaml_root), - &mut self.manifest.dynamic, - &mut self.manifest.keys, - &mut self.manifest.values, - &mut self.manifest.path_map, - &mut self.manifest.children_map, - ).map_err(|e| ManifestError::ParseError(e))?; - - self.manifest.insert(file.to_string(), pm); - Ok(()) - } - - fn split_key<'k>(key: &'k str) -> (&'k str, &'k str) { - match key.find('.') { - Some(pos) => (&key[..pos], &key[pos + 1..]), - None => (key, ""), - } - } - - fn find_state_value(&self, key_idx: u16) -> Option { - self.state_keys.iter().skip(1).position(|&k| k == key_idx).map(|p| p + 1) - } - - fn get_by_path(&mut self, segs: &[u16]) -> Result, StateError> { - let key = self.manifest.segs_to_key(segs)?; - if self.called_keys.len() >= self.max_recursion || self.called_keys.contains(&key) { - return Err(StateError::RecursionLimitExceeded); - } - let key_idx = match self.manifest.find_by_segs(segs) { - Some(idx) => idx, - None => return Err(StateError::KeyNotFound(key)), - }; - self.called_keys.insert(key.clone()); - let result = self.get_core(key_idx, &key); - 
self.called_keys.remove(&key); - result - } - - fn resolve_config_value(&mut self, cv: ConfigValue) -> Result, StateError> { - match cv { - ConfigValue::Client(c) => Ok(Some(Value::Scalar(c.to_le_bytes().to_vec()))), - ConfigValue::Path(segs) => self.get_by_path(&segs), - ConfigValue::Template(tokens) => { - let mut result = Vec::new(); - for token in tokens { - match token { - TemplateToken::Literal(b) => result.extend_from_slice(&b), - TemplateToken::Path(segs) => { - match self.get_by_path(&segs)? { - Some(Value::Scalar(b)) => result.extend_from_slice(&b), - _ => return Ok(None), - } - } - } - } - Ok(Some(Value::Scalar(result))) - } - ConfigValue::Str(s) => Ok(Some(Value::Scalar(s.into_bytes()))), - ConfigValue::Map(pairs) => { - Ok(Some(Value::Mapping( - pairs.into_iter() - .map(|(k, v)| (k.into_bytes(), Value::Scalar(v.into_bytes()))) - .collect() - ))) - } - } - } - - fn resolve_config(&mut self, meta_idx: u16) -> Result>, StateError> { - let entries = match self.manifest.build_config(meta_idx) { - Some(e) => e, - None => return Ok(None), - }; - - let mut config = HashMap::new(); - for (key, cv) in entries { - if key == "map" { - if let ConfigValue::Map(pairs) = cv { - let yaml_keys = Value::Sequence(pairs.iter().map(|(k, _)| Value::Scalar(k.as_bytes().to_vec())).collect()); - let ext_keys = Value::Sequence(pairs.into_iter().map(|(_, v)| Value::Scalar(v.into_bytes())).collect()); - config.insert("yaml_keys".into(), yaml_keys); - config.insert("ext_keys".into(), ext_keys); - } - } else if let Some(v) = self.resolve_config_value(cv)? { - config.insert(key, v); - } - } - Ok(Some(config)) - } - - /// Returns the value for `key`, checking state cache → _store → _load in order. 
- /// - /// # Examples - /// - /// ``` - /// use state_engine::{State, Value}; - /// use state_engine::InMemoryClient; - /// - /// struct MockInMemory { data: std::sync::Mutex> } - /// impl MockInMemory { fn new() -> Self { Self { data: Default::default() } } } - /// impl InMemoryClient for MockInMemory { - /// fn get(&self, key: &str) -> Option { self.data.lock().unwrap().get(key).cloned() } - /// fn set(&self, key: &str, value: Value) -> bool { self.data.lock().unwrap().insert(key.to_string(), value); true } - /// fn delete(&self, key: &str) -> bool { self.data.lock().unwrap().remove(key).is_some() } - /// } - /// - /// let client = MockInMemory::new(); - /// let mut state = State::new("./examples/manifest") - /// .with_in_memory(std::sync::Arc::new(client)); - /// - /// // set then get - /// state.set("connection.common", Value::Scalar(b"test".to_vec()), None).unwrap(); - /// assert!(state.get("connection.common").unwrap().is_some()); - /// ``` - pub fn get(&mut self, key: &str) -> Result, StateError> { - crate::fn_log!("State", "get", key); - if self.called_keys.len() >= self.max_recursion { - return Err(StateError::RecursionLimitExceeded); - } - if self.called_keys.contains(&key.to_string()) { - return Err(StateError::RecursionLimitExceeded); - } - - self.called_keys.insert(key.to_string()); - - let (file, path) = Self::split_key(key); - let file = file.to_string(); - let path = path.to_string(); - - if let Err(e) = self.load_manifest(&file) { - self.called_keys.remove(key); - return Err(StateError::ManifestLoadFailed(e.to_string())); - } - - let key_idx = match self.manifest.find(&file, &path) { - Some(idx) => idx, - None => { - self.called_keys.remove(key); - return Err(StateError::KeyNotFound(key.to_string())); - } - }; - - if let Some(sv_idx) = self.find_state_value(key_idx) { - let val = self.state_vals.get(sv_idx).cloned(); - self.called_keys.remove(key); - return Ok(val); - } - - let meta = self.manifest.get_meta(&file, &path); - - let has_state_client 
= meta.load - .map(|load_idx| self.manifest.get_client(load_idx) == fixed_bits::CLIENT_STATE) - .unwrap_or(false); - - if !has_state_client { - if let Some(store_idx) = meta.store { - match self.resolve_config(store_idx) { - Ok(Some(config)) => { - if let Some(value) = self.store.get(&config) { - self.state_keys.push(key_idx); - self.state_vals.push(value.clone()); - self.called_keys.remove(key); - return Ok(Some(value)); - } - } - Ok(None) => {} - Err(e) => { - self.called_keys.remove(key); - return Err(e); - } - } - } - } - - // CLIENT_STATE: extract key path directly from build_config without resolving - if has_state_client { - if let Some(load_idx) = meta.load { - let state_key_segs = self.manifest.build_config(load_idx) - .and_then(|entries| entries.into_iter().find(|(k, _)| k == "key")) - .and_then(|(_, cv)| match cv { - ConfigValue::Path(segs) => Some(segs), - _ => None, - }); - let result = match state_key_segs { - Some(segs) => self.get_by_path(&segs), - None => Ok(None), - }; - self.called_keys.remove(key); - return result; - } - } - - let result = if let Some(load_idx) = meta.load { - match self.resolve_config(load_idx) { - Ok(Some(mut config)) => { - if !config.contains_key("client") { - self.called_keys.remove(key); - return Ok(None); - } - - // unqualify map keys for Load - if let Some(Value::Mapping(map_pairs)) = config.get("map").cloned() { - let unqualified: Vec<(Vec, Value)> = map_pairs.into_iter() - .map(|(qk, v)| { - let field = qk.iter().rposition(|&b| b == b'.') - .map_or(qk.clone(), |p| qk[p+1..].to_vec()); - (field, v) - }) - .collect(); - config.insert("map".to_string(), Value::Mapping(unqualified)); - } - - match self.load.handle(&config) { - Ok(loaded) => { - if let Some(store_idx) = meta.store { - match self.resolve_config(store_idx) { - Ok(Some(store_config)) => { - if self.store.set(&store_config, loaded.clone(), None).unwrap_or(false) { - self.state_keys.push(key_idx); - self.state_vals.push(loaded.clone()); - } - } - Ok(None) => { - 
self.state_keys.push(key_idx); - self.state_vals.push(loaded.clone()); - } - Err(_) => {} - } - } else { - self.state_keys.push(key_idx); - self.state_vals.push(loaded.clone()); - } - Ok(Some(loaded)) - } - Err(e) => Err(StateError::LoadFailed(e)), - } - } - Ok(None) => Ok(None), - Err(e) => Err(e), - } - } else { Ok(None) }; - - self.called_keys.remove(key); - result - } - - /// Writes `value` to the _store backend for `key`. - /// - /// # Examples - /// - /// ``` - /// # use state_engine::{State, Value}; - /// # use state_engine::InMemoryClient; - /// # struct MockInMemory { data: std::sync::Mutex> } - /// # impl MockInMemory { fn new() -> Self { Self { data: Default::default() } } } - /// # impl InMemoryClient for MockInMemory { - /// # fn get(&self, key: &str) -> Option { self.data.lock().unwrap().get(key).cloned() } - /// # fn set(&self, key: &str, value: Value) -> bool { self.data.lock().unwrap().insert(key.to_string(), value); true } - /// # fn delete(&self, key: &str) -> bool { self.data.lock().unwrap().remove(key).is_some() } - /// # } - /// let client = MockInMemory::new(); - /// let mut state = State::new("./examples/manifest") - /// .with_in_memory(std::sync::Arc::new(client)); - /// - /// assert!(state.set("connection.common", Value::Scalar(b"data".to_vec()), None).unwrap()); - /// ``` - pub fn set(&mut self, key: &str, value: Value, ttl: Option) -> Result { - crate::fn_log!("State", "set", key); - let (file, path) = Self::split_key(key); - let file = file.to_string(); - let path = path.to_string(); - - if let Err(e) = self.load_manifest(&file) { - return Err(StateError::ManifestLoadFailed(e.to_string())); - } - - let key_idx = match self.manifest.find(&file, &path) { - Some(idx) => idx, - None => return Err(StateError::KeyNotFound(key.to_string())), - }; - - let meta = self.manifest.get_meta(&file, &path); - - if let Some(store_idx) = meta.store { - let owner_idx = meta.store_owner; - let is_leaf = owner_idx != key_idx; - - // For leaf keys: build 
updated owner Mapping via read-modify-write - let store_value = if is_leaf { - let field = path.rsplit('.').next().unwrap_or(&path).as_bytes().to_vec(); - - // 1. state_vals から owner Mapping を取得、なければ store から read - let owner_mapping = match self.find_state_value(owner_idx) - .and_then(|i| self.state_vals.get(i).cloned()) - { - Some(v @ Value::Mapping(_)) => Some(v), - _ => { - match self.resolve_config(store_idx)? { - Some(ref config) => self.store.get(config), - None => None, - } - } - }; - - // 2. Mapping にフィールドを差し込む - let mut pairs = match owner_mapping { - Some(Value::Mapping(p)) => p, - _ => vec![], - }; - if let Some(entry) = pairs.iter_mut().find(|(k, _)| *k == field) { - entry.1 = value.clone(); - } else { - pairs.push((field, value.clone())); - } - Value::Mapping(pairs) - } else { - value.clone() - }; - - match self.resolve_config(store_idx)? { - Some(config) => { - return match self.store.set(&config, store_value.clone(), ttl) { - Ok(ok) => { - if ok { - // owner の state_vals を更新 - if let Some(sv_idx) = self.find_state_value(owner_idx) { - self.state_vals[sv_idx] = store_value; - } else { - self.state_keys.push(owner_idx); - self.state_vals.push(store_value); - } - // 葉キー自身も state_vals に記録 - if is_leaf { - if let Some(sv_idx) = self.find_state_value(key_idx) { - self.state_vals[sv_idx] = value; - } else { - self.state_keys.push(key_idx); - self.state_vals.push(value); - } - } - } - Ok(ok) - } - Err(e) => Err(StateError::StoreFailed(e)), - }; - } - None => {} - } - } - Ok(false) - } - - /// Removes the value for `key` from the _store backend. 
- /// - /// # Examples - /// - /// ``` - /// # use state_engine::{State, Value}; - /// # use state_engine::InMemoryClient; - /// # struct MockInMemory { data: std::sync::Mutex> } - /// # impl MockInMemory { fn new() -> Self { Self { data: Default::default() } } } - /// # impl InMemoryClient for MockInMemory { - /// # fn get(&self, key: &str) -> Option { self.data.lock().unwrap().get(key).cloned() } - /// # fn set(&self, key: &str, value: Value) -> bool { self.data.lock().unwrap().insert(key.to_string(), value); true } - /// # fn delete(&self, key: &str) -> bool { self.data.lock().unwrap().remove(key).is_some() } - /// # } - /// let client = MockInMemory::new(); - /// let mut state = State::new("./examples/manifest") - /// .with_in_memory(std::sync::Arc::new(client)); - /// - /// state.set("connection.common", Value::Scalar(b"data".to_vec()), None).unwrap(); - /// assert!(state.delete("connection.common").unwrap()); - /// // after delete, store has no data; _load is attempted but EnvClient is not configured here - /// assert!(state.get("connection.common").is_err() || state.get("connection.common").unwrap().is_none()); - /// ``` - pub fn delete(&mut self, key: &str) -> Result { - crate::fn_log!("State", "delete", key); - let (file, path) = Self::split_key(key); - let file = file.to_string(); - let path = path.to_string(); - - if let Err(e) = self.load_manifest(&file) { - return Err(StateError::ManifestLoadFailed(e.to_string())); - } - - let key_idx = match self.manifest.find(&file, &path) { - Some(idx) => idx, - None => return Err(StateError::KeyNotFound(key.to_string())), - }; - - let meta = self.manifest.get_meta(&file, &path); - - if let Some(store_idx) = meta.store { - match self.resolve_config(store_idx)? 
{ - Some(config) => { - return match self.store.delete(&config) { - Ok(ok) => { - if ok { - if let Some(sv_idx) = self.find_state_value(key_idx) { - self.state_keys[sv_idx] = 0; - self.state_vals[sv_idx] = Value::Null; - } - } - Ok(ok) - } - Err(e) => Err(StateError::StoreFailed(e)), - }; - } - None => {} - } - } - Ok(false) - } - - /// Returns `true` if a value exists for `key` in state cache or _store. - /// Does not trigger _load. - /// - /// # Examples - /// - /// ``` - /// # use state_engine::{State, Value}; - /// # use state_engine::InMemoryClient; - /// # struct MockInMemory { data: std::sync::Mutex> } - /// # impl MockInMemory { fn new() -> Self { Self { data: Default::default() } } } - /// # impl InMemoryClient for MockInMemory { - /// # fn get(&self, key: &str) -> Option { self.data.lock().unwrap().get(key).cloned() } - /// # fn set(&self, key: &str, value: Value) -> bool { self.data.lock().unwrap().insert(key.to_string(), value); true } - /// # fn delete(&self, key: &str) -> bool { self.data.lock().unwrap().remove(key).is_some() } - /// # } - /// let client = MockInMemory::new(); - /// let mut state = State::new("./examples/manifest") - /// .with_in_memory(std::sync::Arc::new(client)); - /// - /// assert!(!state.exists("connection.common").unwrap()); - /// state.set("connection.common", Value::Scalar(b"data".to_vec()), None).unwrap(); - /// assert!(state.exists("connection.common").unwrap()); - /// ``` - pub fn exists(&mut self, key: &str) -> Result { - crate::fn_log!("State", "exists", key); - let (file, path) = Self::split_key(key); - let file = file.to_string(); - let path = path.to_string(); - - if let Err(e) = self.load_manifest(&file) { - return Err(StateError::ManifestLoadFailed(e.to_string())); - } - - let key_idx = match self.manifest.find(&file, &path) { - Some(idx) => idx, - None => return Err(StateError::KeyNotFound(key.to_string())), - }; - - if let Some(sv_idx) = self.find_state_value(key_idx) { - return 
Ok(!matches!(self.state_vals.get(sv_idx), Some(Value::Null) | None)); - } - - let meta = self.manifest.get_meta(&file, &path); - if let Some(store_idx) = meta.store { - if let Some(config) = self.resolve_config(store_idx)? { - return Ok(self.store.get(&config).is_some()); - } - } - Ok(false) - } -} - -fn yaml_to_parse_value(v: serde_yaml_ng::Value) -> ParseValue { - match v { - serde_yaml_ng::Value::Mapping(m) => ParseValue::Mapping( - m.into_iter() - .filter_map(|(k, v)| { - let key = match k { - serde_yaml_ng::Value::String(s) => s.into_bytes(), - _ => return None, - }; - Some((key, yaml_to_parse_value(v))) - }) - .collect(), - ), - serde_yaml_ng::Value::Sequence(s) => ParseValue::Sequence( - s.into_iter().map(yaml_to_parse_value).collect() - ), - serde_yaml_ng::Value::String(s) => ParseValue::Scalar(s.into_bytes()), - serde_yaml_ng::Value::Number(n) => ParseValue::Scalar(n.to_string().into_bytes()), - serde_yaml_ng::Value::Bool(b) => ParseValue::Scalar(b.to_string().into_bytes()), - serde_yaml_ng::Value::Null => ParseValue::Null, - _ => ParseValue::Null, - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::ports::required::{KVSClient, DbClient, EnvClient, FileClient}; - use std::sync::Arc; - - struct StubKVS; - impl KVSClient for StubKVS { - fn get(&self, _: &str) -> Option> { None } - fn set(&self, _: &str, _: Vec, _: Option) -> bool { false } - fn delete(&self, _: &str) -> bool { false } - } - - struct StubDb; - impl DbClient for StubDb { - fn get(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> Option> { None } - fn set(&self, _: &Value, _: &str, _: &[Vec], _: Option<&[u8]>) -> bool { false } - fn delete(&self, _: &Value, _: &str, _: Option<&[u8]>) -> bool { false } - } - - struct StubEnv; - impl EnvClient for StubEnv { - fn get(&self, _: &[Vec]) -> Option> { None } - fn set(&self, _: &str, _: Vec) -> bool { false } - fn delete(&self, _: &str) -> bool { false } - } - - struct StubFile; - impl FileClient for StubFile { - fn get(&self, _: 
&str) -> Option> { None } - fn set(&self, _: &str, _: Vec) -> bool { false } - fn delete(&self, _: &str) -> bool { false } - } - - struct StubHttp; - impl crate::ports::required::HttpClient for StubHttp { - fn get(&self, _: &str, _: &[Vec], _: Option<&[(Vec, Vec)]>) -> Option> { None } - fn set(&self, _: &str, _: Value, _: Option<&[(Vec, Vec)]>) -> bool { false } - fn delete(&self, _: &str, _: Option<&[(Vec, Vec)]>) -> bool { false } - } - - #[test] - fn test_with_clients_build() { - let _ = State::new("./examples/manifest").with_kvs(Arc::new(StubKVS)); - let _ = State::new("./examples/manifest").with_db(Arc::new(StubDb)); - let _ = State::new("./examples/manifest").with_env(Arc::new(StubEnv)); - let _ = State::new("./examples/manifest").with_http(Arc::new(StubHttp)); - let _ = State::new("./examples/manifest").with_file(Arc::new(StubFile)); - } -} From 9f5b4da9686a35c99d2d2ed61ab107f4b8da1214 Mon Sep 17 00:00:00 2001 From: Andyou Date: Thu, 9 Apr 2026 07:23:25 +0900 Subject: [PATCH 41/41] update layout for StoreClient map --- docs/Architecture.md | 122 +++++++++++++++++++++++++---------------- examples/implements.rs | 30 +++++----- src/context.rs | 14 ++--- src/dsl.rs | 62 +++++++++++---------- src/index.rs | 5 +- src/ports/required.rs | 10 ++-- 6 files changed, 141 insertions(+), 102 deletions(-) diff --git a/docs/Architecture.md b/docs/Architecture.md index 23ef97f..6174081 100644 --- a/docs/Architecture.md +++ b/docs/Architecture.md @@ -47,8 +47,8 @@ | Module | fn | Signature | Description | Filename | |--------|----|-----------|-------------|----------| | Compiler | `new` | `() -> Compiler` | Compiler初期化 | dsl.rs | -| | `walk_field_key` | `(&mut self, keyword: &[u8], value: &Tree, inh_load: Option<&MetaBlock>, inh_store: Option<&MetaBlock>)` | field_keyノードを再帰処理しpaths/children/leavesを構築 | dsl.rs | -| | `resolve_meta` | `(&mut self, pairs: &[(Vec, Tree)], meta_key: &[u8], inherited: Option<&MetaBlock>) -> Option` | `_load`/`_store`ブロックを親から継承しつつ現ノードで上書きして返す | 
dsl.rs | +| | `walk_field_key` | `(&mut self, keyword: &[u8], value: &Tree, inh_load: Option<&MetaBlock>, inh_store: Option<&MetaBlock>)` | field_keyを再帰処理しpaths/children/leavesを構築 | dsl.rs | +| | `resolve_meta` | `(&mut self, pairs: &[(Vec, Tree)], meta_key: &[u8], inherited: Option<&MetaBlock>) -> Option` | `_load`/`_store`ブロックを親から継承しつつ現keyで上書きして返す | dsl.rs | | | `write_leaf` | `(&mut self, path_idx: u32, keyword_idx: u32, value_idx: Option, load: Option<&MetaBlock>, store: Option<&MetaBlock>)` | leavesにleafデータを書き込みpaths[path_idx]をis_leaf=1で更新 | dsl.rs | | | `intern` | `(&mut self, s: &[u8]) -> u32` | バイト列をinterningに追加しinterning_idxを返す(重複排除) | dsl.rs | | | `intern_tree_scalar` | `(&mut self, v: &Tree) -> u32` | TreeスカラーまたはNullをinternしてindexを返す | dsl.rs | @@ -151,55 +151,79 @@ Contextインスタンス固有のキャッシュ。StoreClientとは独立。 ### 静的データ配列 `Dsl::compile`が返す5配列。アプリ起動時に一度だけ構築し、`Index`が保持する。 +**読み込むdslの、全(部分含む)path数は、u16(65535個以下)を充てる。** ``` -paths: Box<[u64]> // pathノード一覧。[0]がvirtual root -children: Box<[u32]> // 各pathの子path_idxをフラットに連結 -leaves: Box<[u8]> // leafデータのバイト列 -interning: Box<[u8]> // 文字列バイト列をフラットに連結 -interning_idx: Box<[u64]> // interningのoffset(u32)+len(u32)エントリ一覧 +paths: Box<[u64]> // pathのlist +children: Box<[u16]> // 各pathの子path_idxを連結したu16列 +leaves: Box<[u32]> // leafのlist。u32刻み +interning: Box<[u8]> // 文字列バイト列を連結したバイト列 +interning_idx: Box<[u64]> // interningの文字列境界 interning_idx[u64] のlist ``` ### path (u64) -| field | bits | 範囲 | -|-------------|------|-----------| -| is_leaf | 1 | bit 63 | -| offset | 32 | bits 62..31 | -| count | 8 | bits 30..23 | -| keyword_idx | 23 | bits 22..0 | - -- `is_leaf=0`: 非leafノード。`children[offset..offset+count[3:0]]`に子path_idxが並ぶ -- `is_leaf=1`: leafノード。`leaves[offset..]`にleafデータが並ぶ。`count[7:4]=load_args数`, `count[3:0]=store_args数` -- `keyword_idx`: このノードのkeywordのinterning_idx - -### children ([u32]) - -各エントリはpath_idx。各pathノードが持つ子の範囲は`path.offset`と`path.count[3:0]`で決まる。 - -### leaves - -leafノード1つ分のバイト列レイアウト: - -| フィールド | サイズ | 説明 | 
-|---------------------|----------|------| -| keyword_idx | u32 | このleafのkeywordのinterning_idx | -| value_token_count | u32 | valueトークン数。0=null | -| token_type[0] | u8 | 0=static(interning_idx), 1=placeholder(path_idx) | -| token_idx[0] | u32 | interning_idxまたはpath_idx | -| ... × value_token_count | | | -| load_client_idx | u32 | interning_idx | -| load_key_idx | u32 | interning_idx | -| store_client_idx | u32 | interning_idx | -| store_key_idx | u32 | interning_idx | -| load.args × N | u32+u32 | key_idx + value_idx (interning) | -| store.args × N | u32+u32 | key_idx + value_idx (interning) | +**paths[0]は常にvirtual root** + +| Field | bits | range | +|-------------|------|-------------| +| is_leaf | 1 | bit 63 | +| offset | 16 | bits 62..47 | +| count | 4 | bits 46..43 | +| padding | 11 | bits 42..32 | +| parent_idx | 16 | bits 31..16 | +| keyword_idx | 16 | bits 15..0 | + +- `is_leaf=0`: 非leaf path。`children[offset..offset+count]`に子path_idxが並ぶ +- `is_leaf=1`: leaf path。`leaves[offset..]`にleafデータが並ぶ。`count`は未使用 +- `parent_idx`: 親path_idx。virtual root(paths[0])は自己参照(0) +- `keyword_idx`: このpathのkeywordのinterning_idx + +### child (u16) + +| Field | Bits | Range | +|-----------|------|------------| +| child_idx | 16 | bits 15..0 | // path_idx + +各path所属の始端終端は、`path.offset`と`path.count[3:0]`で決まる。 +**1pathあたりの直接子path数は、count[3:0]の4bit制限により最大15。** + +### leaf + +leaf 1つ分のレイアウト(u32単位): + +| Category | Field | Bits | Range | +|-------------|----------------------|------|---------------------------------| +| header | keyword_idx | 16 | u32[0] bits 31..16 | // interning_idx +| header | fragment_count | 8 | u32[0] bits 15..8 | // valueフラグメント数。0=null +| header | load_map_count | 8 | u32[0] bits 7..0 | // load.mapエントリ数 +| header | load_args_count | 8 | u32[1] bits 31..24 | // load.argsエントリ数 +| header | store_map_count | 8 | u32[1] bits 23..16 | // store.mapエントリ数 +| header | store_args_count | 8 | u32[1] bits 15..8 | // store.argsエントリ数 +| header | padding | 8 | u32[1] bits 7..0 | 
+| header | load_client_idx | 16 | u32[2] bits 31..16 | // interning_idx +| header | load_key_idx | 16 | u32[2] bits 15..0 | // interning_idx +| header | store_client_idx | 16 | u32[3] bits 31..16 | // interning_idx +| header | store_key_idx | 16 | u32[3] bits 15..0 | // interning_idx +| fragment×F | padding | 15 | u32[4+i] bits 31..17 | +| fragment×F | is_placeholder | 1 | u32[4+i] bit 16 | // 0=static, 1=placeholder +| fragment×F | idx | 16 | u32[4+i] bits 15..0 | // is_placeholder=0: interning_idx / 1: path_idx +| load.map×M0 | dst_idx | 16 | u32[4+F+i] bits 31..16 | // context path interning_idx +| load.map×M0 | src_idx | 16 | u32[4+F+i] bits 15..0 | // store column interning_idx +| load.args×A0| key_idx | 16 | u32[4+F+M0+i] bits 31..16 | // interning_idx +| load.args×A0| val_idx | 16 | u32[4+F+M0+i] bits 15..0 | // interning_idx +| store.map×M1| dst_idx | 16 | u32[4+F+M0+A0+i] bits 31..16 | // context path interning_idx +| store.map×M1| src_idx | 16 | u32[4+F+M0+A0+i] bits 15..0 | // store column interning_idx +| store.args×A1| key_idx | 16 | u32[4+F+M0+A0+M1+i] bits 31..16 | // interning_idx +| store.args×A1| val_idx | 16 | u32[4+F+M0+A0+M1+i] bits 15..0 | // interning_idx + +// F=fragment_count, M0=load_map_count, A0=load_args_count, M1=store_map_count, A1=store_args_count **valueの解釈:** -- `token_count=0`: null -- `token_count=1, type=static`: 静的文字列 -- `token_count=1, type=placeholder`: 単独`${path}` → `Context.get(path_idx)`の値をそのままコピー(型保持) -- `token_count≥2` または混在: template → 各tokenを解決しstring結合 +- `fragment_count=0`: null +- `fragment_count=1, is_placeholder=0`: 静的文字列 +- `fragment_count=1, is_placeholder=1`: 単独`${path}` → `Context.get(path_idx)`の値をそのままコピー(型保持) +- `fragment_count≥2` または混在: template → 各fragmentを解決しstring結合 **placeholder解決はcompile時に2パスで行う:** 1. 
1パス目: path構造を確定(全path_idxを決定) @@ -207,7 +231,13 @@ leafノード1つ分のバイト列レイアウト: ### interning_idx ([u64]) -各エントリ: `offset(u32, bits63..32) | len(u32, bits31..0)` +| Field | Bits | Range | +|--------|------|--------| +| offset | 32 | 63..32 | +| padding| 16 | 31..16 | +| len | 16 | 15..0 | + +**1文字列の最大長はu16(65535バイト以下)を充てる。** インデックス0は空文字列(virtual rootのkeyword)。 @@ -216,8 +246,8 @@ leafノード1つ分のバイト列レイアウト: `${}`内のパスは常に絶対パスとして扱う。 **実行時の解決:** -- `token_count=1, type=placeholder`: `Context.get(path_idx)`の値をそのままコピー(string化しない) -- template: 各tokenを`Context.get()`で解決しstringとして結合 +- `fragment_count=1, is_placeholder=1`: `Context.get(path_idx)`の値をそのままコピー(string化しない) +- template: 各fragmentを`Context.get()`で解決しstringとして結合 ## Error Types diff --git a/examples/implements.rs b/examples/implements.rs index ec2c39a..e557dad 100644 --- a/examples/implements.rs +++ b/examples/implements.rs @@ -22,17 +22,17 @@ impl MemoryClient { } impl StoreClient for MemoryClient { - fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + fn get(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> Option { self.data.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + fn set(&self, key: &str, _map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option { let value = args.get("value")?.clone(); let mut data = self.data.lock().unwrap(); let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; data.insert(key.to_string(), value); Some(outcome) } - fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + fn delete(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> bool { self.data.lock().unwrap().remove(key).is_some() } } @@ -52,12 +52,12 @@ impl KvsClient { } impl StoreClient for KvsClient { - fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + fn get(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> Option { let 
bytes = self.data.lock().unwrap().get(key).cloned()?; // In real impl: unwire bytes → Tree Some(bytes) } - fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + fn set(&self, key: &str, _map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option { let value = args.get("value")?.clone(); // args["ttl"] ignored in mock let mut data = self.data.lock().unwrap(); @@ -65,7 +65,7 @@ impl StoreClient for KvsClient { data.insert(key.to_string(), value); Some(outcome) } - fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + fn delete(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> bool { self.data.lock().unwrap().remove(key).is_some() } } @@ -78,7 +78,7 @@ impl StoreClient for KvsClient { pub struct EnvClient; impl StoreClient for EnvClient { - fn get(&self, _key: &str, args: &BTreeMap<&str, Tree>) -> Option { + fn get(&self, _key: &str, _map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option { let pairs: Vec<(Vec, Tree)> = args.iter() .filter_map(|(&k, v)| { let env_key = match v { @@ -93,8 +93,8 @@ impl StoreClient for EnvClient { .collect(); if pairs.is_empty() { None } else { Some(Tree::Mapping(pairs)) } } - fn set(&self, _key: &str, _args: &BTreeMap<&str, Tree>) -> Option { None } - fn delete(&self, _key: &str, _args: &BTreeMap<&str, Tree>) -> bool { false } + fn set(&self, _key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> Option { None } + fn delete(&self, _key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> bool { false } } // ── CommonDb (mock) ─────────────────────────────────────────────────────────── @@ -110,17 +110,17 @@ impl CommonDbClient { } impl StoreClient for CommonDbClient { - fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + fn get(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> Option { self.data.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + fn set(&self, 
key: &str, _map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option { let value = args.get("value")?.clone(); let mut data = self.data.lock().unwrap(); let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; data.insert(key.to_string(), value); Some(outcome) } - fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + fn delete(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> bool { self.data.lock().unwrap().remove(key).is_some() } } @@ -138,17 +138,17 @@ impl TenantDbClient { } impl StoreClient for TenantDbClient { - fn get(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> Option { + fn get(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> Option { self.data.lock().unwrap().get(key).cloned() } - fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option { + fn set(&self, key: &str, _map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option { let value = args.get("value")?.clone(); let mut data = self.data.lock().unwrap(); let outcome = if data.contains_key(key) { SetOutcome::Updated } else { SetOutcome::Created }; data.insert(key.to_string(), value); Some(outcome) } - fn delete(&self, key: &str, _args: &BTreeMap<&str, Tree>) -> bool { + fn delete(&self, key: &str, _map: &[(Tree, Tree)], _args: &BTreeMap<&str, Tree>) -> bool { self.data.lock().unwrap().remove(key).is_some() } } diff --git a/src/context.rs b/src/context.rs index 6449879..2375cd6 100644 --- a/src/context.rs +++ b/src/context.rs @@ -121,7 +121,7 @@ impl<'r> ContextTrait for Context<'r> { .collect(); args_ref.insert("value", value.clone()); - match client.set(store_key, &args_ref) { + match client.set(store_key, &[], &args_ref) { Some(SetOutcome::Created) | Some(SetOutcome::Updated) => { self.cache_set(leaf.path_idx, value); Ok(true) @@ -151,7 +151,7 @@ impl<'r> ContextTrait for Context<'r> { .map(|(k, v)| (k.as_str(), v.clone())) .collect(); - let ok = client.delete(store_key, &args_ref); 
+ let ok = client.delete(store_key, &[], &args_ref); if ok { self.cache_remove(leaf.path_idx); } @@ -182,7 +182,7 @@ impl<'r> ContextTrait for Context<'r> { .map(|(k, v)| (k.as_str(), v.clone())) .collect(); - Ok(client.get(store_key, &args_ref).is_some()) + Ok(client.get(store_key, &[], &args_ref).is_some()) } } @@ -195,7 +195,7 @@ impl<'r> Context<'r> { return Ok(Some(v.clone())); } - let leaf_ref = crate::index::LeafRef { path_idx, leaf_offset }; + let leaf_ref = crate::index::LeafRef { path_idx, parent_idx: 0, leaf_offset }; // 2. _store let (store_name, store_args) = self.index.store_args(&leaf_ref); @@ -209,7 +209,7 @@ impl<'r> Context<'r> { let args_ref: BTreeMap<&str, Tree> = store_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); - if let Some(value) = client.get(key, &args_ref) { + if let Some(value) = client.get(key, &[], &args_ref) { self.cache_set(path_idx, value.clone()); return Ok(Some(value)); } @@ -233,7 +233,7 @@ impl<'r> Context<'r> { let args_ref: BTreeMap<&str, Tree> = load_args.iter() .map(|(k, v)| (k.as_str(), v.clone())) .collect(); - let value = client.get(key, &args_ref) + let value = client.get(key, &[], &args_ref) .ok_or_else(|| ContextError::LoadFailed( LoadError::NotFound(key.to_string()) ))?; @@ -249,7 +249,7 @@ impl<'r> Context<'r> { .map(|(k, v)| (k.as_str(), v.clone())) .collect(); sargs.insert("value", value.clone()); - store_client.set(sk, &sargs); + store_client.set(sk, &[], &sargs); } } } diff --git a/src/dsl.rs b/src/dsl.rs index 87a3e98..3c8656c 100644 --- a/src/dsl.rs +++ b/src/dsl.rs @@ -20,21 +20,23 @@ pub const PROP_MAP: &[u8] = b"map"; // | field | bits | // |-------------|------| // | is_leaf | 1 | bit 63 -// | offset | 32 | bits 62..31 -// | count | 8 | bits 30..23 -// | | | is_leaf=0: [3:0]=子path数(1~16), [7:4]=unused -// | | | is_leaf=1: [7:4]=load_args count, [3:0]=store_args count (各最大15) -// | keyword_idx | 23 | bits 22..0 interning_idx of this node's keyword +// | offset | 16 | bits 62..47 +// | count 
| 4 | bits 46..43 is_leaf=0: 子path数, is_leaf=1: unused +// | padding | 11 | bits 42..32 +// | parent_idx | 16 | bits 31..16 virtual root is self-referential (0) +// | keyword_idx | 16 | bits 15..0 interning_idx of this path's keyword pub const PATH_IS_LEAF_SHIFT: u64 = 63; -pub const PATH_OFFSET_SHIFT: u64 = 31; -pub const PATH_COUNT_SHIFT: u64 = 23; +pub const PATH_OFFSET_SHIFT: u64 = 47; +pub const PATH_COUNT_SHIFT: u64 = 43; +pub const PATH_PARENT_IDX_SHIFT: u64 = 16; pub const PATH_KEYWORD_IDX_SHIFT: u64 = 0; -pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT; -pub const PATH_OFFSET_MASK: u64 = 0xffff_ffff << PATH_OFFSET_SHIFT; -pub const PATH_COUNT_MASK: u64 = 0xff << PATH_COUNT_SHIFT; -pub const PATH_KEYWORD_IDX_MASK: u64 = 0x7f_ffff; // bits 22..0 +pub const PATH_IS_LEAF_MASK: u64 = 0x1 << PATH_IS_LEAF_SHIFT; +pub const PATH_OFFSET_MASK: u64 = 0xffff << PATH_OFFSET_SHIFT; +pub const PATH_COUNT_MASK: u64 = 0xf << PATH_COUNT_SHIFT; +pub const PATH_PARENT_IDX_MASK: u64 = 0xffff << PATH_PARENT_IDX_SHIFT; +pub const PATH_KEYWORD_IDX_MASK: u64 = 0xffff; // bits 15..0 // ── Dsl ─────────────────────────────────────────────────────────────────────── @@ -81,13 +83,14 @@ impl Dsl { for (i, (k, v)) in field_pairs.iter().enumerate() { let child_idx = compiler.paths.len() as u32; compiler.children[children_offset as usize + i] = child_idx; - compiler.walk_field_key(k, v, None, None); + compiler.walk_field_key(k, v, 0, None, None); // parent=virtual root(0) } let count_bits = (child_count as u64) & 0xf; compiler.paths[0] = (children_offset as u64) << PATH_OFFSET_SHIFT | count_bits << PATH_COUNT_SHIFT + | 0u64 << PATH_PARENT_IDX_SHIFT // self-referential | 0u64; // keyword_idx=0 (empty) } compiler.finish() @@ -151,13 +154,14 @@ impl Compiler { // ── walk ────────────────────────────────────────────────────────────────── - /// Process a single field_key node. + /// Process a single field_key. 
fn walk_field_key( &mut self, - keyword: &[u8], - value: &Tree, - inh_load: Option<&MetaBlock>, - inh_store: Option<&MetaBlock>, + keyword: &[u8], + value: &Tree, + parent_idx: u32, + inh_load: Option<&MetaBlock>, + inh_store: Option<&MetaBlock>, ) { let path_idx = self.paths.len() as u32; self.paths.push(0u64); // placeholder, filled below @@ -188,23 +192,24 @@ impl Compiler { for (i, (k, v)) in field_pairs.iter().enumerate() { let child_idx = self.paths.len() as u32; self.children[children_offset as usize + i] = child_idx; - self.walk_field_key(k, v, load.as_ref(), store.as_ref()); + self.walk_field_key(k, v, path_idx, load.as_ref(), store.as_ref()); } if child_count == 0 { // No child field_keys → treat as leaf. - self.write_leaf(path_idx, keyword_idx, &Tree::Null, load.as_ref(), store.as_ref()); + self.write_leaf(path_idx, keyword_idx, parent_idx, &Tree::Null, load.as_ref(), store.as_ref()); } else { let count_bits = (child_count as u64) & 0xf; self.paths[path_idx as usize] = - (children_offset as u64) << PATH_OFFSET_SHIFT - | count_bits << PATH_COUNT_SHIFT - | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; + (children_offset as u64) << PATH_OFFSET_SHIFT + | count_bits << PATH_COUNT_SHIFT + | (parent_idx as u64) << PATH_PARENT_IDX_SHIFT + | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; } } // Scalar or Null → leaf with optional hardcoded value. 
_ => { - self.write_leaf(path_idx, keyword_idx, value, inh_load, inh_store); + self.write_leaf(path_idx, keyword_idx, parent_idx, value, inh_load, inh_store); } } } @@ -285,6 +290,7 @@ impl Compiler { &mut self, path_idx: u32, keyword_idx: u32, + parent_idx: u32, value: &Tree, load: Option<&MetaBlock>, store: Option<&MetaBlock>, @@ -362,14 +368,12 @@ impl Compiler { } } - // Update path entry: is_leaf=1, offset=leaf_offset, count=load_args<<4|store_args - let count = ((load_args_count as u64) & 0xf) << 4 - | ((store_args_count as u64) & 0xf); + // Update path entry: is_leaf=1, offset=leaf_offset, count=unused self.paths[path_idx as usize] = PATH_IS_LEAF_MASK - | (leaf_offset as u64) << PATH_OFFSET_SHIFT - | count << PATH_COUNT_SHIFT - | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; + | (leaf_offset as u64) << PATH_OFFSET_SHIFT + | (parent_idx as u64) << PATH_PARENT_IDX_SHIFT + | (keyword_idx as u64) & PATH_KEYWORD_IDX_MASK; } // ── interning ───────────────────────────────────────────────────────────── diff --git a/src/index.rs b/src/index.rs index e06c09e..f6f48d6 100644 --- a/src/index.rs +++ b/src/index.rs @@ -8,6 +8,7 @@ use crate::dsl::{ PATH_IS_LEAF_MASK, PATH_OFFSET_SHIFT, PATH_OFFSET_MASK, PATH_COUNT_SHIFT, PATH_COUNT_MASK, + PATH_PARENT_IDX_SHIFT, PATH_PARENT_IDX_MASK, PATH_KEYWORD_IDX_MASK, }; use crate::ports::provided::Tree; @@ -16,6 +17,7 @@ use crate::ports::provided::Tree; pub struct LeafRef { pub path_idx: u32, + pub parent_idx: u32, pub leaf_offset: u32, } @@ -105,7 +107,8 @@ impl Index { let path = self.paths[path_idx as usize]; if path & PATH_IS_LEAF_MASK != 0 { let leaf_offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as u32; - out.push(LeafRef { path_idx, leaf_offset }); + let parent_idx = ((path & PATH_PARENT_IDX_MASK) >> PATH_PARENT_IDX_SHIFT) as u32; + out.push(LeafRef { path_idx, parent_idx, leaf_offset }); return; } let offset = ((path & PATH_OFFSET_MASK) >> PATH_OFFSET_SHIFT) as usize; diff --git a/src/ports/required.rs 
b/src/ports/required.rs index a23e7ac..f41ca94 100644 --- a/src/ports/required.rs +++ b/src/ports/required.rs @@ -9,14 +9,16 @@ pub enum SetOutcome { // Single-store adapter. Implemented by the library user per backing store. // -// - `key`: the value of `_load.key` / `_store.key` from the manifest. Reserved arg. +// - `key`: the value of `_load.key` / `_store.key` from the manifest. Reserved arg. +// - `map`: ordered list of (dst_path, src_column) pairs from `map:` in the manifest. +// Empty slice if `map:` is not defined. // - `args`: all other manifest args (ttl, connection, headers, etc.) as a flat map. // The implementor defines and reads whatever keys it needs. // - Thread-safety and internal mutability are the implementor's responsibility. pub trait StoreClient: Send + Sync { - fn get(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; - fn set(&self, key: &str, args: &BTreeMap<&str, Tree>) -> Option; - fn delete(&self, key: &str, args: &BTreeMap<&str, Tree>) -> bool; + fn get(&self, key: &str, map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option; + fn set(&self, key: &str, map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> Option; + fn delete(&self, key: &str, map: &[(Tree, Tree)], args: &BTreeMap<&str, Tree>) -> bool; } /// Dispatches keyword → StoreClient. Implemented by the library user.