From 62e651359e18b5c891789feab9e0e6d1e60986d5 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Wed, 8 Apr 2026 18:41:48 +0000 Subject: [PATCH 01/10] docs: update documentation for v0.1.3 query engine refactor --- CONTRIBUTING.md | 19 ++++++-- README.md | 66 ++++++--------------------- docs/doc/internals/architecture.mdx | 10 ++-- docs/doc/internals/query-compiler.mdx | 59 ++++++++++++------------ 4 files changed, 66 insertions(+), 88 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9289b03..2d9cef6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,23 +20,32 @@ maturin develop # compile Rust + install in dev mode ``` ### Run Tests - + ```bash # Rust unit tests (no DB needed) cargo test - + # Python unit tests (no DB needed) python test.py - + # Integration tests (SQLite) python test.py --integration - + # All tests python test.py --all ``` - + +### Run Benchmarks + +To measure the performance of the query compiler: + +```bash +cd ryx-query && cargo bench +``` + ### Type Check + ```bash mypy ryx/ ``` diff --git a/README.md b/README.md index 25722f0..20e5791 100644 --- a/README.md +++ b/README.md @@ -75,10 +75,20 @@ async with ryx.transaction(): | **Backends** | All | All | **PG · MySQL · SQLite** | | **Migrations** | Built-in | Alembic | **Built-in** | -## Performance +## Architecture + +

+ Ryx Architecture +

+ +Your Python queries are compiled to SQL in Rust, executed by sqlx, and decoded back — all without blocking the Python event loop. +Since v0.1.3, the query engine has been extracted into a standalone crate `ryx-query`. This decouples the SQL compilation logic from the PyO3 bindings, enabling extreme performance and independent testing. + +## Performance + Benchmark of 1 000 rows on SQLite (lower is better): - + | Operation | Ryx ORM | SQLAlchemy ORM | SQLAlchemy Core | Ryx raw | |-----------|--------:|---------------:|----------------:|--------:| | **bulk_create** | 0.0074 s | 0.1696 s | 0.0022 s | 0.0011 s | @@ -86,59 +96,13 @@ Benchmark of 1 000 rows on SQLite (lower is better): | **bulk_delete** | 0.0005 s | 0.0012 s | 0.0009 s | 0.0004 s | | **filter + order + limit** | 0.0009 s | 0.0019 s | 0.0008 s | 0.0004 s | | **aggregate** | 0.0002 s | 0.0015 s | 0.0005 s | 0.0001 s | - + Ryx ORM is **16× faster** than SQLAlchemy ORM on bulk inserts and **2× faster** on deletes — while keeping the same Django-style API. The raw SQL layer (`raw_execute` / `raw_fetch`) gives you near-C speed when you need it. +**Internal Compilation Speed**: Our query compiler is blindingly fast, with simple lookups compiled in **~248ns** and complex query trees in **~1µs**. 
+ Run the benchmark yourself: -```bash -uv add sqlalchemy[asyncio] aiosqlite -uv run python examples/13_benchmark_sqlalchemy.py -``` - -## Quick Start - -```bash -pip install maturin -maturin develop # compile Rust + install -``` - -```python -import asyncio, ryx -from ryx import Model, CharField - -class Article(Model): - title = CharField(max_length=200) - -async def main(): - await ryx.setup("sqlite:///app.db") - await ryx.migrate([Article]) - await Article.objects.create(title="Hello Ryx") - print(await Article.objects.all()) - -asyncio.run(main()) -``` - -## Key Features - -- **30+ field types** — from `AutoField` to `JSONField`, with validation built in -- **Q objects** — complex `AND` / `OR` / `NOT` expressions with nesting -- **Aggregations** — `Count`, `Sum`, `Avg`, `Min`, `Max` with `GROUP BY` and `HAVING` -- **Relationships** — `ForeignKey`, `OneToOneField`, `ManyToManyField` with `select_related` / `prefetch_related` -- **Transactions** — async context managers with nested savepoints -- **Signals** — `pre_save`, `post_save`, `pre_delete`, `post_delete` and more -- **Migrations** — autodetect schema changes, generate and apply -- **Validation** — field-level + model-level, collects all errors before raising -- **Sync/async bridge** — use from sync or async code seamlessly -- **CLI** — `python -m ryx migrate`, `makemigrations`, `shell`, `inspectdb` - -## Architecture - -

- Ryx Architecture -

- -Your Python queries are compiled to SQL in Rust, executed by sqlx, and decoded back — all without blocking the Python event loop. ## Documentation diff --git a/docs/doc/internals/architecture.mdx b/docs/doc/internals/architecture.mdx index a2d2471..0a8e9c2 100644 --- a/docs/doc/internals/architecture.mdx +++ b/docs/doc/internals/architecture.mdx @@ -7,7 +7,7 @@ sidebar_position: 2 Ryx is built in three layers, each with a clear responsibility. ## Layer Diagram - + ``` ┌──────────────────────────────────────────────────────────┐ │ Python Layer (ryx/) │ @@ -17,8 +17,11 @@ Ryx is built in three layers, each with a clear responsibility. │ PyO3 Boundary (src/lib.rs) │ │ QueryBuilder · TransactionHandle · Type Bridge · Async │ ├──────────────────────────────────────────────────────────┤ -│ Rust Core (src/) │ -│ AST · Q-Trees · SQL Compiler · Executor · Pool · Tx │ +│ Modular Query Engine (ryx-query crate) │ +│ AST · Q-Trees · SQL Compiler · Lookup Registry │ +├──────────────────────────────────────────────────────────┤ +│ Rust Core (src/) │ +│ Executor · Pool · Transaction Logic │ ├──────────────────────────────────────────────────────────┤ │ sqlx 0.8.6 + tokio 1.40 │ │ AnyPool · Async Drivers · Transactions │ @@ -27,6 +30,7 @@ Ryx is built in three layers, each with a clear responsibility. └──────────────────────────────────────────────────────────┘ ``` + ## Query Execution Flow ``` diff --git a/docs/doc/internals/query-compiler.mdx b/docs/doc/internals/query-compiler.mdx index 8905064..2c86595 100644 --- a/docs/doc/internals/query-compiler.mdx +++ b/docs/doc/internals/query-compiler.mdx @@ -3,11 +3,13 @@ sidebar_position: 4 --- # Query Compiler + +The heart of Ryx — transforms Python query expressions into optimized SQL. -The heart of Ryx — transforms Python query expressions into optimized SQL. - +Since v0.1.3, the compiler resides in the standalone `ryx-query` crate, decoupled from the Python bindings for maximum performance and testability. 
+ ## Pipeline - + ``` Python QuerySet methods │ @@ -15,26 +17,27 @@ Python QuerySet methods QueryNode (Rust AST) │ ▼ -compiler::compile() +ryx_query::compiler::compile() │ ▼ CompiledQuery { sql: String, values: Vec } ``` - + ## AST Types - + ### QueryNode - + The root of every query: - + ```rust pub struct QueryNode { - pub operation: QueryOperation, // Select, Aggregate, Count, Delete, Update, Insert pub table: String, - pub columns: Vec, + pub backend: Backend, // DB backend for SQL generation + pub operation: QueryOperation, // Select, Aggregate, Count, Delete, Update, Insert pub filters: Vec, - pub q_tree: Option, + pub q_filter: Option, pub joins: Vec, + pub annotations: Vec, pub group_by: Vec, pub having: Vec, pub order_by: Vec, @@ -43,20 +46,20 @@ pub struct QueryNode { pub distinct: bool, } ``` - + ### QNode — Boolean Expression Tree - + ```rust pub enum QNode { Leaf { field: String, lookup: String, value: SqlValue, negated: bool }, - And { left: Box, right: Box }, - Or { left: Box, right: Box }, - Not { inner: Box }, + And(Vec), + Or(Vec), + Not(Box), } ``` - + ### SqlValue — Type-Safe Values - + ```rust pub enum SqlValue { Null, @@ -64,27 +67,25 @@ pub enum SqlValue { Int(i64), Float(f64), Text(String), - Bytes(Vec), - Date(chrono::NaiveDate), - Time(chrono::NaiveTime), - DateTime(chrono::NaiveDateTime), - Json(serde_json::Value), + List(Vec), } ``` - + ### JoinClause - + ```rust -pub enum JoinKind { Inner, LeftOuter, RightOuter, FullOuter, Cross } - +pub enum JoinKind { Inner, LeftOuter, RightOuter, FullOuter, CrossJoin } + pub struct JoinClause { - pub table: String, - pub condition: String, pub kind: JoinKind, + pub table: String, pub alias: Option, + pub on_left: String, + pub on_right: String, } ``` + ## Compilation Process 1. 
**SELECT clause** — `columns` or `*` From 6dea1c8cea9fbf84ba30f22ed6ef5d12e8740088 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 11:14:34 +0000 Subject: [PATCH 02/10] feat: implement PoolRegistry for multi-database support --- src/executor.rs | 77 +++++++++---------- src/lib.rs | 26 +++++-- src/pool.rs | 184 +++++++++++++++++++++++++-------------------- src/transaction.rs | 2 +- 4 files changed, 158 insertions(+), 131 deletions(-) diff --git a/src/executor.rs b/src/executor.rs index 585f293..8e6ec30 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -87,25 +87,22 @@ pub async fn fetch_all(query: CompiledQuery) -> RyxResult> { } return Err(RyxError::Internal("Transaction is no longer active".into())); } - - let pool = pool::get()?; - + + let pool = pool::get(None)?; + debug!(sql = %query.sql, "Executing SELECT"); - - // Build the sqlx query and bind all values. - // We use `sqlx::query()` (the dynamic version) because our SQL is - // constructed at runtime — we can't use the compile-time `query!` macro. + let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); - - // Fetch all rows and decode each one into a DecodedRow. - let rows = q.fetch_all(pool).await.map_err(RyxError::Database)?; - + + let rows = q.fetch_all(&*pool).await.map_err(RyxError::Database)?; + let decoded = rows.iter().map(decode_row).collect(); Ok(decoded) } - + /// Execute a SELECT COUNT(*) query and return the count. + /// /// # Errors /// Same as [`fetch_all`]. @@ -118,7 +115,6 @@ pub async fn fetch_count(query: CompiledQuery) -> RyxResult { if rows.is_empty() { return Ok(0); } - // COUNT() returns a single column whose name may vary by backend. 
if let Some(value) = rows[0].values().next() { if let Some(i) = value.as_i64() { return Ok(i); @@ -133,26 +129,25 @@ pub async fn fetch_count(query: CompiledQuery) -> RyxResult { } return Err(RyxError::Internal("Transaction is no longer active".into())); } - - let pool = pool::get()?; - + + let pool = pool::get(None)?; + debug!(sql = %query.sql, "Executing COUNT"); - + let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); - - let row = q.fetch_one(pool).await.map_err(RyxError::Database)?; - - // COUNT(*) always returns a single column. We try to get it as i64 - // first (Postgres/SQLite), then fall back to i32 (some MySQL versions). + + let row = q.fetch_one(&*pool).await.map_err(RyxError::Database)?; + let count: i64 = row.try_get(0).unwrap_or_else(|_| { let n: i32 = row.try_get(0).unwrap_or(0); n as i64 }); - + Ok(count) } + /// Execute a SELECT and return at most one row. /// /// # Errors @@ -178,16 +173,16 @@ pub async fn fetch_one(query: CompiledQuery) -> RyxResult { Err(RyxError::Internal("Transaction is no longer active".into())) } } else { - let pool = pool::get()?; - + let pool = pool::get(None)?; + let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); - + // Limit to 2 at the executor level (the QueryNode may already have // LIMIT 1 set by `.first()`, but for `.get()` it doesn't). // We check the count in Rust rather than adding SQL complexity. - let rows = q.fetch_all(pool).await.map_err(RyxError::Database)?; - + let rows = q.fetch_all(&*pool).await.map_err(RyxError::Database)?; + match rows.len() { 0 => Err(RyxError::DoesNotExist), 1 => Ok(decode_row(&rows[0])), @@ -196,6 +191,7 @@ pub async fn fetch_one(query: CompiledQuery) -> RyxResult { } } + /// Execute an INSERT, UPDATE, or DELETE query. 
/// /// For INSERT queries with `RETURNING` clause, this fetches the returned @@ -228,37 +224,38 @@ pub async fn execute(query: CompiledQuery) -> RyxResult { } return Err(RyxError::Internal("Transaction is no longer active".into())); } - - let pool = pool::get()?; - + + let pool = pool::get(None)?; + debug!(sql = %query.sql, "Executing mutation"); - + // Check if this is a RETURNING query (e.g. INSERT ... RETURNING id) if query.sql.to_uppercase().contains("RETURNING") { let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); - - let rows = q.fetch_all(pool).await.map_err(RyxError::Database)?; - + + let rows = q.fetch_all(&*pool).await.map_err(RyxError::Database)?; + let last_insert_id = rows.first().and_then(|row| row.try_get::(0).ok()); - + return Ok(MutationResult { rows_affected: rows.len() as u64, last_insert_id, }); } - + let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); - - let result = q.execute(pool).await.map_err(RyxError::Database)?; - + + let result = q.execute(&*pool).await.map_err(RyxError::Database)?; + Ok(MutationResult { rows_affected: result.rows_affected(), last_insert_id: None, }) } + // ### // Internal helpers // ### diff --git a/src/lib.rs b/src/lib.rs index 0a7e04c..d4a2c9d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,7 @@ use crate::transaction::TransactionHandle; #[pyfunction] #[pyo3(signature = ( - url, + urls, max_connections = 10, min_connections = 1, connect_timeout = 30, @@ -37,13 +37,22 @@ use crate::transaction::TransactionHandle; ))] fn setup<'py>( py: Python<'py>, - url: String, + urls: Bound<'_, PyAny>, max_connections: u32, min_connections: u32, connect_timeout: u64, idle_timeout: u64, max_lifetime: u64, ) -> PyResult> { + let urls_py = urls.downcast::()?; + let mut database_urls = HashMap::new(); + + for (key, value) in urls_py.iter() { + let alias = key.downcast::()?.to_str()?.to_string(); + let url = value.downcast::()?.to_str()?.to_string(); + database_urls.insert(alias, url); + } 
+ let config = PoolConfig { max_connections, min_connections, @@ -52,7 +61,7 @@ fn setup<'py>( max_lifetime_secs: max_lifetime, }; pyo3_async_runtimes::tokio::future_into_py(py, async move { - pool::initialize(&url, config).await.map_err(PyErr::from)?; + pool::initialize(database_urls, config).await.map_err(PyErr::from)?; Python::attach(|py| Ok(py.None().into_pyobject(py)?.unbind())) }) } @@ -79,17 +88,18 @@ fn list_transforms() -> Vec<&'static str> { #[pyfunction] -fn is_connected(py: Python<'_>) -> bool { +fn is_connected(_py: Python<'_>, alias: Option) -> bool { + // For now we just check if the registry is initialized pool::is_initialized() } #[pyfunction] -fn pool_stats(py: Python<'_>) -> PyResult> { - let stats = pool::stats().map_err(PyErr::from)?; +fn pool_stats<'py>(py: Python<'py>, alias: Option) -> PyResult> { + let stats = pool::stats(alias.as_deref()).map_err(PyErr::from)?; let dict = PyDict::new(py); dict.set_item("size", stats.size)?; dict.set_item("idle", stats.idle)?; - Ok(dict.into()) + Ok(dict.into_any()) } #[pyfunction] @@ -134,7 +144,7 @@ impl PyQueryBuilder { #[new] fn new(table: String) -> PyResult { // Get the backend from the pool at QueryBuilder creation time - let backend = pool::get_backend().unwrap_or(ryx_query::Backend::PostgreSQL); + let backend = pool::get_backend(None).unwrap_or(ryx_query::Backend::PostgreSQL); Ok(Self { node: QueryNode::select(table).with_backend(backend), diff --git a/src/pool.rs b/src/pool.rs index 38dd92d..0f918ff 100644 --- a/src/pool.rs +++ b/src/pool.rs @@ -24,35 +24,31 @@ // multiple threads race to call `setup()`. Subsequent reads are lock-free. 
// ### -use std::sync::OnceLock; - +use std::collections::HashMap; +use std::sync::{Arc, OnceLock, RwLock}; + use serde::{Deserialize, Serialize}; use sqlx::{ AnyPool, any::{AnyPoolOptions, install_default_drivers}, }; use tracing::{debug, info}; - + use crate::errors::{RyxError, RyxResult}; use ryx_query::Backend; -// ### -// Global singleton -// -// We use `std::sync::OnceLock` (stable since Rust 1.70) rather than -// `once_cell::sync::OnceCell` to avoid an extra dependency for this specific -// use case. OnceLock is conceptually identical. -// ### +/// A registry of database connection pools. +/// Allows multiple databases to be configured and accessed via aliases. +pub struct PoolRegistry { + /// Map of alias (e.g., "default", "replica") to the connection pool and its backend. + pub pools: HashMap, Backend)>, + /// The alias used when no specific database is requested. + pub default_alias: String, +} -/// The single global connection pool for this process. -/// -/// Initialized exactly once by `initialize()`. All ORM operations retrieve -/// the pool via `get()`. -static POOL: OnceLock = OnceLock::new(); +/// Global singleton for the pool registry. +static REGISTRY: OnceLock> = OnceLock::new(); -/// The backend type for the initialized pool. -/// Set at initialization time based on the database URL. -static BACKEND: OnceLock = OnceLock::new(); // ### // Pool configuration options @@ -105,96 +101,120 @@ impl Default for PoolConfig { // // Public API // - -/// Initialize the global connection pool. +/// Initialize the global connection pool registry. /// /// # Arguments -/// * `database_url` — a standard database URL, e.g.: -/// - `"postgres://user:pass@localhost/dbname"` -/// - `"mysql://user:pass@localhost/dbname"` -/// - `"sqlite:///path/to/db.sqlite3"` or `"sqlite::memory:"` -/// * `config` — optional pool tuning parameters (see [`PoolConfig`]) +/// * `database_urls` — a map of aliases to database URLs. 
+/// Example: `{"default": "postgres://...", "logs": "sqlite://..."}` +/// * `config` — pool tuning parameters (see [`PoolConfig`]) /// /// # Errors /// - [`RyxError::PoolAlreadyInitialized`] if called more than once -/// - [`RyxError::Database`] if the URL is invalid or the DB is unreachable -/// -/// # Design note -/// We call `install_default_drivers()` here. This registers the Postgres, -/// MySQL, and SQLite drivers with sqlx's `AnyPool` machinery. Without this -/// call, `AnyPool::connect()` panics with "no driver for scheme". The call -/// is idempotent so it's safe to call multiple times (though we only ever -/// call it once via OnceLock). -pub async fn initialize(database_url: &str, config: PoolConfig) -> RyxResult<()> { +/// - [`RyxError::Database`] if any URL is invalid or DB is unreachable +pub async fn initialize(database_urls: HashMap, config: PoolConfig) -> RyxResult<()> { // Register all built-in sqlx drivers with AnyPool. - // This must be called before any AnyPool operation. install_default_drivers(); + + if database_urls.is_empty() { + return Err(RyxError::Internal("No database URLs provided for initialization".into())); + } - debug!(url = %database_url, "Initializing Ryx connection pool"); - - let pool = AnyPoolOptions::new() - .max_connections(config.max_connections) - .min_connections(config.min_connections) - .acquire_timeout(std::time::Duration::from_secs(config.connect_timeout_secs)) - .idle_timeout(std::time::Duration::from_secs(config.idle_timeout_secs)) - .max_lifetime(std::time::Duration::from_secs(config.max_lifetime_secs)) - .connect(database_url) - .await - .map_err(RyxError::Database)?; - - // OnceLock::set returns Err(value) if already set. - // We return our own error type to give a clearer message to users. 
- POOL.set(pool) + debug!(urls = ?database_urls, "Initializing Ryx connection pool registry"); + + let mut pools = HashMap::new(); + let mut first_alias = None; + + for (alias, url) in database_urls { + if first_alias.is_none() { + first_alias = Some(alias.clone()); + } + + let pool = AnyPoolOptions::new() + .max_connections(config.max_connections) + .min_connections(config.min_connections) + .acquire_timeout(std::time::Duration::from_secs(config.connect_timeout_secs)) + .idle_timeout(std::time::Duration::from_secs(config.idle_timeout_secs)) + .max_lifetime(std::time::Duration::from_secs(config.max_lifetime_secs)) + .connect(&url) + .await + .map_err(RyxError::Database)?; + + let backend = ryx_query::backend::detect_backend(&url); + pools.insert(alias, (Arc::new(pool), backend)); + } + + // Determine the default alias + let default_alias = if pools.contains_key("default") { + "default".to_string() + } else { + first_alias.expect("Registry cannot be empty") + }; + + let registry = PoolRegistry { + pools, + default_alias, + }; + + REGISTRY.set(RwLock::new(registry)) .map_err(|_| RyxError::PoolAlreadyInitialized)?; - - // Set the backend type based on the URL - let backend = ryx_query::backend::detect_backend(database_url); - BACKEND.set(backend).ok(); - - info!("Ryx connection pool initialized successfully"); + + info!("Ryx connection pool registry initialized successfully"); Ok(()) } - -/// Retrieve a reference to the global connection pool. + +/// Retrieve a reference to a specific connection pool. +/// +/// # Arguments +/// * `alias` — the pool alias to retrieve. If `None`, the default pool is used. /// /// # Errors -/// Returns [`RyxError::PoolNotInitialized`] if `initialize()` has not been -/// called. Every ORM operation calls this first, so users get a clear error -/// message rather than a panic. 
-pub fn get() -> RyxResult<&'static AnyPool> { - POOL.get().ok_or(RyxError::PoolNotInitialized) +/// Returns [`RyxError::PoolNotInitialized`] if `initialize()` has not been called, +/// or if the specified alias does not exist. +pub fn get(alias: Option<&str>) -> RyxResult> { + let registry_lock = REGISTRY.get().ok_or(RyxError::PoolNotInitialized)?; + let registry = registry_lock.read().unwrap(); + + let target_alias = alias.unwrap_or(®istry.default_alias); + + registry.pools.get(target_alias) + .map(|(pool, _)| pool.clone()) + .ok_or_else(|| RyxError::Internal(format!("Database pool '{}' not found", target_alias))) } - -/// Check whether the pool has been initialized without consuming it. -/// Useful for diagnostic / health-check endpoints. + +/// Check whether the pool registry has been initialized. pub fn is_initialized() -> bool { - POOL.get().is_some() + REGISTRY.get().is_some() } - -/// Retrieve the current backend type. + +/// Retrieve the backend type for a specific pool. /// /// # Errors -/// Returns [`RyxError::PoolNotInitialized`] if `initialize()` has not been called. -pub fn get_backend() -> RyxResult { - BACKEND.get().copied().ok_or(RyxError::PoolNotInitialized) +/// Returns [`RyxError::PoolNotInitialized`] if the registry is not set up, +/// or if the specified alias does not exist. +pub fn get_backend(alias: Option<&str>) -> RyxResult { + let registry_lock = REGISTRY.get().ok_or(RyxError::PoolNotInitialized)?; + let registry = registry_lock.read().unwrap(); + + let target_alias = alias.unwrap_or(®istry.default_alias); + + registry.pools.get(target_alias) + .map(|(_, backend)| *backend) + .ok_or_else(|| RyxError::Internal(format!("Database pool '{}' not found", target_alias))) } - -/// Return pool statistics as a simple struct. -/// Exposed to Python for monitoring and debugging. + +/// Return pool statistics for a specific pool. #[derive(Debug)] pub struct PoolStats { pub size: u32, pub idle: u32, } - -/// Retrieve current pool statistics. 
-/// -/// # Errors -/// Returns [`RyxError::PoolNotInitialized`] if the pool is not yet set up. -pub fn stats() -> RyxResult { - let pool = get()?; + +/// Retrieve current pool statistics for a specific pool. +pub fn stats(alias: Option<&str>) -> RyxResult { + let pool = get(alias)?; Ok(PoolStats { size: pool.size(), idle: pool.num_idle() as u32, }) } + diff --git a/src/transaction.rs b/src/transaction.rs index d5740fd..b8e57e7 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -71,7 +71,7 @@ pub struct TransactionHandle { impl TransactionHandle { /// Begin a new transaction by acquiring a connection from the pool. pub async fn begin() -> RyxResult { - let pool = pool::get()?; + let pool = pool::get(None)?; debug!("Beginning transaction"); let tx = pool.begin().await.map_err(RyxError::Database)?; From 5defeef35911b50e7416ad2cd69bed0cfb7621ee Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 11:26:37 +0000 Subject: [PATCH 03/10] feat: implement db_alias propagation from AST to Executor --- ryx-query/src/ast.rs | 10 +++++++++- ryx-query/src/compiler/compiler.rs | 7 ++++++- src/executor.rs | 8 ++++---- src/lib.rs | 22 +++++++++++++++++++--- 4 files changed, 38 insertions(+), 9 deletions(-) diff --git a/ryx-query/src/ast.rs b/ryx-query/src/ast.rs index 13b00c9..2701b6a 100644 --- a/ryx-query/src/ast.rs +++ b/ryx-query/src/ast.rs @@ -253,7 +253,8 @@ pub enum QueryOperation { #[derive(Debug, Clone)] pub struct QueryNode { pub table: String, - pub backend: Backend, // Database backend for SQL generation + pub backend: Backend, // Database backend for SQL generation + pub db_alias: Option, // Optional alias for multi-db routing pub operation: QueryOperation, // # WHERE @@ -287,6 +288,7 @@ impl QueryNode { Self { table: table.into(), backend: Backend::PostgreSQL, // default, will be overridden at runtime + db_alias: None, operation: QueryOperation::Select { columns: None }, filters: Vec::new(), q_filter: None, @@ -377,4 +379,10 @@ impl QueryNode { 
self.backend = backend; self } + + #[must_use] + pub fn with_db_alias(mut self, alias: String) -> Self { + self.db_alias = Some(alias); + self + } } diff --git a/ryx-query/src/compiler/compiler.rs b/ryx-query/src/compiler/compiler.rs index 98faa35..7ddab47 100644 --- a/ryx-query/src/compiler/compiler.rs +++ b/ryx-query/src/compiler/compiler.rs @@ -25,6 +25,7 @@ use super::helpers; pub struct CompiledQuery { pub sql: String, pub values: Vec, + pub db_alias: Option, } pub fn compile(node: &QueryNode) -> QueryResult { @@ -42,7 +43,11 @@ pub fn compile(node: &QueryNode) -> QueryResult { returning_id, } => compile_insert(node, cv, *returning_id, &mut values)?, }; - Ok(CompiledQuery { sql, values }) + Ok(CompiledQuery { + sql, + values, + db_alias: node.db_alias.clone(), + }) } fn compile_select( diff --git a/src/executor.rs b/src/executor.rs index 8e6ec30..f37dbb1 100644 --- a/src/executor.rs +++ b/src/executor.rs @@ -88,7 +88,7 @@ pub async fn fetch_all(query: CompiledQuery) -> RyxResult> { return Err(RyxError::Internal("Transaction is no longer active".into())); } - let pool = pool::get(None)?; + let pool = pool::get(query.db_alias.as_deref())?; debug!(sql = %query.sql, "Executing SELECT"); @@ -130,7 +130,7 @@ pub async fn fetch_count(query: CompiledQuery) -> RyxResult { return Err(RyxError::Internal("Transaction is no longer active".into())); } - let pool = pool::get(None)?; + let pool = pool::get(query.db_alias.as_deref())?; debug!(sql = %query.sql, "Executing COUNT"); @@ -173,7 +173,7 @@ pub async fn fetch_one(query: CompiledQuery) -> RyxResult { Err(RyxError::Internal("Transaction is no longer active".into())) } } else { - let pool = pool::get(None)?; + let pool = pool::get(query.db_alias.as_deref())?; let mut q = sqlx::query(&query.sql); q = bind_values(q, &query.values); @@ -225,7 +225,7 @@ pub async fn execute(query: CompiledQuery) -> RyxResult { return Err(RyxError::Internal("Transaction is no longer active".into())); } - let pool = pool::get(None)?; + let 
pool = pool::get(query.db_alias.as_deref())?; debug!(sql = %query.sql, "Executing mutation"); diff --git a/src/lib.rs b/src/lib.rs index d4a2c9d..c62f0ca 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -108,6 +108,7 @@ fn raw_fetch<'py>(py: Python<'py>, sql: String) -> PyResult> { let compiled = compiler::CompiledQuery { sql, values: vec![], + db_alias: None, }; let rows = executor::fetch_all(compiled).await.map_err(PyErr::from)?; Python::attach(|py| { @@ -116,19 +117,21 @@ fn raw_fetch<'py>(py: Python<'py>, sql: String) -> PyResult> { }) }) } - + #[pyfunction] fn raw_execute<'py>(py: Python<'py>, sql: String) -> PyResult> { pyo3_async_runtimes::tokio::future_into_py(py, async move { let compiled = compiler::CompiledQuery { sql, values: vec![], + db_alias: None, }; executor::execute(compiled).await.map_err(PyErr::from)?; Python::attach(|py| Ok(py.None().into_pyobject(py)?.unbind())) }) } + // ### // QueryBuilder // ### @@ -150,6 +153,12 @@ impl PyQueryBuilder { node: QueryNode::select(table).with_backend(backend), }) } + + fn set_using(&self, alias: String) -> PyResult { + Ok(PyQueryBuilder { + node: self.node.clone().with_db_alias(alias), + }) + } fn add_filter( &self, @@ -672,17 +681,19 @@ fn execute_with_params<'py>( .iter() .map(py_to_sql_value) .collect::>()?; - + pyo3_async_runtimes::tokio::future_into_py(py, async move { let compiled = compiler::CompiledQuery { sql, values: sql_values, + db_alias: None, }; let result = executor::execute(compiled).await.map_err(PyErr::from)?; Python::attach(|py| Ok(result.rows_affected.into_pyobject(py)?.unbind())) }) } + #[pyfunction] fn fetch_with_params<'py>( py: Python<'py>, @@ -693,17 +704,20 @@ fn fetch_with_params<'py>( .iter() .map(py_to_sql_value) .collect::>()?; - + pyo3_async_runtimes::tokio::future_into_py(py, async move { let compiled = compiler::CompiledQuery { sql, values: sql_values, + db_alias: None, }; let rows = executor::fetch_all(compiled).await.map_err(PyErr::from)?; Python::attach(|py| 
Ok(decoded_rows_to_py(py, rows)?.unbind())) }) } + + /// Bulk delete by primary key list in a single FFI call. /// /// Equivalent to: @@ -734,6 +748,7 @@ fn bulk_delete<'py>( let compiled = compiler::CompiledQuery { sql, values: pk_values, + db_alias: None, }; let result = executor::execute(compiled).await.map_err(PyErr::from)?; Python::attach(|py| { @@ -817,6 +832,7 @@ fn bulk_update<'py>( let compiled = compiler::CompiledQuery { sql, values: all_values, + db_alias: None, }; let result = executor::execute(compiled).await.map_err(PyErr::from)?; Python::attach(|py| { From 855faf06d9f69eaf75e83ef5f89fd72812a1afc4 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 11:36:23 +0000 Subject: [PATCH 04/10] feat: implement multi-db routing in QuerySet and Model Meta --- ryx/models.py | 3 ++ ryx/queryset.py | 75 +++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 69 insertions(+), 9 deletions(-) diff --git a/ryx/models.py b/ryx/models.py index b5ad841..5dbab3e 100644 --- a/ryx/models.py +++ b/ryx/models.py @@ -93,6 +93,7 @@ class Options: Attributes: table_name : SQL table name. app_label : Optional namespace prefix. + database : Optional database alias (e.g. "logs"). fields : Ordered dict name → Field. many_to_many : Dict name → ManyToManyField (populated by M2M fields). pk_field : The primary key Field. 
@@ -113,7 +114,9 @@ def __init__(self, meta_class: Optional[type], model_name: str) -> None: self.table_name = _to_table_name(model_name) self.app_label: str = getattr(meta_class, "app_label", "") + self.database: Optional[str] = getattr(meta_class, "database", None) self.ordering: List[str] = list(getattr(meta_class, "ordering", [])) + self.unique_together: List[tuple] = list( getattr(meta_class, "unique_together", []) ) diff --git a/ryx/queryset.py b/ryx/queryset.py index 376fac8..e8eb549 100644 --- a/ryx/queryset.py +++ b/ryx/queryset.py @@ -256,6 +256,7 @@ def __init__( _select_columns: Optional[List[str]] = None, _annotations: Optional[List[dict]] = None, _group_by: Optional[List[str]] = None, + _using: Optional[str] = None, ) -> None: self._model = model @@ -265,6 +266,7 @@ def __init__( self._select_columns = _select_columns self._annotations = _annotations or [] self._group_by = _group_by or [] + self._using = _using def _clone(self, builder=None, **overrides) -> "QuerySet": return QuerySet( @@ -273,6 +275,7 @@ def _clone(self, builder=None, **overrides) -> "QuerySet": _select_columns=overrides.get("_select_columns", self._select_columns), _annotations=overrides.get("_annotations", list(self._annotations)), _group_by=overrides.get("_group_by", list(self._group_by)), + _using=overrides.get("_using", self._using), ) def _validate_filters(self, kwargs: Dict[str, Any]) -> None: @@ -558,8 +561,12 @@ def stream( ) def using(self, alias: str) -> "QuerySet": - """Stub for multi-database routing (planned feature).""" - return self._clone() + """Switch the database used for this query. 
+ + Example:: + posts = await Post.objects.using("replica").filter(active=True) + """ + return self._clone(_using=alias) # Evaluation (async) def cache( @@ -606,14 +613,35 @@ def __await__(self): return self._execute().__await__() async def _execute(self) -> list: - raw_rows = await self._builder.fetch_all() + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + + raw_rows = await builder.fetch_all() return [self._model._from_row(row) for row in raw_rows] async def count(self) -> int: - return await self._builder.fetch_count() + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + + return await builder.fetch_count() async def first(self) -> Optional["Model"]: - raw = await self._builder.set_limit(1).fetch_first() + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + + raw = await builder.set_limit(1).fetch_first() return None if raw is None else self._model._from_row(raw) async def last(self) -> Optional["Model"]: @@ -625,8 +653,16 @@ async def last(self) -> Optional["Model"]: async def get(self, *q_args: Q, **kwargs: Any) -> "Model": """Return exactly one instance. 
Raises DoesNotExist / MultipleObjectsReturned.""" qs = self.filter(*q_args, **kwargs) if (q_args or kwargs) else self + + # Resolve database alias: .using() -> Meta.database -> default + alias = qs._using or qs._model._meta.database + + builder = qs._builder + if alias: + builder = builder.set_using(alias) + try: - raw = await qs._builder.fetch_get() + raw = await builder.fetch_get() except RuntimeError as e: msg = str(e) if "No matching" in msg: @@ -641,13 +677,27 @@ async def get(self, *q_args: Q, **kwargs: Any) -> "Model": return self._model._from_row(raw) async def exists(self) -> bool: - return await self.count() > 0 + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + + return await builder.count() > 0 async def delete(self) -> int: """Bulk delete. Fires pre_bulk_delete / post_bulk_delete signals.""" + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + await pre_bulk_delete.send(sender=self._model, queryset=self) - n = await self._builder.execute_delete() + n = await builder.execute_delete() await post_bulk_delete.send(sender=self._model, queryset=self, deleted_count=n) return n @@ -658,8 +708,15 @@ async def bulk_delete(self) -> int: async def update(self, **kwargs: Any) -> int: """Bulk update. 
Fires pre_update / post_update signals.""" + # Resolve database alias: .using() -> Meta.database -> default + alias = self._using or self._model._meta.database + + builder = self._builder + if alias: + builder = builder.set_using(alias) + await pre_update.send(sender=self._model, queryset=self, fields=kwargs) - n = await self._builder.execute_update(list(kwargs.items())) + n = await builder.execute_update(list(kwargs.items())) await post_update.send( sender=self._model, queryset=self, updated_count=n, fields=kwargs ) From 5f1748ad3a1a5ae26d64e7db8b0759dedbbf24a2 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 14:22:23 +0000 Subject: [PATCH 05/10] feat: implement dynamic database routing with BaseRouter --- ryx/models.py | 30 +++++++++++++++++++-- ryx/queryset.py | 71 ++++++++++++++++++++++++++++++++++++------------- ryx/router.py | 49 ++++++++++++++++++++++++++++++++++ 3 files changed, 130 insertions(+), 20 deletions(-) create mode 100644 ryx/router.py diff --git a/ryx/models.py b/ryx/models.py index 5dbab3e..0873287 100644 --- a/ryx/models.py +++ b/ryx/models.py @@ -28,7 +28,7 @@ async def after_delete(self) → post-SQL hook import re from datetime import datetime -from typing import Any, Dict, List, Optional, Type +from typing import Any, Dict, List, Optional from ryx import ryx_core as _core from ryx.exceptions import DoesNotExist, MultipleObjectsReturned @@ -161,7 +161,7 @@ class Manager: """Default query manager. 
Proxies to QuerySet.""" def __init__(self) -> None: - self._model: Optional[type] = None + self._model: Optional[type[Model]] = None def contribute_to_class(self, model: type, name: str) -> None: self._model = model @@ -532,6 +532,16 @@ async def save( # pre_save signal await pre_save.send(sender=type(self), instance=self, created=created) + # Resolve database alias: Router.db_for_write -> Meta.database -> 'default' + from ryx.router import get_router + + router = get_router() + alias = None + if router: + alias = router.db_for_write(type(self)) + if not alias: + alias = self._meta.database + # SQL execution # Creation if created: @@ -545,6 +555,8 @@ async def save( (f.column, f.to_db(getattr(self, f.attname))) for f in fields_to_save ] builder = _core.QueryBuilder(self._meta.table_name) + if alias: + builder = builder.set_using(alias) new_id = await builder.execute_insert(values, returning_id=True) if self._meta.pk_field: object.__setattr__(self, self._meta.pk_field.attname, new_id) @@ -569,6 +581,8 @@ async def save( ] pk_field = self._meta.pk_field builder = _core.QueryBuilder(self._meta.table_name) + if alias: + builder = builder.set_using(alias) builder = builder.add_filter( pk_field.column, "exact", self.pk, negated=False ) @@ -594,10 +608,22 @@ async def delete(self) -> None: await self.before_delete() await pre_delete.send(sender=type(self), instance=self) + # Resolve database alias: Router.db_for_write -> Meta.database -> 'default' + from ryx.router import get_router + + router = get_router() + alias = None + if router: + alias = router.db_for_write(type(self)) + if not alias: + alias = self._meta.database + from ryx import ryx_core as _core pk_field = self._meta.pk_field builder = _core.QueryBuilder(self._meta.table_name) + if alias: + builder = builder.set_using(alias) builder = builder.add_filter(pk_field.column, "exact", self.pk, negated=False) await builder.execute_delete() diff --git a/ryx/queryset.py b/ryx/queryset.py index e8eb549..512dc0f 100644 
--- a/ryx/queryset.py +++ b/ryx/queryset.py @@ -612,9 +612,39 @@ def cache( def __await__(self): return self._execute().__await__() + def _resolve_db_alias(self, operation: str = "read") -> str: + """ + Resolve the database alias based on priority: + 1. .using(alias) + 2. Router.db_for_read/write + 3. Model.Meta.database + 4. 'default' + """ + # 1. Explicitly set via .using() + if self._using: + return self._using + + # 2. Dynamic Router + from ryx.router import get_router + + router = get_router() + if router: + if operation == "read": + res = router.db_for_read(self._model) + else: + res = router.db_for_write(self._model) + if res: + return res + + # 3. Model Meta + if self._model._meta.database: + return self._model._meta.database + + # 4. Fallback + return "default" + async def _execute(self) -> list: - # Resolve database alias: .using() -> Meta.database -> default - alias = self._using or self._model._meta.database + alias = self._resolve_db_alias("read") builder = self._builder if alias: @@ -624,8 +654,7 @@ async def _execute(self) -> list: return [self._model._from_row(row) for row in raw_rows] async def count(self) -> int: - # Resolve database alias: .using() -> Meta.database -> default - alias = self._using or self._model._meta.database + alias = self._resolve_db_alias("read") builder = self._builder if alias: @@ -634,8 +663,7 @@ async def count(self) -> int: return await builder.fetch_count() async def first(self) -> Optional["Model"]: - # Resolve database alias: .using() -> Meta.database -> default - alias = self._using or self._model._meta.database + alias = self._resolve_db_alias("read") builder = self._builder if alias: @@ -644,18 +672,11 @@ async def first(self) -> Optional["Model"]: raw = await builder.set_limit(1).fetch_first() return None if raw is None else self._model._from_row(raw) - async def last(self) -> Optional["Model"]: - # Support explicit ordering from .order_by(...).last(). - # If no rows, return None. 
- results = await self._execute() - return results[-1] if results else None - async def get(self, *q_args: Q, **kwargs: Any) -> "Model": """Return exactly one instance. Raises DoesNotExist / MultipleObjectsReturned.""" qs = self.filter(*q_args, **kwargs) if (q_args or kwargs) else self - # Resolve database alias: .using() -> Meta.database -> default - alias = qs._using or qs._model._meta.database + alias = qs._resolve_db_alias("read") builder = qs._builder if alias: @@ -677,8 +698,7 @@ async def get(self, *q_args: Q, **kwargs: Any) -> "Model": return self._model._from_row(raw) async def exists(self) -> bool: - # Resolve database alias: .using() -> Meta.database -> default - alias = self._using or self._model._meta.database + alias = self._resolve_db_alias("read") builder = self._builder if alias: @@ -689,8 +709,7 @@ async def exists(self) -> bool: async def delete(self) -> int: """Bulk delete. Fires pre_bulk_delete / post_bulk_delete signals.""" - # Resolve database alias: .using() -> Meta.database -> default - alias = self._using or self._model._meta.database + alias = self._resolve_db_alias("write") builder = self._builder if alias: @@ -701,6 +720,22 @@ async def delete(self) -> int: await post_bulk_delete.send(sender=self._model, queryset=self, deleted_count=n) return n + async def update(self, **kwargs: Any) -> int: + """Bulk update. 
Fires pre_update / post_update signals.""" + + alias = self._resolve_db_alias("write") + + builder = self._builder + if alias: + builder = builder.set_using(alias) + + await pre_update.send(sender=self._model, queryset=self, fields=kwargs) + n = await builder.execute_update(list(kwargs.items())) + await post_update.send( + sender=self._model, queryset=self, updated_count=n, fields=kwargs + ) + return n + async def bulk_delete(self) -> int: """Alias for delete().""" return await self.delete() diff --git a/ryx/router.py b/ryx/router.py new file mode 100644 index 0000000..6fa6451 --- /dev/null +++ b/ryx/router.py @@ -0,0 +1,49 @@ +""" +Ryx ORM — Database Router + +A router allows you to automatically route queries to different databases +based on the model, the operation (read vs write), or other hints. +""" + +from __future__ import annotations +from typing import Any, Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from ryx.models import Model + + +class BaseRouter: + """ + Base class for database routers. + Override these methods to implement custom routing logic. + + Returning None tells Ryx to fall back to the model's Meta.database + or the global 'default' database. 
+ """ + + def db_for_read(self, model: type[Model], **hints: Any) -> Optional[str]: + """Return the alias of the database to use for read operations.""" + return None + + def db_for_write(self, model: type[Model], **hints: Any) -> Optional[str]: + """Return the alias of the database to use for write operations.""" + return None + + def allow_migrate(self, db: str, app_label: str, model_name: str) -> Optional[bool]: + """Return True/False to allow/disallow migrations on a specific DB.""" + return None + + +# Global router instance +_router: Optional[BaseRouter] = None + + +def set_router(router: BaseRouter) -> None: + """Set the global router for the application.""" + global _router + _router = router + + +def get_router() -> Optional[BaseRouter]: + """Retrieve the currently configured router.""" + return _router From b901f3d060ae651d90f25bea832be51693f57530 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 15:27:20 +0000 Subject: [PATCH 06/10] feat(core): implement multi-database pool registry and infrastructure --- ryx/ryx_core.pyi | 41 +++++++++++++++-------------------------- src/lib.rs | 44 ++++++++++++++++++++++++++++++++++---------- src/pool.rs | 14 ++++++++++++-- src/transaction.rs | 8 +++++--- 4 files changed, 66 insertions(+), 41 deletions(-) diff --git a/ryx/ryx_core.pyi b/ryx/ryx_core.pyi index 5387ec5..f9e44f5 100644 --- a/ryx/ryx_core.pyi +++ b/ryx/ryx_core.pyi @@ -35,12 +35,11 @@ from typing import Any, Optional __version__: str """Semver version of the compiled Rust core, e.g. ``"0.2.0"``.""" -# --------------------------------------------------------------------------- +# # Module-level functions -# --------------------------------------------------------------------------- - +# async def setup( - url: str, + urls: dict, max_connections: int = 10, min_connections: int = 1, connect_timeout: int = 30, @@ -139,7 +138,7 @@ def list_transforms() -> list[str]: ... 
-def is_connected() -> bool: +def is_connected(alias: str = 'default') -> bool: """Return ``True`` if ``setup()`` has been called successfully. Pure in-memory check — no database round-trip. @@ -354,10 +353,7 @@ class QueryBuilder: """ ... - # ------------------------------------------------------------------ # Filter / WHERE - # ------------------------------------------------------------------ - def add_filter( self, field: str, @@ -447,10 +443,7 @@ class QueryBuilder: """ ... - # ------------------------------------------------------------------ # Aggregation / GROUP BY - # ------------------------------------------------------------------ - def add_annotation( self, alias: str, @@ -495,10 +488,7 @@ class QueryBuilder: """ ... - # ------------------------------------------------------------------ # JOIN - # ------------------------------------------------------------------ - def add_join( self, kind: str, @@ -538,10 +528,7 @@ class QueryBuilder: """ ... - # ------------------------------------------------------------------ # Ordering / pagination - # ------------------------------------------------------------------ - def add_order_by(self, field: str) -> "QueryBuilder": """Append an ``ORDER BY`` term. @@ -597,10 +584,16 @@ class QueryBuilder: """ ... - # ------------------------------------------------------------------ - # Introspection - # ------------------------------------------------------------------ + def set_using(alias: str) -> "QueryBuilder": + """Set the database to use for this query + + Returns + ------- + A new ``QueryBuilder`` with bd_alias set to the new alias. + """ + ... + # Introspection def compiled_sql(self) -> str: """Return the compiled SQL string without executing the query. @@ -622,10 +615,7 @@ class QueryBuilder: """ ... 
- # ------------------------------------------------------------------ # Async execution - # ------------------------------------------------------------------ - async def fetch_all(self) -> list[dict[str, Any]]: """Execute the current SELECT and return all matching rows. @@ -799,10 +789,9 @@ class QueryBuilder: ... -# --------------------------------------------------------------------------- +# # TransactionHandle -# --------------------------------------------------------------------------- - +# class TransactionHandle: """A live database transaction, owned by the Rust ``Arc>>``. diff --git a/src/lib.rs b/src/lib.rs index c62f0ca..4bbba98 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -44,12 +44,12 @@ fn setup<'py>( idle_timeout: u64, max_lifetime: u64, ) -> PyResult> { - let urls_py = urls.downcast::()?; + let urls_py = urls.cast::()?; let mut database_urls = HashMap::new(); for (key, value) in urls_py.iter() { - let alias = key.downcast::()?.to_str()?.to_string(); - let url = value.downcast::()?.to_str()?.to_string(); + let alias = key.cast::()?.to_str()?.to_string(); + let url = value.cast::()?.to_str()?.to_string(); database_urls.insert(alias, url); } @@ -90,7 +90,7 @@ fn list_transforms() -> Vec<&'static str> { #[pyfunction] fn is_connected(_py: Python<'_>, alias: Option) -> bool { // For now we just check if the registry is initialized - pool::is_initialized() + pool::is_initialized(alias) } #[pyfunction] @@ -103,12 +103,17 @@ fn pool_stats<'py>(py: Python<'py>, alias: Option) -> PyResult(py: Python<'py>, sql: String) -> PyResult> { +#[pyo3(signature = (sql, alias=None))] +fn raw_fetch<'py>( + py: Python<'py>, + sql: String, + alias: Option, +) -> PyResult> { pyo3_async_runtimes::tokio::future_into_py(py, async move { let compiled = compiler::CompiledQuery { sql, values: vec![], - db_alias: None, + db_alias: alias, }; let rows = executor::fetch_all(compiled).await.map_err(PyErr::from)?; Python::attach(|py| { @@ -119,12 +124,17 @@ fn raw_fetch<'py>(py: 
Python<'py>, sql: String) -> PyResult> { } #[pyfunction] -fn raw_execute<'py>(py: Python<'py>, sql: String) -> PyResult> { +#[pyo3(signature = (sql, alias=None))] +fn raw_execute<'py>( + py: Python<'py>, + sql: String, + alias: Option, +) -> PyResult> { pyo3_async_runtimes::tokio::future_into_py(py, async move { let compiled = compiler::CompiledQuery { sql, values: vec![], - db_alias: None, + db_alias: alias, }; executor::execute(compiled).await.map_err(PyErr::from)?; Python::attach(|py| Ok(py.None().into_pyobject(py)?.unbind())) @@ -132,6 +142,7 @@ fn raw_execute<'py>(py: Python<'py>, sql: String) -> PyResult> } + // ### // QueryBuilder // ### @@ -571,6 +582,15 @@ pub struct PyTransactionHandle { #[pymethods] impl PyTransactionHandle { + fn get_alias(&self) -> PyResult> { + let h = self.handle.blocking_lock(); + if let Some(tx) = h.as_ref() { + Ok(tx.alias.clone()) + } else { + Ok(None) + } + } + fn commit<'py>(&self, py: Python<'py>) -> PyResult> { let h = self.handle.clone(); pyo3_async_runtimes::tokio::future_into_py(py, async move { @@ -635,9 +655,13 @@ impl PyTransactionHandle { } #[pyfunction] -fn begin_transaction<'py>(py: Python<'py>) -> PyResult> { +fn begin_transaction<'py>( + py: Python<'py>, + alias: Option>, +) -> PyResult> { + let alias_str = alias.map(|s| s.to_string()); pyo3_async_runtimes::tokio::future_into_py(py, async move { - let handle = TransactionHandle::begin().await.map_err(PyErr::from)?; + let handle = TransactionHandle::begin(alias_str).await.map_err(PyErr::from)?; Python::attach(|py| { let py_handle = PyTransactionHandle { handle: Arc::new(TokioMutex::new(Some(handle))), diff --git a/src/pool.rs b/src/pool.rs index 0f918ff..2bc6e1b 100644 --- a/src/pool.rs +++ b/src/pool.rs @@ -182,8 +182,18 @@ pub fn get(alias: Option<&str>) -> RyxResult> { } /// Check whether the pool registry has been initialized. 
-pub fn is_initialized() -> bool { - REGISTRY.get().is_some() +pub fn is_initialized(alias: Option) -> bool { + + // Alias provided + if alias.is_some(){ + REGISTRY.get().is_some_and(|f| { + f.read().is_ok_and(|pc| pc.pools.contains_key(alias.unwrap().as_str())) + }) + } + // Else is the registry not none? + else { + REGISTRY.get().is_some() + } } /// Retrieve the backend type for a specific pool. diff --git a/src/transaction.rs b/src/transaction.rs index b8e57e7..481584a 100644 --- a/src/transaction.rs +++ b/src/transaction.rs @@ -66,18 +66,20 @@ pub fn get_current_transaction() -> Option>> pub struct TransactionHandle { inner: Arc>>>, savepoints: Vec, + pub alias: Option, } impl TransactionHandle { /// Begin a new transaction by acquiring a connection from the pool. - pub async fn begin() -> RyxResult { - let pool = pool::get(None)?; - debug!("Beginning transaction"); + pub async fn begin(alias: Option) -> RyxResult { + let pool = pool::get(alias.as_deref())?; + debug!("Beginning transaction for alias: {:?}", alias); let tx = pool.begin().await.map_err(RyxError::Database)?; Ok(Self { inner: Arc::new(Mutex::new(Some(tx))), savepoints: Vec::new(), + alias: alias.clone(), }) } From f2b963ac59b5583927bd1bcd926e4ccb445ab591 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 15:27:22 +0000 Subject: [PATCH 07/10] feat(api): add .using() routing and Model.Meta.database support --- ryx/__init__.py | 13 +++++++++---- ryx/models.py | 51 ++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 49 insertions(+), 15 deletions(-) diff --git a/ryx/__init__.py b/ryx/__init__.py index d4e6ec7..2a02f66 100644 --- a/ryx/__init__.py +++ b/ryx/__init__.py @@ -110,7 +110,7 @@ # Setup async def setup( - url: str, + urls: str | dict, # str | dict to maintain backward. *, max_connections: int = 10, min_connections: int = 1, @@ -119,8 +119,13 @@ async def setup( max_lifetime: int = 1800, ) -> None: """Initialize the ryx connection pool. 
Call once at startup.""" + + # For old versions wrap the url with a dict + if isinstance(urls, str): + urls = {'default': urls} + await _core.setup( - url, + urls, max_connections=max_connections, min_connections=min_connections, connect_timeout=connect_timeout, @@ -149,8 +154,8 @@ def available_transforms() -> list[str]: return list(_core.list_transforms()) -def is_connected() -> bool: - return _core.is_connected() +def is_connected(db_alias: str = 'default') -> bool: + return _core.is_connected(db_alias) def pool_stats() -> dict: diff --git a/ryx/models.py b/ryx/models.py index 0873287..598aa1b 100644 --- a/ryx/models.py +++ b/ryx/models.py @@ -160,8 +160,12 @@ def get_field(self, name: str) -> Field: class Manager: """Default query manager. Proxies to QuerySet.""" - def __init__(self) -> None: + def __init__(self, alias: Optional[str] = None) -> None: self._model: Optional[type[Model]] = None + self._alias = alias + + def contribute_to_class(self, model: type, name: str) -> None: + self._model = model def contribute_to_class(self, model: type, name: str) -> None: self._model = model @@ -169,7 +173,7 @@ def contribute_to_class(self, model: type, name: str) -> None: def get_queryset(self): from ryx.queryset import QuerySet - return QuerySet(self._model) + return QuerySet(self._model, _using=self._alias) # Proxy shortcuts def all(self): @@ -184,8 +188,12 @@ def exclude(self, *q, **kw): def order_by(self, *f): return self.get_queryset().order_by(*f) - def using(self, alias): - return self.get_queryset() # future: multi-db + def using(self, alias: str) -> "Manager": + """Return a new Manager bound to the specified database alias.""" + new_mgr = Manager() + new_mgr._model = self._model + new_mgr._alias = alias + return new_mgr def cache(self, **kw): return self.get_queryset().cache(**kw) @@ -229,7 +237,22 @@ async def count(self) -> int: async def create(self, **kw): """Create and save a new model instance.""" instance = self._model(**kw) - await instance.save() + + # 
Use the manager's alias if specified + from ryx.router import get_router + + router = get_router() + alias = None + if router: + alias = router.db_for_write(self._model) + if not alias: + alias = self._model._meta.database + if not alias: + alias = self._alias + + # We need a way to pass the alias to instance.save() + # Let's add an optional `using` argument to save() + await instance.save(using=alias) return instance async def get_or_create(self, defaults: Optional[dict] = None, **kw): @@ -505,7 +528,11 @@ async def full_clean(self) -> None: # Persistence async def save( - self, *, validate: bool = True, update_fields: Optional[List[str]] = None + self, + *, + validate: bool = True, + update_fields: Optional[List[str]] = None, + using: Optional[str] = None, ) -> None: """Save the instance to the database. @@ -516,6 +543,7 @@ async def save( Args: validate: Run field validators + clean() before SQL (default: True). update_fields: If given, only UPDATE these field names (reduces SQL chatter). + using: Explicitly specify the database alias to use. 
""" created = self.pk is None @@ -532,15 +560,16 @@ async def save( # pre_save signal await pre_save.send(sender=type(self), instance=self, created=created) - # Resolve database alias: Router.db_for_write -> Meta.database -> 'default' + # Resolve database alias: using -> Router.db_for_write -> Meta.database -> 'default' from ryx.router import get_router router = get_router() - alias = None - if router: - alias = router.db_for_write(type(self)) + alias = using if not alias: - alias = self._meta.database + if router: + alias = router.db_for_write(type(self)) + if not alias: + alias = self._meta.database # SQL execution # Creation From e0056a4005713be3d52884d4313acf58b624c019 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 15:27:28 +0000 Subject: [PATCH 08/10] feat(tx): support database aliases in transactions and smart nesting --- ryx/transaction.py | 48 ++++++++++++++++++++++++++-------------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/ryx/transaction.py b/ryx/transaction.py index 07d5af2..5c5f08a 100644 --- a/ryx/transaction.py +++ b/ryx/transaction.py @@ -73,7 +73,8 @@ class TransactionContext: value so callers can use explicit ``savepoint()`` / ``rollback_to()``. """ - def __init__(self) -> None: + def __init__(self, alias: Optional[str] = None) -> None: + self._alias = alias self._handle = None # set in __aenter__ self._savepoint_name: Optional[str] = None self._outer_token = None # for ContextVar reset @@ -84,20 +85,23 @@ def __init__(self) -> None: async def __aenter__(self): outer = _active_tx.get() + # If there is an outer transaction, check if it's for the same database. + # If it's for a different database, we treat this as a new outermost + # transaction for that specific database. if outer is not None: - # Nested transaction → SAVEPOINT - # We reuse the outer transaction's connection and create a named - # savepoint. The name is unique per nesting level. 
- sp_name = f"_Ryx_sp_{id(self)}" - self._savepoint_name = sp_name - await outer.savepoint(sp_name) - self._handle = outer - logger.debug("Nested transaction: created savepoint %s", sp_name) - else: - # Outermost transaction → BEGIN - self._handle = await _core.begin_transaction() - logger.debug("Transaction BEGIN") - + outer_alias = outer.get_alias() + if outer_alias == self._alias: + # Nested transaction on same DB → SAVEPOINT + sp_name = f"_Ryx_sp_{id(self)}" + self._savepoint_name = sp_name + await outer.savepoint(sp_name) + self._handle = outer + logger.debug("Nested transaction: created savepoint %s", sp_name) + return self._handle + + # Outermost transaction (or transaction on a different DB) → BEGIN + self._handle = await _core.begin_transaction(self._alias) + logger.debug("Transaction BEGIN (alias=%s)", self._alias) self._outer_token = _active_tx.set(self._handle) self._previous_tx = outer _core._set_active_transaction(self._handle) @@ -136,17 +140,20 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): return False -def transaction() -> TransactionContext: +def transaction(alias: Optional[str] = None) -> TransactionContext: """Return an async context manager for database transactions. Usage:: - - async with Ryx.transaction(): + async with ryx.transaction(): await Post.objects.create(title="Atomic post") await Tag.objects.create(name="python") + # Transaction on a specific database: + async with ryx.transaction(alias="user_db"): + await User.objects.create(name="Atomic user") + # With explicit handle for savepoints: - async with Ryx.transaction() as tx: + async with ryx.transaction() as tx: await Order.objects.create(total=99.99) await tx.savepoint("before_items") try: @@ -157,7 +164,6 @@ def transaction() -> TransactionContext: raise Nesting:: - async with Ryx.transaction(): # BEGIN ... async with Ryx.transaction(): # SAVEPOINT _Ryx_sp_... 
@@ -167,7 +173,7 @@ def transaction() -> TransactionContext: Returns: :class:`TransactionContext` — an async context manager. """ - return TransactionContext() + return TransactionContext(alias) def get_active_transaction(): @@ -178,7 +184,7 @@ def get_active_transaction(): Example:: - tx = Ryx.get_active_transaction() + tx = ryx.get_active_transaction() if tx: # we're inside a transaction — the next ORM call auto-enlists pass From 4447ad5fe0d3ae5804f07494ed88a878508e7412 Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 15:27:31 +0000 Subject: [PATCH 09/10] test(integration): add multi-database routing and transaction tests --- tests/integration/test_multi_db.py | 125 ++++++++++++++++++++++ tests/integration/test_multi_db_script.py | 71 ++++++++++++ tests/integration/test_simple_async.py | 8 ++ 3 files changed, 204 insertions(+) create mode 100644 tests/integration/test_multi_db.py create mode 100644 tests/integration/test_multi_db_script.py create mode 100644 tests/integration/test_simple_async.py diff --git a/tests/integration/test_multi_db.py b/tests/integration/test_multi_db.py new file mode 100644 index 0000000..6543240 --- /dev/null +++ b/tests/integration/test_multi_db.py @@ -0,0 +1,125 @@ +""" +Integration tests for multi-database support. 
+""" + +import pytest +from ryx import ryx_core +from ryx.models import Model +from ryx.fields import CharField, IntField +from ryx.router import BaseRouter, set_router +from ryx.exceptions import DoesNotExist + + +# Define models for multi-db testing +class User(Model): + name = CharField() + age = IntField() + + +class Log(Model): + message = CharField() + + class Meta: + database = "logs_db" + + +class TestRouter(BaseRouter): + def db_for_read(self, model, **hints): + if model == User: + return "user_db" + return None + + def db_for_write(self, model, **hints): + if model == User: + return "user_db" + return None + + +@pytest.fixture(autouse=True) +async def setup_multi_db(): + """Set up multiple databases for the module.""" + urls = { + "default": "sqlite::memory:", + "user_db": "sqlite::memory:", + "logs_db": "sqlite::memory:", + } + await ryx_core.setup(urls, 10, 1, 30, 600, 1800) + + # Create tables manually on all pools to ensure they exist for routing tests + for alias in urls: + await ryx_core.raw_execute( + f"CREATE TABLE {User._meta.table_name} (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)", + alias=alias, + ) + await ryx_core.raw_execute( + f"CREATE TABLE {Log._meta.table_name} (id INTEGER PRIMARY KEY, message TEXT)", + alias=alias, + ) + yield + # No explicit teardown needed for in-memory sqlite pools as they are replaced by next setup + + +@pytest.mark.asyncio +async def test_using_explicit_routing(): + """Test that .using(alias) routes queries to the correct database.""" + # Clear tables (manual cleanup for this specific test) + await ryx_core.raw_execute(f"DELETE FROM {User._meta.table_name}", alias="default") + await ryx_core.raw_execute(f"DELETE FROM {User._meta.table_name}", alias="user_db") + + await User.objects.create(name="Default User", age=30) + await User.objects.using("user_db").create(name="UserDB User", age=25) + + # Verify Default DB + default_users = await User.objects.all() + assert len(default_users) == 1 + assert 
default_users[0].name == "Default User" + + # Verify UserDB DB + user_db_users = await User.objects.using("user_db").all() + assert len(user_db_users) == 1 + assert user_db_users[0].name == "UserDB User" + + +@pytest.mark.asyncio +async def test_meta_database_routing(): + """Test that Model.Meta.database routes queries automatically.""" + # Clear tables + await ryx_core.raw_execute(f"DELETE FROM {Log._meta.table_name}", alias="default") + await ryx_core.raw_execute(f"DELETE FROM {Log._meta.table_name}", alias="logs_db") + + # Log should go to logs_db by default + await Log.objects.create(message="Log entry 1") + + # Verify it's in logs_db + logs_db_logs = await Log.objects.using("logs_db").all() + assert len(logs_db_logs) == 1 + assert logs_db_logs[0].message == "Log entry 1" + + # Verify it's NOT in default db + default_logs = await Log.objects.using("default").all() + assert len(default_logs) == 0 + + +@pytest.mark.asyncio +async def test_dynamic_router_routing(): + """Test that the configured Router routes queries dynamically.""" + set_router(TestRouter()) + + # Clear User tables + await ryx_core.raw_execute(f"DELETE FROM {User._meta.table_name}", alias="default") + await ryx_core.raw_execute(f"DELETE FROM {User._meta.table_name}", alias="user_db") + + # Router should route User to user_db + await User.objects.create(name="Routed User", age=40) + + # Verify it's in user_db + user_db_users = await User.objects.using("user_db").filter(name="Routed User").all() + assert len(user_db_users) == 1 + assert user_db_users[0].name == "Routed User" + + # Verify it's NOT in default db + default_users = await User.objects.using("default").filter(name="Routed User").all() + assert len(default_users) == 0 + + # Reset router for other tests + set_router(None) diff --git a/tests/integration/test_multi_db_script.py b/tests/integration/test_multi_db_script.py new file mode 100644 index 0000000..fbfcbe4 --- /dev/null +++ b/tests/integration/test_multi_db_script.py @@ -0,0 +1,71 @@ 
+import asyncio +from ryx import ryx_core +from ryx.models import Model +from ryx.fields import CharField, IntField +from ryx.router import BaseRouter, set_router +# from ryx.exceptions import DoesNotExist + + +class User(Model): + name = CharField() + age = IntField() + + +class Log(Model): + message = CharField() + + class Meta: + database = "logs_db" + + +class TestRouter(BaseRouter): + def db_for_read(self, model, **hints): + if model == User: + return "user_db" + return None + + def db_for_write(self, model, **hints): + if model == User: + return "user_db" + return None + + +async def main(): + urls = { + "default": "sqlite::memory:", + "user_db": "sqlite::memory:", + "logs_db": "sqlite::memory:", + } + await ryx_core.setup(urls, 10, 1, 30, 600, 1800) + + # Create tables manually + for alias in urls: + # Use ryx_core.raw_execute to create tables on specific pools + await ryx_core.raw_execute( + f"CREATE TABLE {User._meta.table_name} (id INTEGER PRIMARY KEY, name TEXT, age INTEGER)", + alias=alias, + ) + await ryx_core.raw_execute( + f"CREATE TABLE {Log._meta.table_name} (id INTEGER PRIMARY KEY, message TEXT)", + alias=alias, + ) + + # Test .using() + await User.objects.create(name="Default User", age=30) + await User.objects.using("user_db").create(name="UserDB User", age=25) + print("Explicit using: OK") + + # Test Meta.database + await Log.objects.create(message="Log entry 1") + log = await Log.objects.get(message="Log entry 1") + print(f"Meta database: OK ({log.message})") + + # Test Router + set_router(TestRouter()) + await User.objects.create(name="Routed User", age=40) + user = await User.objects.using("user_db").get(name="Routed User") + print(f"Dynamic router: OK ({user.name})") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/tests/integration/test_simple_async.py b/tests/integration/test_simple_async.py new file mode 100644 index 0000000..20b6afd --- /dev/null +++ b/tests/integration/test_simple_async.py @@ -0,0 +1,8 @@ +import 
pytest +import asyncio + + +@pytest.mark.asyncio +async def test_simple_async(): + await asyncio.sleep(0.1) + assert True From ce0f888bd8ef60505720323fbd3212d6e7fdec2d Mon Sep 17 00:00:00 2001 From: #Einswilli Date: Thu, 9 Apr 2026 15:27:38 +0000 Subject: [PATCH 10/10] docs: add multi-database support guide and update navigation --- docs/doc/advanced/index.mdx | 1 + docs/doc/advanced/multi-db.mdx | 99 ++++++++++++++++++++++++++++++++++ docs/docusaurus.config.js | 2 +- docs/docusaurus.config.ts | 4 +- docs/sidebars.js | 1 + 5 files changed, 104 insertions(+), 3 deletions(-) create mode 100644 docs/doc/advanced/multi-db.mdx diff --git a/docs/doc/advanced/index.mdx b/docs/doc/advanced/index.mdx index 3a33771..23e33d3 100644 --- a/docs/doc/advanced/index.mdx +++ b/docs/doc/advanced/index.mdx @@ -15,5 +15,6 @@ Deep-dive topics for production-ready applications. - **[Caching](./caching)** — Query result caching - **[Custom Lookups](./custom-lookups)** — Extend the query API - **[Sync/Async](./sync-async)** — Bridge between sync and async code +- **[Multi-Databases](./multi-db)** - Multi-Database Support - **[Raw SQL](./raw-sql)** — Escape hatch for complex queries - **[CLI](./cli)** — Command-line management commands diff --git a/docs/doc/advanced/multi-db.mdx b/docs/doc/advanced/multi-db.mdx new file mode 100644 index 0000000..046f0c0 --- /dev/null +++ b/docs/doc/advanced/multi-db.mdx @@ -0,0 +1,99 @@ +--- +sidebar_position: 11 +title: Multi-Database Support +description: Learn how to route queries across multiple databases in Ryx. +--- + +Ryx supports routing queries across multiple databases, allowing you to separate read and write workloads, split data across different servers, or use a dedicated database for specific models. + +## Configuration + +To enable multi-database support, provide a dictionary of URLs to `ryx_core.setup` instead of a single string. Each key in the dictionary serves as an **alias** for that database pool. 
+
+```python
+from ryx import ryx_core
+
+# Configure multiple databases
+urls = {
+    "default": "postgresql://user:pass@localhost/main_db",
+    "users": "postgresql://user:pass@localhost/user_db",
+    "logs": "sqlite::memory:",
+}
+
+await ryx_core.setup(urls)
+```
+
+## Routing Strategies
+
+Ryx resolves which database to use for a query in the following order of priority:
+
+1. **Explicit Routing**: Using `.using(alias)` on a QuerySet.
+2. **Dynamic Router**: Using a configured `BaseRouter`.
+3. **Model Metadata**: Using the `database` option in `Model.Meta`.
+4. **Default**: Falling back to the `'default'` alias.
+
+### Explicit Routing
+
+You can force a query to run on a specific database using the `.using()` method. This is useful for one-off queries or manual routing.
+
+```python
+# Read from the 'users' database
+users = await User.objects.using("users").all()
+
+# Write to the 'logs' database
+await Log.objects.using("logs").create(message="System boot")
+```
+
+### Model-Level Routing
+
+You can assign a model to a specific database by default using the `database` option in its `Meta` class.
+
+```python
+class Log(Model):
+    message = CharField()
+
+    class Meta:
+        database = "logs"
+```
+
+Any query on `Log` will now use the `logs` database unless overridden by `.using()`.
+
+### Dynamic Routing (The Router)
+
+For more complex logic (e.g., routing based on the environment, user, or model type), you can implement a custom router by inheriting from `BaseRouter`. 
+
+```python
+from ryx.router import BaseRouter, set_router
+
+class MyProjectRouter(BaseRouter):
+    def db_for_read(self, model, **hints):
+        if model.__name__ == "User":
+            return "users"
+        return None  # Fallback to default
+
+    def db_for_write(self, model, **hints):
+        if model.__name__ == "User":
+            return "users"
+        return None
+
+# Activate the router globally
+set_router(MyProjectRouter())
+```
+
+## Multi-Database Transactions
+
+Transactions in Ryx are tied to a specific database connection. To start a transaction on a non-default database, pass the `alias` to the `transaction()` context manager.
+
+```python
+import ryx
+
+async with ryx.transaction(alias="users"):
+    await User.objects.create(name="Alice")
+    await User.objects.create(name="Bob")
+    # If an exception occurs, only changes to 'users' DB are rolled back.
+```
+
+### Nesting and Multiple Databases
+
+- If you start a transaction on a database that already has an active transaction on the current task, Ryx creates a **SAVEPOINT**.
+- If you start a transaction on a *different* database while another is active, Ryx starts a new independent transaction for that database.
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 0bf37a6..f1c2db3 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -62,7 +62,7 @@ const config = {
         },
         {
           type: 'custom-search-bar',
-          position: 'right',
+          position: 'center',
         },
         {
           type: 'custom-github-stats',
diff --git a/docs/docusaurus.config.ts b/docs/docusaurus.config.ts
index 84afc25..bf3879e 100644
--- a/docs/docusaurus.config.ts
+++ b/docs/docusaurus.config.ts
@@ -4,8 +4,8 @@ import type { Config } from '@docusaurus/types';
 const config: Config = {
   title: 'Ryx ORM',
   tagline: 'Django-style Python ORM. 
Powered by Rust.', - favicon: 'img/favicon.ico', - url: 'https://ryx.alldotpy.dev', + favicon: 'img/logo.svg', + url: 'https://ryx.alldotpy.com', baseUrl: '/', organizationName: 'AllDotPy', projectName: 'Ryx', diff --git a/docs/sidebars.js b/docs/sidebars.js index 6541724..e466e9b 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -70,6 +70,7 @@ const sidebars = { 'advanced/caching', 'advanced/custom-lookups', 'advanced/sync-async', + 'advanced/multi-db', 'advanced/raw-sql', 'advanced/cli', ],