diff --git a/.gitignore b/.gitignore
index e9d56d6b..b8e7b86e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -33,4 +33,6 @@ test_*.db*
 # Debug and temporary files
 debug_*
 test_datetime_translation
-benchmark.db
\ No newline at end of file
+benchmark.db
+# Worktrees
+.worktrees/
diff --git a/Cargo.lock b/Cargo.lock
index 1155a495..6aef6cfe 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1254,7 +1254,7 @@ dependencies = [
 [[package]]
 name = "pgsqlite"
-version = "0.0.17"
+version = "0.0.18"
 dependencies = [
  "anyhow",
  "arbitrary",
diff --git a/docs/plans/2025-01-25-missing-catalog-tables-design.md b/docs/plans/2025-01-25-missing-catalog-tables-design.md
new file mode 100644
index 00000000..1bee87f9
--- /dev/null
+++ b/docs/plans/2025-01-25-missing-catalog-tables-design.md
@@ -0,0 +1,217 @@
+# Missing PostgreSQL Catalog Tables Design
+
+## Overview
+
+Add 7 missing PostgreSQL catalog tables to improve protocol completeness:
+- pg_settings
+- pg_sequence
+- pg_trigger
+- pg_collation
+- pg_replication_slots
+- pg_shdepend
+- pg_statistic
+
+## Goals
+
+- **Completeness**: Fill gaps in general PostgreSQL protocol coverage
+- **Fidelity**: Dynamic where possible, mapping SQLite state where meaningful
+- **Approach**: Implement all 7 tables together
+
+## Architecture
+
+Follow the existing **CatalogInterceptor pattern**:
+1. Add table names to the interception check in `query_interceptor.rs`
+2. Create dedicated handler modules for complex tables
+3. Handle simpler tables inline in `check_table_factor()`
+
+### Table Classification
+
+| Table | Implementation | Rationale |
+|-------|---------------|-----------|
+| pg_settings | Dedicated handler | Many rows, commonly queried, needs SHOW/SET integration |
+| pg_sequence | Dedicated handler | Must query SQLite's sqlite_sequence table dynamically |
+| pg_trigger | Dedicated handler | Must query SQLite's sqlite_master for triggers |
+| pg_collation | Inline static | Small fixed set of collations |
+| pg_replication_slots | Inline stub | Always empty (SQLite has no replication) |
+| pg_shdepend | Inline stub | Always empty (no shared dependencies) |
+| pg_statistic | Inline stub | Complex internal format, stub is appropriate |
+
+### File Structure
+
+```
+src/catalog/
+├── pg_settings.rs (new)
+├── pg_sequence.rs (new)
+├── pg_trigger.rs (new)
+└── mod.rs (updated)
+```
+
+## Detailed Designs
+
+### pg_settings
+
+Exposes PostgreSQL server configuration. Clients query this during connection setup.
+
+**Column schema** (pg_settings is a view and has no oid column):
+```
+name, setting, unit, category, short_desc, extra_desc,
+context, vartype, source, min_val, max_val, enumvals, boot_val,
+reset_val, sourcefile, sourceline, pending_restart
+```
+
+**Implementation:**
+1. Static configuration map with ~30-40 common settings:
+   - server_version → "16.0"
+   - server_encoding → "UTF8"
+   - client_encoding → "UTF8"
+   - DateStyle → "ISO, MDY"
+   - TimeZone → "UTC"
+   - integer_datetimes → "on"
+   - standard_conforming_strings → "on"
+   - max_connections → "100"
+
+2. Dynamic settings where applicable:
+   - search_path → from session state
+   - transaction_isolation → current transaction level
+
+3. WHERE clause support for `name` filtering (SHOW command compatibility)
+
+**Handler signature:**
+```rust
+pub struct PgSettingsHandler;
+impl PgSettingsHandler {
+    pub async fn handle_query(select: &Select, session: Option<&SessionState>) -> Result<DbResponse, PgSqliteError>;
+}
+```
+
+### pg_sequence
+
+Maps SQLite's autoincrement sequences to PostgreSQL's sequence catalog.
+ +**Column schema:** +``` +seqrelid, seqtypid, seqstart, seqincrement, seqmax, seqmin, seqcache, seqcycle +``` + +**Data source:** SQLite's `sqlite_sequence` table: +```sql +SELECT name, seq FROM sqlite_sequence +``` + +**Mapping:** +- seqrelid → Generate OID from table name hash +- seqtypid → 20 (int8 OID) +- seqstart → 1 +- seqincrement → 1 +- seqmax → current seq value from sqlite_sequence +- seqmin → 1 +- seqcache → 1 +- seqcycle → false + +### pg_trigger + +Maps SQLite triggers to PostgreSQL's trigger catalog. + +**Column schema:** +``` +oid, tgrelid, tgparentid, tgname, tgfoid, tgtype, tgenabled, +tgisinternal, tgconstrrelid, tgconstrindid, tgconstraint, +tgdeferrable, tginitdeferred, tgnargs, tgattr, tgargs, tgqual, +tgoldtable, tgnewtable +``` + +**Data source:** SQLite's `sqlite_master`: +```sql +SELECT name, tbl_name, sql FROM sqlite_master WHERE type = 'trigger' +``` + +**Note:** Reuse existing `parse_trigger_sql()` logic from information_schema.triggers implementation. + +### pg_collation (Static) + +**Column schema:** +``` +oid, collname, collnamespace, collowner, collprovider, collisdeterministic, +collencoding, collcollate, collctype, colliculocale, collicurules, collversion +``` + +**Implementation:** Return 3 standard collations: +- default (oid 100) +- C (oid 950) +- POSIX (oid 951) + +### pg_replication_slots (Empty stub) + +**Column schema:** +``` +slot_name, plugin, slot_type, datoid, database, temporary, active, +active_pid, xmin, catalog_xmin, restart_lsn, confirmed_flush_lsn, ... +``` + +**Implementation:** Always return empty result set. SQLite has no replication. + +### pg_shdepend (Empty stub) + +**Column schema:** +``` +dbid, classid, objid, objsubid, refclassid, refobjid, deptype +``` + +**Implementation:** Always return empty result set. Not applicable to SQLite's single-file model. + +### pg_statistic (Empty stub) + +**Column schema:** +``` +starelid, staattnum, stainherit, stanullfrac, stawidth, stadistinct, +stakind1-5, staop1-5, stacoll1-5, stanumbers1-5, stavalues1-5 +``` + +**Implementation:** Always return empty result set. pg_stats (already implemented) is the user-facing view. + +## Integration Points + +### query_interceptor.rs changes + +**1. Update table detection (lines 50-61):** +```rust +let has_catalog_tables = lower_query.contains("pg_catalog") || + // ... existing tables ... + lower_query.contains("pg_settings") || + lower_query.contains("pg_sequence") || + lower_query.contains("pg_trigger") || + lower_query.contains("pg_collation") || + lower_query.contains("pg_replication_slots") || + lower_query.contains("pg_shdepend") || + lower_query.contains("pg_statistic"); +``` + +**2. Add routing in check_table_factor():** +- Route pg_settings, pg_sequence, pg_trigger to dedicated handlers +- Handle pg_collation, pg_replication_slots, pg_shdepend, pg_statistic inline + +### mod.rs changes + +```rust +pub mod pg_settings; +pub mod pg_sequence; +pub mod pg_trigger; +``` + +## Migration Consideration + +No SQLite migrations needed - these are all virtual tables handled by the interceptor, not backed by SQLite tables. 
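+## OID Generation Sketch
+
+Several mappings above (seqrelid, tgrelid) need a stable OID derived from a SQLite object
+name. A minimal sketch of the idea; the hash choice, bit pattern, and `main` driver here
+are illustrative assumptions rather than the final implementation:
+
+```rust
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+/// Derive a per-name OID. Setting the high bit keeps generated OIDs well away
+/// from the low range PostgreSQL reserves for built-in objects. The value is
+/// stable for a given name within one build, but DefaultHasher's algorithm is
+/// unspecified, so it is not guaranteed stable across Rust versions.
+fn generate_oid(name: &str) -> u32 {
+    let mut hasher = DefaultHasher::new();
+    name.hash(&mut hasher);
+    (hasher.finish() as u32) | 0x8000_0000
+}
+
+fn main() {
+    // Identical names always map to identical OIDs.
+    assert_eq!(generate_oid("test_seq"), generate_oid("test_seq"));
+    println!("{:#x}", generate_oid("test_seq"));
+}
+```
+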
+ +## Estimates + +| Table | Type | Lines of code | +|-------|------|---------------| +| pg_settings | Dedicated handler | ~200 | +| pg_sequence | Dedicated handler | ~100 | +| pg_trigger | Dedicated handler | ~120 | +| pg_collation | Inline static | ~40 | +| pg_replication_slots | Inline stub | ~20 | +| pg_shdepend | Inline stub | ~20 | +| pg_statistic | Inline stub | ~30 | + +**Total:** ~530 lines of new code diff --git a/docs/plans/2025-01-25-missing-catalog-tables-implementation.md b/docs/plans/2025-01-25-missing-catalog-tables-implementation.md new file mode 100644 index 00000000..81459a1a --- /dev/null +++ b/docs/plans/2025-01-25-missing-catalog-tables-implementation.md @@ -0,0 +1,1351 @@ +# Missing PostgreSQL Catalog Tables Implementation Plan + +> **For Claude:** REQUIRED SUB-SKILL: Use superpowers:executing-plans to implement this plan task-by-task. + +**Goal:** Add 7 missing PostgreSQL catalog tables (pg_settings, pg_sequence, pg_trigger, pg_collation, pg_replication_slots, pg_shdepend, pg_statistic) for protocol completeness. + +**Architecture:** Follow existing CatalogInterceptor pattern - intercept queries to pg_catalog tables and return PostgreSQL-compatible responses. Dedicated handlers for complex tables (pg_settings, pg_sequence, pg_trigger), inline handlers for stubs. + +**Tech Stack:** Rust, sqlparser, rusqlite, async/await + +--- + +## Task 1: Add pg_collation (Inline Static) + +**Files:** +- Modify: `src/catalog/query_interceptor.rs` + +**Step 1: Write integration test** + +Create file `tests/pg_collation_test.rs`: + +```rust +use pgsqlite::test_utils::create_test_server; +use tokio_postgres::NoTls; + +#[tokio::test] +async fn test_pg_collation_basic() { + let (addr, _server, _temp_dir) = create_test_server().await; + let (client, connection) = tokio_postgres::connect( + &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()), + NoTls, + ).await.unwrap(); + tokio::spawn(async move { connection.await.unwrap(); }); + + // Test basic query + let rows = client.query("SELECT oid, collname FROM pg_collation", &[]).await.unwrap(); + assert!(rows.len() >= 3, "Should have at least 3 collations (default, C, POSIX)"); + + // Test filtering by name + let rows = client.query("SELECT * FROM pg_collation WHERE collname = 'C'", &[]).await.unwrap(); + assert_eq!(rows.len(), 1); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --test pg_collation_test` +Expected: FAIL (table not recognized) + +**Step 3: Add pg_collation to table detection** + +In `src/catalog/query_interceptor.rs`, find the `has_catalog_tables` check (~line 50-61) and add: + +```rust +lower_query.contains("pg_collation") || +``` + +**Step 4: Add pg_collation handler in check_table_factor** + +In `src/catalog/query_interceptor.rs`, add after the pg_tablespace handler (~line 514-516): + +```rust +// Handle pg_collation queries +if table_name.contains("pg_collation") || table_name.contains("pg_catalog.pg_collation") { + return Some(Ok(Self::handle_pg_collation_query(select))); +} +``` + +**Step 5: Implement handle_pg_collation_query** + +Add this function to the `CatalogInterceptor` impl block: + +```rust +fn handle_pg_collation_query(select: &Select) -> DbResponse { + // Define pg_collation columns (PostgreSQL standard) + let all_columns = vec![ + "oid".to_string(), + "collname".to_string(), + "collnamespace".to_string(), + "collowner".to_string(), + "collprovider".to_string(), + "collisdeterministic".to_string(), + "collencoding".to_string(), + "collcollate".to_string(), + 
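+        // collcollate and collctype hold libc locale names; empty strings are emitted as NULL below.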
"collctype".to_string(), + "colliculocale".to_string(), + "collicurules".to_string(), + "collversion".to_string(), + ]; + + // Extract selected columns + let (selected_columns, column_indices) = Self::extract_selected_columns(select, &all_columns); + + // Define standard collations + let collations = vec![ + ("100", "default", "11", "10", "d", "t", "-1", "", "", "", "", ""), + ("950", "C", "11", "10", "c", "t", "-1", "C", "C", "", "", ""), + ("951", "POSIX", "11", "10", "c", "t", "-1", "POSIX", "POSIX", "", "", ""), + ]; + + // Check for WHERE clause filtering by collname + let name_filter = if let Some(ref where_clause) = select.selection { + Self::extract_collation_name_filter(where_clause) + } else { + None + }; + + let mut rows = Vec::new(); + for (oid, collname, collnamespace, collowner, collprovider, collisdeterministic, + collencoding, collcollate, collctype, colliculocale, collicurules, collversion) in collations { + + // Apply name filter if present + if let Some(ref filter) = name_filter { + if collname != filter { + continue; + } + } + + let full_row: Vec>> = vec![ + Some(oid.to_string().into_bytes()), + Some(collname.to_string().into_bytes()), + Some(collnamespace.to_string().into_bytes()), + Some(collowner.to_string().into_bytes()), + Some(collprovider.to_string().into_bytes()), + Some(collisdeterministic.to_string().into_bytes()), + Some(collencoding.to_string().into_bytes()), + if collcollate.is_empty() { None } else { Some(collcollate.to_string().into_bytes()) }, + if collctype.is_empty() { None } else { Some(collctype.to_string().into_bytes()) }, + if colliculocale.is_empty() { None } else { Some(colliculocale.to_string().into_bytes()) }, + if collicurules.is_empty() { None } else { Some(collicurules.to_string().into_bytes()) }, + if collversion.is_empty() { None } else { Some(collversion.to_string().into_bytes()) }, + ]; + + let projected_row: Vec>> = column_indices.iter() + .map(|&idx| full_row[idx].clone()) + .collect(); + rows.push(projected_row); + } + + let rows_affected = rows.len(); + DbResponse { + columns: selected_columns, + rows, + rows_affected, + } +} + +fn extract_collation_name_filter(where_clause: &Expr) -> Option { + match where_clause { + Expr::BinaryOp { left, op, right } => { + if let (Expr::Identifier(ident), sqlparser::ast::BinaryOperator::Eq, Expr::Value(value_with_span)) = + (left.as_ref(), op, right.as_ref()) + && ident.value.to_lowercase() == "collname" + && let sqlparser::ast::Value::SingleQuotedString(value) = &value_with_span.value { + return Some(value.clone()); + } + } + _ => {} + } + None +} +``` + +**Step 6: Run test to verify it passes** + +Run: `cargo test --test pg_collation_test` +Expected: PASS + +**Step 7: Commit** + +```bash +git add src/catalog/query_interceptor.rs tests/pg_collation_test.rs +git commit -m "feat: add pg_collation catalog table support" +``` + +--- + +## Task 2: Add pg_replication_slots (Empty Stub) + +**Files:** +- Modify: `src/catalog/query_interceptor.rs` + +**Step 1: Write integration test** + +Create file `tests/pg_replication_slots_test.rs`: + +```rust +use pgsqlite::test_utils::create_test_server; +use tokio_postgres::NoTls; + +#[tokio::test] +async fn test_pg_replication_slots_empty() { + let (addr, _server, _temp_dir) = create_test_server().await; + let (client, connection) = tokio_postgres::connect( + &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()), + NoTls, + ).await.unwrap(); + tokio::spawn(async move { connection.await.unwrap(); }); + + // Should return empty result with 
correct schema + let rows = client.query("SELECT slot_name, plugin, slot_type FROM pg_replication_slots", &[]).await.unwrap(); + assert_eq!(rows.len(), 0, "pg_replication_slots should be empty for SQLite"); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --test pg_replication_slots_test` +Expected: FAIL + +**Step 3: Add pg_replication_slots to table detection and handler** + +In `src/catalog/query_interceptor.rs`, add to `has_catalog_tables`: + +```rust +lower_query.contains("pg_replication_slots") || +``` + +Add handler in `check_table_factor`: + +```rust +// Handle pg_replication_slots queries (always empty - SQLite has no replication) +if table_name.contains("pg_replication_slots") || table_name.contains("pg_catalog.pg_replication_slots") { + return Some(Ok(Self::handle_pg_replication_slots_query(select))); +} +``` + +**Step 4: Implement handle_pg_replication_slots_query** + +```rust +fn handle_pg_replication_slots_query(select: &Select) -> DbResponse { + let all_columns = vec![ + "slot_name".to_string(), + "plugin".to_string(), + "slot_type".to_string(), + "datoid".to_string(), + "database".to_string(), + "temporary".to_string(), + "active".to_string(), + "active_pid".to_string(), + "xmin".to_string(), + "catalog_xmin".to_string(), + "restart_lsn".to_string(), + "confirmed_flush_lsn".to_string(), + "wal_status".to_string(), + "safe_wal_size".to_string(), + "two_phase".to_string(), + "conflicting".to_string(), + ]; + + let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns); + + // Always return empty - SQLite has no replication + DbResponse { + columns: selected_columns, + rows: vec![], + rows_affected: 0, + } +} +``` + +**Step 5: Run test to verify it passes** + +Run: `cargo test --test pg_replication_slots_test` +Expected: PASS + +**Step 6: Commit** + +```bash +git add src/catalog/query_interceptor.rs tests/pg_replication_slots_test.rs +git commit -m "feat: add pg_replication_slots catalog table (empty stub)" +``` + +--- + +## Task 3: Add pg_shdepend (Empty Stub) + +**Files:** +- Modify: `src/catalog/query_interceptor.rs` + +**Step 1: Write integration test** + +Create file `tests/pg_shdepend_test.rs`: + +```rust +use pgsqlite::test_utils::create_test_server; +use tokio_postgres::NoTls; + +#[tokio::test] +async fn test_pg_shdepend_empty() { + let (addr, _server, _temp_dir) = create_test_server().await; + let (client, connection) = tokio_postgres::connect( + &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()), + NoTls, + ).await.unwrap(); + tokio::spawn(async move { connection.await.unwrap(); }); + + let rows = client.query("SELECT dbid, classid, objid FROM pg_shdepend", &[]).await.unwrap(); + assert_eq!(rows.len(), 0, "pg_shdepend should be empty for SQLite"); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --test pg_shdepend_test` +Expected: FAIL + +**Step 3: Add pg_shdepend detection and handler** + +Add to `has_catalog_tables`: +```rust +lower_query.contains("pg_shdepend") || +``` + +Add handler: +```rust +// Handle pg_shdepend queries (always empty - no shared dependencies in SQLite) +if table_name.contains("pg_shdepend") || table_name.contains("pg_catalog.pg_shdepend") { + return Some(Ok(Self::handle_pg_shdepend_query(select))); +} +``` + +**Step 4: Implement handle_pg_shdepend_query** + +```rust +fn handle_pg_shdepend_query(select: &Select) -> DbResponse { + let all_columns = vec![ + "dbid".to_string(), + "classid".to_string(), + "objid".to_string(), + "objsubid".to_string(), + 
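+        // The ref* columns identify the referenced shared object; deptype is a one-letter dependency code (e.g. 'o' for owner).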
"refclassid".to_string(), + "refobjid".to_string(), + "deptype".to_string(), + ]; + + let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns); + + DbResponse { + columns: selected_columns, + rows: vec![], + rows_affected: 0, + } +} +``` + +**Step 5: Run test and commit** + +Run: `cargo test --test pg_shdepend_test` +Expected: PASS + +```bash +git add src/catalog/query_interceptor.rs tests/pg_shdepend_test.rs +git commit -m "feat: add pg_shdepend catalog table (empty stub)" +``` + +--- + +## Task 4: Add pg_statistic (Empty Stub) + +**Files:** +- Modify: `src/catalog/query_interceptor.rs` + +**Step 1: Write integration test** + +Create file `tests/pg_statistic_test.rs`: + +```rust +use pgsqlite::test_utils::create_test_server; +use tokio_postgres::NoTls; + +#[tokio::test] +async fn test_pg_statistic_empty() { + let (addr, _server, _temp_dir) = create_test_server().await; + let (client, connection) = tokio_postgres::connect( + &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()), + NoTls, + ).await.unwrap(); + tokio::spawn(async move { connection.await.unwrap(); }); + + let rows = client.query("SELECT starelid, staattnum FROM pg_statistic", &[]).await.unwrap(); + assert_eq!(rows.len(), 0, "pg_statistic should be empty (use pg_stats view instead)"); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --test pg_statistic_test` +Expected: FAIL + +**Step 3: Add pg_statistic detection and handler** + +Add to `has_catalog_tables`: +```rust +lower_query.contains("pg_statistic") || +``` + +Add handler: +```rust +// Handle pg_statistic queries (always empty - internal stats table) +if table_name.contains("pg_statistic") || table_name.contains("pg_catalog.pg_statistic") { + return Some(Ok(Self::handle_pg_statistic_query(select))); +} +``` + +**Step 4: Implement handle_pg_statistic_query** + +```rust +fn handle_pg_statistic_query(select: &Select) -> DbResponse { + let all_columns = vec![ + "starelid".to_string(), + "staattnum".to_string(), + "stainherit".to_string(), + "stanullfrac".to_string(), + "stawidth".to_string(), + "stadistinct".to_string(), + "stakind1".to_string(), + "stakind2".to_string(), + "stakind3".to_string(), + "stakind4".to_string(), + "stakind5".to_string(), + "staop1".to_string(), + "staop2".to_string(), + "staop3".to_string(), + "staop4".to_string(), + "staop5".to_string(), + "stacoll1".to_string(), + "stacoll2".to_string(), + "stacoll3".to_string(), + "stacoll4".to_string(), + "stacoll5".to_string(), + "stanumbers1".to_string(), + "stanumbers2".to_string(), + "stanumbers3".to_string(), + "stanumbers4".to_string(), + "stanumbers5".to_string(), + "stavalues1".to_string(), + "stavalues2".to_string(), + "stavalues3".to_string(), + "stavalues4".to_string(), + "stavalues5".to_string(), + ]; + + let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns); + + DbResponse { + columns: selected_columns, + rows: vec![], + rows_affected: 0, + } +} +``` + +**Step 5: Run test and commit** + +Run: `cargo test --test pg_statistic_test` +Expected: PASS + +```bash +git add src/catalog/query_interceptor.rs tests/pg_statistic_test.rs +git commit -m "feat: add pg_statistic catalog table (empty stub)" +``` + +--- + +## Task 5: Add pg_sequence Handler + +**Files:** +- Create: `src/catalog/pg_sequence.rs` +- Modify: `src/catalog/mod.rs` +- Modify: `src/catalog/query_interceptor.rs` + +**Step 1: Write integration test** + +Create file `tests/pg_sequence_test.rs`: + +```rust +use 
pgsqlite::test_utils::create_test_server;
+use tokio_postgres::NoTls;
+
+#[tokio::test]
+async fn test_pg_sequence_basic() {
+    let (addr, _server, _temp_dir) = create_test_server().await;
+    let (client, connection) = tokio_postgres::connect(
+        &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()),
+        NoTls,
+    ).await.unwrap();
+    tokio::spawn(async move { connection.await.unwrap(); });
+
+    // Create a table with AUTOINCREMENT to populate sqlite_sequence
+    client.execute("CREATE TABLE test_seq (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT)", &[]).await.unwrap();
+    client.execute("INSERT INTO test_seq (name) VALUES ('a'), ('b'), ('c')", &[]).await.unwrap();
+
+    // Query pg_sequence
+    let rows = client.query("SELECT seqrelid, seqtypid, seqstart, seqincrement FROM pg_sequence", &[]).await.unwrap();
+    assert!(rows.len() >= 1, "Should have at least one sequence from test_seq");
+}
+
+#[tokio::test]
+async fn test_pg_sequence_empty_when_no_autoincrement() {
+    let (addr, _server, _temp_dir) = create_test_server().await;
+    let (client, connection) = tokio_postgres::connect(
+        &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()),
+        NoTls,
+    ).await.unwrap();
+    tokio::spawn(async move { connection.await.unwrap(); });
+
+    // Query pg_sequence without creating any autoincrement tables.
+    // The unwrap above already verifies the query succeeds; the result may be
+    // empty or contain sequences left over from other setup.
+    let rows = client.query("SELECT * FROM pg_sequence", &[]).await.unwrap();
+    let _ = rows;
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cargo test --test pg_sequence_test`
+Expected: FAIL
+
+**Step 3: Create pg_sequence.rs module**
+
+Create `src/catalog/pg_sequence.rs`:
+
+```rust
+use crate::session::db_handler::{DbHandler, DbResponse};
+use crate::PgSqliteError;
+use sqlparser::ast::{Select, SelectItem, Expr};
+use std::sync::Arc;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use tracing::debug;
+
+pub struct PgSequenceHandler;
+
+impl PgSequenceHandler {
+    pub async fn handle_query(select: &Select, db: &Arc<DbHandler>) -> Result<DbResponse, PgSqliteError> {
+        debug!("PgSequenceHandler: handling query");
+
+        let all_columns = vec![
+            "seqrelid".to_string(),
+            "seqtypid".to_string(),
+            "seqstart".to_string(),
+            "seqincrement".to_string(),
+            "seqmax".to_string(),
+            "seqmin".to_string(),
+            "seqcache".to_string(),
+            "seqcycle".to_string(),
+        ];
+
+        let (selected_columns, column_indices) = Self::extract_selected_columns(select, &all_columns);
+
+        // Query sqlite_sequence table for autoincrement sequences
+        let sequences = Self::get_sqlite_sequences(db).await;
+
+        let mut rows = Vec::new();
+        for (table_name, current_value) in sequences {
+            // Generate a stable OID from table name
+            let seqrelid = Self::generate_oid(&table_name);
+
+            let full_row: Vec<Option<Vec<u8>>> = vec![
+                Some(seqrelid.to_string().into_bytes()),      // seqrelid
+                Some("20".to_string().into_bytes()),          // seqtypid (int8)
+                Some("1".to_string().into_bytes()),           // seqstart
+                Some("1".to_string().into_bytes()),           // seqincrement
+                Some(current_value.to_string().into_bytes()), // seqmax (current value)
+                Some("1".to_string().into_bytes()),           // seqmin
+                Some("1".to_string().into_bytes()),           // seqcache
+                Some("f".to_string().into_bytes()),           // seqcycle (false)
+            ];
+
+            let projected_row: Vec<Option<Vec<u8>>> = column_indices.iter()
+                .map(|&idx| full_row[idx].clone())
+                .collect();
+            rows.push(projected_row);
+        }
+
+        let rows_affected = rows.len();
+        Ok(DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected,
+        })
+    }
+
+    async fn get_sqlite_sequences(db: &Arc<DbHandler>) -> Vec<(String, i64)> {
+        let mut sequences = Vec::new();
+
+        // Query sqlite_sequence - this table exists if any AUTOINCREMENT columns exist
+        match db.query("SELECT name, seq FROM sqlite_sequence").await {
+            Ok(response) => {
+                for row in response.rows {
+                    if row.len() >= 2 {
+                        if let (Some(Some(name_bytes)), Some(Some(seq_bytes))) = (row.get(0), row.get(1)) {
+                            let name = String::from_utf8_lossy(name_bytes).to_string();
+                            let seq: i64 = String::from_utf8_lossy(seq_bytes)
+                                .parse()
+                                .unwrap_or(0);
+                            sequences.push((name, seq));
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                debug!("No sqlite_sequence table or error: {:?}", e);
+            }
+        }
+
+        sequences
+    }
+
+    fn generate_oid(name: &str) -> u32 {
+        let mut hasher = DefaultHasher::new();
+        name.hash(&mut hasher);
+        // Use upper bits to avoid collision with standard OIDs
+        (hasher.finish() as u32) | 0x80000000
+    }
+
+    fn extract_selected_columns(select: &Select, all_columns: &[String]) -> (Vec<String>, Vec<usize>) {
+        if select.projection.len() == 1 {
+            if let SelectItem::Wildcard(_) = &select.projection[0] {
+                return (all_columns.to_vec(), (0..all_columns.len()).collect());
+            }
+        }
+
+        let mut cols = Vec::new();
+        let mut indices = Vec::new();
+        for item in &select.projection {
+            match item {
+                SelectItem::UnnamedExpr(Expr::Identifier(ident)) => {
+                    let col_name = ident.value.to_lowercase();
+                    if let Some(idx) = all_columns.iter().position(|c| c == &col_name) {
+                        cols.push(col_name);
+                        indices.push(idx);
+                    }
+                }
+                SelectItem::UnnamedExpr(Expr::CompoundIdentifier(parts)) => {
+                    if let Some(last) = parts.last() {
+                        let col_name = last.value.to_lowercase();
+                        if let Some(idx) = all_columns.iter().position(|c| c == &col_name) {
+                            cols.push(col_name);
+                            indices.push(idx);
+                        }
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        if cols.is_empty() {
+            (all_columns.to_vec(), (0..all_columns.len()).collect())
+        } else {
+            (cols, indices)
+        }
+    }
+}
+```
+
+**Step 4: Update mod.rs**
+
+Add to `src/catalog/mod.rs`:
+
+```rust
+pub mod pg_sequence;
+```
+
+**Step 5: Update query_interceptor.rs**
+
+Add import at top:
+```rust
+use super::pg_sequence::PgSequenceHandler;
+```
+
+Add to `has_catalog_tables`:
+```rust
+lower_query.contains("pg_sequence") ||
+```
+
+Add handler in `check_table_factor`:
+```rust
+// Handle pg_sequence queries
+if table_name.contains("pg_sequence") || table_name.contains("pg_catalog.pg_sequence") {
+    return match PgSequenceHandler::handle_query(select, &db).await {
+        Ok(response) => Some(Ok(response)),
+        Err(_) => None,
+    };
+}
+```
+
+**Step 6: Run test and commit**
+
+Run: `cargo test --test pg_sequence_test`
+Expected: PASS
+
+```bash
+git add src/catalog/pg_sequence.rs src/catalog/mod.rs src/catalog/query_interceptor.rs tests/pg_sequence_test.rs
+git commit -m "feat: add pg_sequence catalog table with SQLite sequence mapping"
+```
+
+---
+
+## Task 6: Add pg_trigger Handler
+
+**Files:**
+- Create: `src/catalog/pg_trigger.rs`
+- Modify: `src/catalog/mod.rs`
+- Modify: `src/catalog/query_interceptor.rs`
+
+**Step 1: Write integration test**
+
+Create file `tests/pg_trigger_test.rs`:
+
+```rust
+use pgsqlite::test_utils::create_test_server;
+use tokio_postgres::NoTls;
+
+#[tokio::test]
+async fn test_pg_trigger_basic() {
+    let (addr, _server, _temp_dir) = create_test_server().await;
+    let (client, connection) = tokio_postgres::connect(
+        &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()),
+        NoTls,
+    ).await.unwrap();
+    tokio::spawn(async move { connection.await.unwrap(); });
+
+    // Create a table and trigger
+    client.execute("CREATE TABLE trigger_test (id INTEGER PRIMARY KEY, value TEXT)", &[]).await.unwrap();
TEXT)", &[]).await.unwrap(); + client.execute("CREATE TABLE trigger_log (msg TEXT)", &[]).await.unwrap(); + client.execute( + "CREATE TRIGGER test_trigger AFTER INSERT ON trigger_test BEGIN INSERT INTO trigger_log VALUES ('inserted'); END", + &[] + ).await.unwrap(); + + // Query pg_trigger + let rows = client.query("SELECT tgname, tgrelid FROM pg_trigger", &[]).await.unwrap(); + assert!(rows.len() >= 1, "Should have at least one trigger"); + + // Verify trigger name + let tgname: &str = rows[0].get(0); + assert_eq!(tgname, "test_trigger"); +} + +#[tokio::test] +async fn test_pg_trigger_empty() { + let (addr, _server, _temp_dir) = create_test_server().await; + let (client, connection) = tokio_postgres::connect( + &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()), + NoTls, + ).await.unwrap(); + tokio::spawn(async move { connection.await.unwrap(); }); + + // Query pg_trigger without any triggers + let rows = client.query("SELECT * FROM pg_trigger", &[]).await.unwrap(); + assert!(rows.len() >= 0); +} +``` + +**Step 2: Run test to verify it fails** + +Run: `cargo test --test pg_trigger_test` +Expected: FAIL + +**Step 3: Create pg_trigger.rs module** + +Create `src/catalog/pg_trigger.rs`: + +```rust +use crate::session::db_handler::{DbHandler, DbResponse}; +use sqlparser::ast::{Select, SelectItem, Expr}; +use std::sync::Arc; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; +use tracing::debug; + +pub struct PgTriggerHandler; + +impl PgTriggerHandler { + pub async fn handle_query(select: &Select, db: &Arc) -> Result { + debug!("PgTriggerHandler: handling query"); + + let all_columns = vec![ + "oid".to_string(), + "tgrelid".to_string(), + "tgparentid".to_string(), + "tgname".to_string(), + "tgfoid".to_string(), + "tgtype".to_string(), + "tgenabled".to_string(), + "tgisinternal".to_string(), + "tgconstrrelid".to_string(), + "tgconstrindid".to_string(), + "tgconstraint".to_string(), + "tgdeferrable".to_string(), + "tginitdeferred".to_string(), + "tgnargs".to_string(), + "tgattr".to_string(), + "tgargs".to_string(), + "tgqual".to_string(), + "tgoldtable".to_string(), + "tgnewtable".to_string(), + ]; + + let (selected_columns, column_indices) = Self::extract_selected_columns(select, &all_columns); + + // Query SQLite triggers + let triggers = Self::get_sqlite_triggers(db).await; + + let mut rows = Vec::new(); + for (trigger_name, table_name, trigger_sql) in triggers { + let oid = Self::generate_oid(&trigger_name); + let tgrelid = Self::generate_oid(&table_name); + let tgtype = Self::parse_trigger_type(&trigger_sql); + + let full_row: Vec>> = vec![ + Some(oid.to_string().into_bytes()), // oid + Some(tgrelid.to_string().into_bytes()), // tgrelid + Some("0".to_string().into_bytes()), // tgparentid + Some(trigger_name.clone().into_bytes()), // tgname + Some("0".to_string().into_bytes()), // tgfoid + Some(tgtype.to_string().into_bytes()), // tgtype + Some("O".to_string().into_bytes()), // tgenabled (Origin) + Some("f".to_string().into_bytes()), // tgisinternal + Some("0".to_string().into_bytes()), // tgconstrrelid + Some("0".to_string().into_bytes()), // tgconstrindid + Some("0".to_string().into_bytes()), // tgconstraint + Some("f".to_string().into_bytes()), // tgdeferrable + Some("f".to_string().into_bytes()), // tginitdeferred + Some("0".to_string().into_bytes()), // tgnargs + None, // tgattr + None, // tgargs + None, // tgqual + None, // tgoldtable + None, // tgnewtable + ]; + + let projected_row: Vec>> = column_indices.iter() + .map(|&idx| 
+                .collect();
+            rows.push(projected_row);
+        }
+
+        let rows_affected = rows.len();
+        Ok(DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected,
+        })
+    }
+
+    async fn get_sqlite_triggers(db: &Arc<DbHandler>) -> Vec<(String, String, String)> {
+        let mut triggers = Vec::new();
+
+        match db.query("SELECT name, tbl_name, sql FROM sqlite_master WHERE type = 'trigger'").await {
+            Ok(response) => {
+                for row in response.rows {
+                    if row.len() >= 3 {
+                        if let (Some(Some(name)), Some(Some(tbl)), Some(Some(sql))) =
+                            (row.get(0), row.get(1), row.get(2)) {
+                            triggers.push((
+                                String::from_utf8_lossy(name).to_string(),
+                                String::from_utf8_lossy(tbl).to_string(),
+                                String::from_utf8_lossy(sql).to_string(),
+                            ));
+                        }
+                    }
+                }
+            }
+            Err(e) => {
+                debug!("Error querying triggers: {:?}", e);
+            }
+        }
+
+        triggers
+    }
+
+    /// Parse SQLite trigger SQL to determine PostgreSQL tgtype bitmask
+    fn parse_trigger_type(sql: &str) -> i16 {
+        let sql_upper = sql.to_uppercase();
+        let mut tgtype: i16 = 0;
+
+        // Bit 0 (1): ROW vs STATEMENT - SQLite triggers always fire per row
+        tgtype |= 1;
+
+        // Bit 1 (2): set for BEFORE, clear for AFTER
+        if sql_upper.contains("BEFORE") {
+            tgtype |= 2;
+        }
+        // INSTEAD OF sets bit 6 (64)
+        if sql_upper.contains("INSTEAD OF") {
+            tgtype |= 64;
+        }
+
+        // Bits 2-4: INSERT (4), DELETE (8), UPDATE (16)
+        if sql_upper.contains(" INSERT ") || sql_upper.contains(" INSERT\n") {
+            tgtype |= 4;
+        }
+        if sql_upper.contains(" DELETE ") || sql_upper.contains(" DELETE\n") {
+            tgtype |= 8;
+        }
+        if sql_upper.contains(" UPDATE ") || sql_upper.contains(" UPDATE\n") {
+            tgtype |= 16;
+        }
+
+        tgtype
+    }
+
+    fn generate_oid(name: &str) -> u32 {
+        let mut hasher = DefaultHasher::new();
+        name.hash(&mut hasher);
+        (hasher.finish() as u32) | 0x80000000
+    }
+
+    fn extract_selected_columns(select: &Select, all_columns: &[String]) -> (Vec<String>, Vec<usize>) {
+        if select.projection.len() == 1 {
+            if let SelectItem::Wildcard(_) = &select.projection[0] {
+                return (all_columns.to_vec(), (0..all_columns.len()).collect());
+            }
+        }
+
+        let mut cols = Vec::new();
+        let mut indices = Vec::new();
+        for item in &select.projection {
+            match item {
+                SelectItem::UnnamedExpr(Expr::Identifier(ident)) => {
+                    let col_name = ident.value.to_lowercase();
+                    if let Some(idx) = all_columns.iter().position(|c| c == &col_name) {
+                        cols.push(col_name);
+                        indices.push(idx);
+                    }
+                }
+                SelectItem::UnnamedExpr(Expr::CompoundIdentifier(parts)) => {
+                    if let Some(last) = parts.last() {
+                        let col_name = last.value.to_lowercase();
+                        if let Some(idx) = all_columns.iter().position(|c| c == &col_name) {
+                            cols.push(col_name);
+                            indices.push(idx);
+                        }
+                    }
+                }
+                _ => {}
+            }
+        }
+
+        if cols.is_empty() {
+            (all_columns.to_vec(), (0..all_columns.len()).collect())
+        } else {
+            (cols, indices)
+        }
+    }
+}
+```
+
+**Step 4: Update mod.rs**
+
+Add to `src/catalog/mod.rs`:
+```rust
+pub mod pg_trigger;
+```
+
+**Step 5: Update query_interceptor.rs**
+
+Add import:
+```rust
+use super::pg_trigger::PgTriggerHandler;
+```
+
+Add to `has_catalog_tables`:
+```rust
+lower_query.contains("pg_trigger") ||
+```
+
+Add handler:
+```rust
+// Handle pg_trigger queries
+if table_name.contains("pg_trigger") || table_name.contains("pg_catalog.pg_trigger") {
+    return match PgTriggerHandler::handle_query(select, &db).await {
+        Ok(response) => Some(Ok(response)),
+        Err(_) => None,
+    };
+}
+```
+
+**Step 6: Run test and commit**
+
+Run: `cargo test --test pg_trigger_test`
+Expected: PASS
+
+```bash
+git add src/catalog/pg_trigger.rs src/catalog/mod.rs src/catalog/query_interceptor.rs tests/pg_trigger_test.rs
+git commit -m "feat: add pg_trigger catalog table with SQLite trigger mapping"
+```
+
+---
+
+## Task 7: Add pg_settings Handler
+
+**Files:**
+- Create: `src/catalog/pg_settings.rs`
+- Modify: `src/catalog/mod.rs`
+- Modify: `src/catalog/query_interceptor.rs`
+
+**Step 1: Write integration test**
+
+Create file `tests/pg_settings_test.rs`:
+
+```rust
+use pgsqlite::test_utils::create_test_server;
+use tokio_postgres::NoTls;
+
+#[tokio::test]
+async fn test_pg_settings_basic() {
+    let (addr, _server, _temp_dir) = create_test_server().await;
+    let (client, connection) = tokio_postgres::connect(
+        &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()),
+        NoTls,
+    ).await.unwrap();
+    tokio::spawn(async move { connection.await.unwrap(); });
+
+    // Query all settings
+    let rows = client.query("SELECT name, setting FROM pg_settings", &[]).await.unwrap();
+    assert!(rows.len() >= 10, "Should have many settings");
+
+    // Check for common settings
+    let settings: Vec<String> = rows.iter().map(|r| r.get::<_, String>(0)).collect();
+    assert!(settings.contains(&"server_version".to_string()));
+    assert!(settings.contains(&"server_encoding".to_string()));
+}
+
+#[tokio::test]
+async fn test_pg_settings_filter_by_name() {
+    let (addr, _server, _temp_dir) = create_test_server().await;
+    let (client, connection) = tokio_postgres::connect(
+        &format!("host={} port={} user=postgres dbname=test", addr.ip(), addr.port()),
+        NoTls,
+    ).await.unwrap();
+    tokio::spawn(async move { connection.await.unwrap(); });
+
+    // Filter by name (SHOW command pattern)
+    let rows = client.query("SELECT setting FROM pg_settings WHERE name = 'server_version'", &[]).await.unwrap();
+    assert_eq!(rows.len(), 1);
+}
+```
+
+**Step 2: Run test to verify it fails**
+
+Run: `cargo test --test pg_settings_test`
+Expected: FAIL
+
+**Step 3: Create pg_settings.rs module**
+
+Create `src/catalog/pg_settings.rs`:
+
+```rust
+use crate::session::db_handler::DbResponse;
+use sqlparser::ast::{Select, SelectItem, Expr};
+use tracing::debug;
+
+pub struct PgSettingsHandler;
+
+impl PgSettingsHandler {
+    pub fn handle_query(select: &Select) -> DbResponse {
+        debug!("PgSettingsHandler: handling query");
+
+        let all_columns = vec![
+            "name".to_string(),
+            "setting".to_string(),
+            "unit".to_string(),
+            "category".to_string(),
+            "short_desc".to_string(),
+            "extra_desc".to_string(),
+            "context".to_string(),
+            "vartype".to_string(),
+            "source".to_string(),
+            "min_val".to_string(),
+            "max_val".to_string(),
+            "enumvals".to_string(),
+            "boot_val".to_string(),
+            "reset_val".to_string(),
+            "sourcefile".to_string(),
+            "sourceline".to_string(),
+            "pending_restart".to_string(),
+        ];
+
+        let (selected_columns, column_indices) = Self::extract_selected_columns(select, &all_columns);
+
+        // Check for WHERE clause filtering by name
+        let name_filter = if let Some(ref where_clause) = select.selection {
+            Self::extract_name_filter(where_clause)
+        } else {
+            None
+        };
+
+        // Define settings: (name, setting, unit, category, short_desc, context, vartype)
+        let settings = Self::get_settings();
+
+        let mut rows = Vec::new();
+        for (name, setting, unit, category, short_desc, context, vartype) in settings {
+            // Apply name filter if present
+            if let Some(ref filter) = name_filter {
+                if name != filter {
+                    continue;
+                }
+            }
+
+            let full_row: Vec<Option<Vec<u8>>> = vec![
+                Some(name.to_string().into_bytes()),    // name
+                Some(setting.to_string().into_bytes()), // setting
+                if unit.is_empty() { None } else { Some(unit.to_string().into_bytes()) }, // unit
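+                // boot_val and reset_val below reuse the live setting value: without a configuration file there is nothing distinct to reset to.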
+                Some(category.to_string().into_bytes()),   // category
+                Some(short_desc.to_string().into_bytes()), // short_desc
+                None,                                      // extra_desc
+                Some(context.to_string().into_bytes()),    // context
+                Some(vartype.to_string().into_bytes()),    // vartype
+                Some("default".to_string().into_bytes()),  // source
+                None,                                      // min_val
+                None,                                      // max_val
+                None,                                      // enumvals
+                Some(setting.to_string().into_bytes()),    // boot_val
+                Some(setting.to_string().into_bytes()),    // reset_val
+                None,                                      // sourcefile
+                None,                                      // sourceline
+                Some("f".to_string().into_bytes()),        // pending_restart
+            ];
+
+            let projected_row: Vec<Option<Vec<u8>>> = column_indices.iter()
+                .map(|&idx| full_row[idx].clone())
+                .collect();
+            rows.push(projected_row);
+        }
+
+        let rows_affected = rows.len();
+        DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected,
+        }
+    }
+
+    fn get_settings() -> Vec<(&'static str, &'static str, &'static str, &'static str, &'static str, &'static str, &'static str)> {
+        vec![
+            // Version and encoding
+            ("server_version", "16.0", "", "Preset Options", "PostgreSQL version string", "internal", "string"),
+            ("server_version_num", "160000", "", "Preset Options", "PostgreSQL version number", "internal", "integer"),
+            ("server_encoding", "UTF8", "", "Client Connection Defaults", "Server encoding", "internal", "string"),
+            ("client_encoding", "UTF8", "", "Client Connection Defaults", "Client encoding", "user", "string"),
+
+            // Date/time settings
+            ("DateStyle", "ISO, MDY", "", "Client Connection Defaults", "Date display format", "user", "string"),
+            ("TimeZone", "UTC", "", "Client Connection Defaults", "Time zone", "user", "string"),
+            ("timezone_abbreviations", "Default", "", "Client Connection Defaults", "Time zone abbreviations", "user", "string"),
+            ("extra_float_digits", "1", "", "Client Connection Defaults", "Extra float digits", "user", "integer"),
+            ("integer_datetimes", "on", "", "Preset Options", "Integer datetimes", "internal", "bool"),
+
+            // Connection settings
+            ("max_connections", "100", "", "Connections and Authentication", "Maximum connections", "postmaster", "integer"),
+            ("superuser_reserved_connections", "3", "", "Connections and Authentication", "Reserved for superuser", "postmaster", "integer"),
+
+            // Memory settings
+            ("shared_buffers", "128MB", "8kB", "Resource Usage / Memory", "Shared memory buffers", "postmaster", "integer"),
+            ("work_mem", "4MB", "kB", "Resource Usage / Memory", "Work memory", "user", "integer"),
+            ("maintenance_work_mem", "64MB", "kB", "Resource Usage / Memory", "Maintenance work memory", "user", "integer"),
+
+            // Query tuning
+            ("effective_cache_size", "4GB", "8kB", "Query Tuning / Planner Cost Constants", "Effective cache size", "user", "integer"),
+            ("random_page_cost", "4", "", "Query Tuning / Planner Cost Constants", "Random page cost", "user", "real"),
+            ("seq_page_cost", "1", "", "Query Tuning / Planner Cost Constants", "Sequential page cost", "user", "real"),
+
+            // String handling
+            ("standard_conforming_strings", "on", "", "Client Connection Defaults", "Standard conforming strings", "user", "bool"),
+            ("escape_string_warning", "on", "", "Client Connection Defaults", "Escape string warning", "user", "bool"),
+            ("bytea_output", "hex", "", "Client Connection Defaults", "Bytea output format", "user", "enum"),
+
+            // Search path
+            ("search_path", "\"$user\", public", "", "Client Connection Defaults", "Schema search path", "user", "string"),
+
+            // Logging
+            ("log_statement", "none", "", "Reporting and Logging", "Log statements", "superuser", "enum"),
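+            // "-1" disables duration-based logging, matching the PostgreSQL default.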
("log_min_duration_statement", "-1", "ms", "Reporting and Logging", "Min duration to log", "superuser", "integer"), + + // Locale + ("lc_collate", "en_US.UTF-8", "", "Client Connection Defaults", "Collation locale", "internal", "string"), + ("lc_ctype", "en_US.UTF-8", "", "Client Connection Defaults", "Character type locale", "internal", "string"), + ("lc_messages", "en_US.UTF-8", "", "Client Connection Defaults", "Messages locale", "superuser", "string"), + ("lc_monetary", "en_US.UTF-8", "", "Client Connection Defaults", "Monetary locale", "user", "string"), + ("lc_numeric", "en_US.UTF-8", "", "Client Connection Defaults", "Numeric locale", "user", "string"), + ("lc_time", "en_US.UTF-8", "", "Client Connection Defaults", "Time locale", "user", "string"), + + // Transaction settings + ("default_transaction_isolation", "read committed", "", "Client Connection Defaults", "Default isolation level", "user", "enum"), + ("default_transaction_read_only", "off", "", "Client Connection Defaults", "Default read only", "user", "bool"), + ("transaction_isolation", "read committed", "", "Client Connection Defaults", "Transaction isolation", "user", "enum"), + ("transaction_read_only", "off", "", "Client Connection Defaults", "Transaction read only", "user", "bool"), + + // Application + ("application_name", "", "", "Client Connection Defaults", "Application name", "user", "string"), + + // SSL + ("ssl", "off", "", "Connections and Authentication", "SSL enabled", "sighup", "bool"), + + // WAL (not applicable but commonly queried) + ("wal_level", "replica", "", "Write-Ahead Log", "WAL level", "postmaster", "enum"), + ("max_wal_senders", "10", "", "Replication", "Max WAL senders", "postmaster", "integer"), + + // Autovacuum (not applicable but commonly queried) + ("autovacuum", "on", "", "Autovacuum", "Autovacuum enabled", "sighup", "bool"), + + // Statement timeout + ("statement_timeout", "0", "ms", "Client Connection Defaults", "Statement timeout", "user", "integer"), + ("lock_timeout", "0", "ms", "Client Connection Defaults", "Lock timeout", "user", "integer"), + ("idle_in_transaction_session_timeout", "0", "ms", "Client Connection Defaults", "Idle transaction timeout", "user", "integer"), + ] + } + + fn extract_name_filter(where_clause: &Expr) -> Option { + match where_clause { + Expr::BinaryOp { left, op, right } => { + if let (Expr::Identifier(ident), sqlparser::ast::BinaryOperator::Eq, Expr::Value(value_with_span)) = + (left.as_ref(), op, right.as_ref()) + && ident.value.to_lowercase() == "name" + && let sqlparser::ast::Value::SingleQuotedString(value) = &value_with_span.value { + return Some(value.clone()); + } + } + _ => {} + } + None + } + + fn extract_selected_columns(select: &Select, all_columns: &[String]) -> (Vec, Vec) { + if select.projection.len() == 1 { + if let SelectItem::Wildcard(_) = &select.projection[0] { + return (all_columns.to_vec(), (0..all_columns.len()).collect()); + } + } + + let mut cols = Vec::new(); + let mut indices = Vec::new(); + for item in &select.projection { + match item { + SelectItem::UnnamedExpr(Expr::Identifier(ident)) => { + let col_name = ident.value.to_lowercase(); + if let Some(idx) = all_columns.iter().position(|c| c == &col_name) { + cols.push(col_name); + indices.push(idx); + } + } + SelectItem::UnnamedExpr(Expr::CompoundIdentifier(parts)) => { + if let Some(last) = parts.last() { + let col_name = last.value.to_lowercase(); + if let Some(idx) = all_columns.iter().position(|c| c == &col_name) { + cols.push(col_name); + indices.push(idx); + } + } + } + _ => 
+            }
+        }
+
+        if cols.is_empty() {
+            (all_columns.to_vec(), (0..all_columns.len()).collect())
+        } else {
+            (cols, indices)
+        }
+    }
+}
+```
+
+**Step 4: Update mod.rs**
+
+Add to `src/catalog/mod.rs`:
+```rust
+pub mod pg_settings;
+```
+
+**Step 5: Update query_interceptor.rs**
+
+Add import:
+```rust
+use super::pg_settings::PgSettingsHandler;
+```
+
+Add to `has_catalog_tables`:
+```rust
+lower_query.contains("pg_settings") ||
+```
+
+Add handler:
+```rust
+// Handle pg_settings queries
+if table_name.contains("pg_settings") || table_name.contains("pg_catalog.pg_settings") {
+    return Some(Ok(PgSettingsHandler::handle_query(select)));
+}
+```
+
+**Step 6: Run test and commit**
+
+Run: `cargo test --test pg_settings_test`
+Expected: PASS
+
+```bash
+git add src/catalog/pg_settings.rs src/catalog/mod.rs src/catalog/query_interceptor.rs tests/pg_settings_test.rs
+git commit -m "feat: add pg_settings catalog table with common PostgreSQL settings"
+```
+
+---
+
+## Task 8: Final Verification
+
+**Step 1: Run full test suite**
+
+```bash
+cargo test --lib
+cargo test --test pg_collation_test --test pg_replication_slots_test --test pg_shdepend_test --test pg_statistic_test --test pg_sequence_test --test pg_trigger_test --test pg_settings_test
+```
+
+**Step 2: Run clippy**
+
+```bash
+cargo clippy -- -W clippy::all
+```
+
+Fix any warnings.
+
+**Step 3: Build release**
+
+```bash
+cargo build --release
+```
+
+**Step 4: Final commit if any fixes needed**
+
+```bash
+git add -A
+git commit -m "fix: address clippy warnings and final cleanup"
+```
+
+---
+
+## Summary
+
+| Task | Table | Type | Status |
+|------|-------|------|--------|
+| 1 | pg_collation | Inline static | |
+| 2 | pg_replication_slots | Inline stub | |
+| 3 | pg_shdepend | Inline stub | |
+| 4 | pg_statistic | Inline stub | |
+| 5 | pg_sequence | Dedicated handler | |
+| 6 | pg_trigger | Dedicated handler | |
+| 7 | pg_settings | Dedicated handler | |
+| 8 | Final verification | - | |
+
+**Total: 8 tasks, ~530 lines of new code**
diff --git a/src/catalog/mod.rs b/src/catalog/mod.rs
index a5100cd0..43d343f3 100644
--- a/src/catalog/mod.rs
+++ b/src/catalog/mod.rs
@@ -10,6 +10,9 @@ pub mod pg_description;
 pub mod pg_roles;
 pub mod pg_user;
 pub mod pg_stats;
+pub mod pg_sequence;
+pub mod pg_trigger;
+pub mod pg_settings;
 pub mod system_functions;
 pub mod where_evaluator;
 pub mod constraint_populator;
diff --git a/src/catalog/pg_sequence.rs b/src/catalog/pg_sequence.rs
new file mode 100644
index 00000000..3a410b3a
--- /dev/null
+++ b/src/catalog/pg_sequence.rs
@@ -0,0 +1,179 @@
+use crate::session::db_handler::{DbHandler, DbResponse};
+use crate::PgSqliteError;
+use sqlparser::ast::{Select, SelectItem, Expr};
+use tracing::debug;
+use std::collections::HashMap;
+use super::where_evaluator::WhereEvaluator;
+
+pub struct PgSequenceHandler;
+
+impl PgSequenceHandler {
+    pub async fn handle_query(
+        select: &Select,
+        db: &DbHandler,
+    ) -> Result<DbResponse, PgSqliteError> {
+        debug!("Handling pg_sequence query");
+
+        let all_columns = vec![
+            "seqrelid".to_string(),
+            "seqtypid".to_string(),
+            "seqstart".to_string(),
+            "seqincrement".to_string(),
+            "seqmax".to_string(),
+            "seqmin".to_string(),
+            "seqcache".to_string(),
+            "seqcycle".to_string(),
+        ];
+
+        let selected_columns = Self::get_selected_columns(&select.projection, &all_columns);
+
+        let sequences = Self::get_sequences(db).await?;
+
+        let filtered_sequences = if let Some(where_clause) = &select.selection {
+            Self::apply_where_filter(&sequences, where_clause)?
+        } else {
+            sequences
+        };
+
+        let mut rows = Vec::new();
+        for sequence in filtered_sequences {
+            let mut row = Vec::new();
+            for column in &selected_columns {
+                let value = sequence.get(column).cloned().unwrap_or_else(|| b"".to_vec());
+                row.push(Some(value));
+            }
+            rows.push(row);
+        }
+
+        let rows_count = rows.len();
+        Ok(DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected: rows_count,
+        })
+    }
+
+    fn get_selected_columns(projection: &[SelectItem], all_columns: &[String]) -> Vec<String> {
+        let mut selected = Vec::new();
+
+        for item in projection {
+            match item {
+                SelectItem::Wildcard(_) => {
+                    selected.extend_from_slice(all_columns);
+                    break;
+                }
+                SelectItem::UnnamedExpr(Expr::Identifier(ident)) => {
+                    let col_name = ident.value.to_lowercase();
+                    if all_columns.contains(&col_name) {
+                        selected.push(col_name);
+                    }
+                }
+                SelectItem::ExprWithAlias { expr: Expr::Identifier(ident), alias } => {
+                    let col_name = ident.value.to_lowercase();
+                    if all_columns.contains(&col_name) {
+                        selected.push(alias.value.clone());
+                    }
+                }
+                SelectItem::QualifiedWildcard(_, _) => {
+                    selected.extend_from_slice(all_columns);
+                    break;
+                }
+                SelectItem::UnnamedExpr(Expr::Function(_)) => {
+                    selected.push("count".to_string());
+                }
+                SelectItem::ExprWithAlias { expr: Expr::Function(_), alias } => {
+                    selected.push(alias.value.clone());
+                }
+                _ => {}
+            }
+        }
+
+        selected
+    }
+
+    async fn get_sequences(db: &DbHandler) -> Result<Vec<HashMap<String, Vec<u8>>>, PgSqliteError> {
+        let mut sequences = Vec::new();
+
+        let conn = rusqlite::Connection::open(&db.db_path).map_err(PgSqliteError::Sqlite)?;
+
+        let query = "SELECT name, seq FROM sqlite_sequence";
+
+        let mut stmt = match conn.prepare(query) {
+            Ok(stmt) => stmt,
+            Err(e) => {
+                if e.to_string().contains("no such table") {
+                    debug!("sqlite_sequence table doesn't exist (no AUTOINCREMENT columns)");
+                    return Ok(sequences);
+                }
+                return Err(PgSqliteError::Sqlite(e));
+            }
+        };
+
+        let rows = stmt.query_map([], |row| {
+            let name: String = row.get(0)?;
+            let seq: i64 = row.get(1)?;
+            Ok((name, seq))
+        }).map_err(PgSqliteError::Sqlite)?;
+
+        for row_result in rows.flatten() {
+            let (table_name, current_value) = row_result;
+
+            let table_oid = Self::generate_table_oid(&table_name);
+
+            let mut sequence = HashMap::new();
+
+            sequence.insert("seqrelid".to_string(), table_oid.to_string().into_bytes());
+            sequence.insert("seqtypid".to_string(), b"20".to_vec()); // int8 (BIGINT) type OID
+            sequence.insert("seqstart".to_string(), b"1".to_vec()); // SQLite AUTOINCREMENT starts at 1
+            sequence.insert("seqincrement".to_string(), b"1".to_vec()); // SQLite increments by 1
+
+            // Max value for BIGINT: 9223372036854775807
+            sequence.insert("seqmax".to_string(), b"9223372036854775807".to_vec());
+
+            // Min value for sequences: 1
+            sequence.insert("seqmin".to_string(), b"1".to_vec());
+
+            // Cache size (SQLite doesn't have caching concept, default to 1)
+            sequence.insert("seqcache".to_string(), b"1".to_vec());
+
+            // Cycle behavior (SQLite doesn't cycle, default to false)
+            sequence.insert("seqcycle".to_string(), b"f".to_vec());
+
+            debug!("Found sequence for table {} with current value {}", table_name, current_value);
+            sequences.push(sequence);
+        }
+
+        Ok(sequences)
+    }
+
+    fn generate_table_oid(table_name: &str) -> u32 {
+        let mut hash = 0u32;
+        for byte in table_name.bytes() {
+            hash = hash.wrapping_mul(31).wrapping_add(byte as u32);
+        }
+        16384 + (hash % 65536)
+    }
+
+    fn apply_where_filter(
+        sequences: &[HashMap<String, Vec<u8>>],
+        where_clause: &Expr,
+    ) -> Result<Vec<HashMap<String, Vec<u8>>>, PgSqliteError> {
+        let mut filtered = Vec::new();
+
+        for sequence in sequences {
+            let mut string_data = HashMap::new();
+            for (key, value) in sequence {
+                if let Ok(string_val) = String::from_utf8(value.clone()) {
+                    string_data.insert(key.clone(), string_val);
+                }
+            }
+
+            let column_mapping = HashMap::new();
+            if WhereEvaluator::evaluate(where_clause, &string_data, &column_mapping) {
+                filtered.push(sequence.clone());
+            }
+        }
+
+        Ok(filtered)
+    }
+}
diff --git a/src/catalog/pg_settings.rs b/src/catalog/pg_settings.rs
new file mode 100644
index 00000000..4aea4162
--- /dev/null
+++ b/src/catalog/pg_settings.rs
@@ -0,0 +1,199 @@
+use crate::session::db_handler::DbResponse;
+use crate::PgSqliteError;
+use sqlparser::ast::{Select, SelectItem, Expr};
+use tracing::debug;
+use std::collections::HashMap;
+use super::where_evaluator::WhereEvaluator;
+
+pub struct PgSettingsHandler;
+
+impl PgSettingsHandler {
+    pub fn handle_query(select: &Select) -> Result<DbResponse, PgSqliteError> {
+        debug!("Handling pg_settings query");
+
+        let all_columns = vec![
+            "name".to_string(),
+            "setting".to_string(),
+            "unit".to_string(),
+            "category".to_string(),
+            "short_desc".to_string(),
+            "extra_desc".to_string(),
+            "context".to_string(),
+            "vartype".to_string(),
+            "source".to_string(),
+            "min_val".to_string(),
+            "max_val".to_string(),
+            "enumvals".to_string(),
+            "boot_val".to_string(),
+            "reset_val".to_string(),
+            "sourcefile".to_string(),
+            "sourceline".to_string(),
+            "pending_restart".to_string(),
+        ];
+
+        let selected_columns = Self::get_selected_columns(&select.projection, &all_columns);
+
+        let settings = Self::get_all_settings();
+
+        let filtered_settings = if let Some(where_clause) = &select.selection {
+            Self::apply_where_filter(&settings, where_clause)?
+        } else {
+            settings
+        };
+
+        let mut rows = Vec::new();
+        for setting in filtered_settings {
+            let mut row = Vec::new();
+            for column in &selected_columns {
+                let value = setting.get(column).cloned().unwrap_or_else(|| b"".to_vec());
+                row.push(Some(value));
+            }
+            rows.push(row);
+        }
+
+        let rows_count = rows.len();
+        Ok(DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected: rows_count,
+        })
+    }
+
+    fn get_selected_columns(projection: &[SelectItem], all_columns: &[String]) -> Vec<String> {
+        let mut selected = Vec::new();
+
+        for item in projection {
+            match item {
+                SelectItem::Wildcard(_) => {
+                    selected.extend_from_slice(all_columns);
+                    break;
+                }
+                SelectItem::UnnamedExpr(Expr::Identifier(ident)) => {
+                    let col_name = ident.value.to_lowercase();
+                    if all_columns.contains(&col_name) {
+                        selected.push(col_name);
+                    }
+                }
+                SelectItem::ExprWithAlias { expr: Expr::Identifier(ident), alias } => {
+                    let col_name = ident.value.to_lowercase();
+                    if all_columns.contains(&col_name) {
+                        selected.push(alias.value.clone());
+                    }
+                }
+                SelectItem::QualifiedWildcard(_, _) => {
+                    selected.extend_from_slice(all_columns);
+                    break;
+                }
+                SelectItem::UnnamedExpr(Expr::Function(_)) => {
+                    selected.push("count".to_string());
+                }
+                SelectItem::ExprWithAlias { expr: Expr::Function(_), alias } => {
+                    selected.push(alias.value.clone());
+                }
+                _ => {}
+            }
+        }
+
+        selected
+    }
+
+    fn get_all_settings() -> Vec<HashMap<String, Vec<u8>>> {
+        let settings_data: Vec<(&str, &str, &str, &str, &str, &str, &str)> = vec![
+            ("server_version", "16.0", "", "Preset Options", "PostgreSQL version string", "internal", "string"),
+            ("server_version_num", "160000", "", "Preset Options", "PostgreSQL version number", "internal", "integer"),
+            ("server_encoding", "UTF8", "", "Client Connection Defaults", "Server encoding", "internal", "string"),
+            ("client_encoding", "UTF8", "",
"Client Connection Defaults", "Client encoding", "user", "string"), + ("DateStyle", "ISO, MDY", "", "Client Connection Defaults", "Date display format", "user", "string"), + ("TimeZone", "UTC", "", "Client Connection Defaults", "Time zone", "user", "string"), + ("timezone_abbreviations", "Default", "", "Client Connection Defaults", "Time zone abbreviations", "user", "string"), + ("extra_float_digits", "1", "", "Client Connection Defaults", "Extra float digits", "user", "integer"), + ("integer_datetimes", "on", "", "Preset Options", "Integer datetimes", "internal", "bool"), + ("max_connections", "100", "", "Connections and Authentication", "Maximum connections", "postmaster", "integer"), + ("superuser_reserved_connections", "3", "", "Connections and Authentication", "Reserved for superuser", "postmaster", "integer"), + ("shared_buffers", "128MB", "8kB", "Resource Usage / Memory", "Shared memory buffers", "postmaster", "integer"), + ("work_mem", "4MB", "kB", "Resource Usage / Memory", "Work memory", "user", "integer"), + ("maintenance_work_mem", "64MB", "kB", "Resource Usage / Memory", "Maintenance work memory", "user", "integer"), + ("effective_cache_size", "4GB", "8kB", "Query Tuning / Planner Cost Constants", "Effective cache size", "user", "integer"), + ("random_page_cost", "4", "", "Query Tuning / Planner Cost Constants", "Random page cost", "user", "real"), + ("seq_page_cost", "1", "", "Query Tuning / Planner Cost Constants", "Sequential page cost", "user", "real"), + ("standard_conforming_strings", "on", "", "Client Connection Defaults", "Standard conforming strings", "user", "bool"), + ("escape_string_warning", "on", "", "Client Connection Defaults", "Escape string warning", "user", "bool"), + ("bytea_output", "hex", "", "Client Connection Defaults", "Bytea output format", "user", "enum"), + ("search_path", "\"$user\", public", "", "Client Connection Defaults", "Schema search path", "user", "string"), + ("log_statement", "none", "", "Reporting and Logging", "Log statements", "superuser", "enum"), + ("log_min_duration_statement", "-1", "ms", "Reporting and Logging", "Min duration to log", "superuser", "integer"), + ("lc_collate", "en_US.UTF-8", "", "Client Connection Defaults", "Collation locale", "internal", "string"), + ("lc_ctype", "en_US.UTF-8", "", "Client Connection Defaults", "Character type locale", "internal", "string"), + ("lc_messages", "en_US.UTF-8", "", "Client Connection Defaults", "Messages locale", "superuser", "string"), + ("lc_monetary", "en_US.UTF-8", "", "Client Connection Defaults", "Monetary locale", "user", "string"), + ("lc_numeric", "en_US.UTF-8", "", "Client Connection Defaults", "Numeric locale", "user", "string"), + ("lc_time", "en_US.UTF-8", "", "Client Connection Defaults", "Time locale", "user", "string"), + ("default_transaction_isolation", "read committed", "", "Client Connection Defaults", "Default isolation level", "user", "enum"), + ("default_transaction_read_only", "off", "", "Client Connection Defaults", "Default read only", "user", "bool"), + ("transaction_isolation", "read committed", "", "Client Connection Defaults", "Transaction isolation", "user", "enum"), + ("transaction_read_only", "off", "", "Client Connection Defaults", "Transaction read only", "user", "bool"), + ("application_name", "", "", "Client Connection Defaults", "Application name", "user", "string"), + ("ssl", "off", "", "Connections and Authentication", "SSL enabled", "sighup", "bool"), + ("wal_level", "replica", "", "Write-Ahead Log", "WAL level", "postmaster", "enum"), + 
("max_wal_senders", "10", "", "Replication", "Max WAL senders", "postmaster", "integer"), + ("autovacuum", "on", "", "Autovacuum", "Autovacuum enabled", "sighup", "bool"), + ("statement_timeout", "0", "ms", "Client Connection Defaults", "Statement timeout", "user", "integer"), + ("lock_timeout", "0", "ms", "Client Connection Defaults", "Lock timeout", "user", "integer"), + ("idle_in_transaction_session_timeout", "0", "ms", "Client Connection Defaults", "Idle transaction timeout", "user", "integer"), + ]; + + settings_data + .into_iter() + .map(|(name, setting, unit, category, short_desc, context, vartype)| { + let mut setting_map = HashMap::new(); + + setting_map.insert("name".to_string(), name.as_bytes().to_vec()); + setting_map.insert("setting".to_string(), setting.as_bytes().to_vec()); + if unit.is_empty() { + setting_map.insert("unit".to_string(), b"".to_vec()); + } else { + setting_map.insert("unit".to_string(), unit.as_bytes().to_vec()); + } + setting_map.insert("category".to_string(), category.as_bytes().to_vec()); + setting_map.insert("short_desc".to_string(), short_desc.as_bytes().to_vec()); + setting_map.insert("extra_desc".to_string(), b"".to_vec()); + setting_map.insert("context".to_string(), context.as_bytes().to_vec()); + setting_map.insert("vartype".to_string(), vartype.as_bytes().to_vec()); + setting_map.insert("source".to_string(), b"default".to_vec()); + setting_map.insert("min_val".to_string(), b"".to_vec()); + setting_map.insert("max_val".to_string(), b"".to_vec()); + setting_map.insert("enumvals".to_string(), b"".to_vec()); + setting_map.insert("boot_val".to_string(), setting.as_bytes().to_vec()); + setting_map.insert("reset_val".to_string(), setting.as_bytes().to_vec()); + setting_map.insert("sourcefile".to_string(), b"".to_vec()); + setting_map.insert("sourceline".to_string(), b"".to_vec()); + setting_map.insert("pending_restart".to_string(), b"f".to_vec()); + + setting_map + }) + .collect() + } + + fn apply_where_filter( + settings: &[HashMap>], + where_clause: &Expr, + ) -> Result>>, PgSqliteError> { + let mut filtered = Vec::new(); + + for setting in settings { + let mut string_data = HashMap::new(); + for (key, value) in setting { + if let Ok(string_val) = String::from_utf8(value.clone()) { + string_data.insert(key.clone(), string_val); + } + } + + let column_mapping = HashMap::new(); + if WhereEvaluator::evaluate(where_clause, &string_data, &column_mapping) { + filtered.push(setting.clone()); + } + } + + Ok(filtered) + } +} diff --git a/src/catalog/pg_trigger.rs b/src/catalog/pg_trigger.rs new file mode 100644 index 00000000..30e5e785 --- /dev/null +++ b/src/catalog/pg_trigger.rs @@ -0,0 +1,263 @@ +use crate::session::db_handler::{DbHandler, DbResponse}; +use crate::PgSqliteError; +use sqlparser::ast::{Select, SelectItem, Expr}; +use tracing::debug; +use std::collections::HashMap; +use super::where_evaluator::WhereEvaluator; + +pub struct PgTriggerHandler; + +impl PgTriggerHandler { + pub async fn handle_query( + select: &Select, + db: &DbHandler, + ) -> Result { + debug!("Handling pg_trigger query"); + + let all_columns = vec![ + "oid".to_string(), + "tgrelid".to_string(), + "tgparentid".to_string(), + "tgname".to_string(), + "tgfoid".to_string(), + "tgtype".to_string(), + "tgenabled".to_string(), + "tgisinternal".to_string(), + "tgconstrrelid".to_string(), + "tgconstrindid".to_string(), + "tgconstraint".to_string(), + "tgdeferrable".to_string(), + "tginitdeferred".to_string(), + "tgnargs".to_string(), + "tgattr".to_string(), + "tgargs".to_string(), + 
"tgqual".to_string(), + "tgoldtable".to_string(), + "tgnewtable".to_string(), + ]; + + let selected_columns = Self::get_selected_columns(&select.projection, &all_columns); + + let triggers = Self::get_triggers(db).await?; + + let mut filtered_triggers = if let Some(where_clause) = &select.selection { + Self::apply_where_filter(&triggers, where_clause)? + } else { + triggers + }; + + filtered_triggers.sort_by(|a, b| { + let name_a = a.get("tgname").and_then(|v| String::from_utf8(v.clone()).ok()).unwrap_or_default(); + let name_b = b.get("tgname").and_then(|v| String::from_utf8(v.clone()).ok()).unwrap_or_default(); + name_a.cmp(&name_b) + }); + + let mut rows = Vec::new(); + for trigger in filtered_triggers { + let mut row = Vec::new(); + for column in &selected_columns { + let value = trigger.get(column).cloned().unwrap_or_else(|| b"".to_vec()); + row.push(Some(value)); + } + rows.push(row); + } + + let rows_count = rows.len(); + Ok(DbResponse { + columns: selected_columns, + rows, + rows_affected: rows_count, + }) + } + + fn get_selected_columns(projection: &[SelectItem], all_columns: &[String]) -> Vec { + let mut selected = Vec::new(); + + for item in projection { + match item { + SelectItem::Wildcard(_) => { + selected.extend_from_slice(all_columns); + break; + } + SelectItem::UnnamedExpr(Expr::Identifier(ident)) => { + let col_name = ident.value.to_lowercase(); + if all_columns.contains(&col_name) { + selected.push(col_name); + } + } + SelectItem::ExprWithAlias { expr: Expr::Identifier(ident), alias } => { + let col_name = ident.value.to_lowercase(); + if all_columns.contains(&col_name) { + selected.push(alias.value.clone()); + } + } + SelectItem::QualifiedWildcard(_, _) => { + selected.extend_from_slice(all_columns); + break; + } + SelectItem::UnnamedExpr(Expr::Function(_)) => { + selected.push("count".to_string()); + } + SelectItem::ExprWithAlias { expr: Expr::Function(_), alias } => { + selected.push(alias.value.clone()); + } + _ => {} + } + } + + selected + } + + async fn get_triggers(db: &DbHandler) -> Result>>, PgSqliteError> { + let mut triggers = Vec::new(); + + let conn = rusqlite::Connection::open(&db.db_path).map_err(PgSqliteError::Sqlite)?; + + let query = "SELECT name, tbl_name, sql FROM sqlite_master WHERE type = 'trigger'"; + + let mut stmt = conn.prepare(query).map_err(PgSqliteError::Sqlite)?; + + let rows = stmt.query_map([], |row| { + let name: String = row.get(0)?; + let tbl_name: String = row.get(1)?; + let sql: String = row.get(2)?; + Ok((name, tbl_name, sql)) + }).map_err(PgSqliteError::Sqlite)?; + + for row_result in rows.flatten() { + let (trigger_name, table_name, trigger_sql) = row_result; + + let (timing, event, _orientation) = Self::parse_trigger_sql(&trigger_sql); + + let trigger_oid = Self::generate_trigger_oid(&trigger_name); + let table_oid = Self::generate_table_oid(&table_name); + let tgtype = Self::calculate_tgtype(&timing, &event); + + let mut trigger = HashMap::new(); + + trigger.insert("oid".to_string(), trigger_oid.to_string().into_bytes()); + trigger.insert("tgrelid".to_string(), table_oid.to_string().into_bytes()); + trigger.insert("tgparentid".to_string(), b"0".to_vec()); + trigger.insert("tgname".to_string(), trigger_name.clone().into_bytes()); + trigger.insert("tgfoid".to_string(), b"0".to_vec()); + trigger.insert("tgtype".to_string(), tgtype.to_string().into_bytes()); + trigger.insert("tgenabled".to_string(), b"O".to_vec()); + trigger.insert("tgisinternal".to_string(), b"f".to_vec()); + trigger.insert("tgconstrrelid".to_string(), 
b"0".to_vec()); + trigger.insert("tgconstrindid".to_string(), b"0".to_vec()); + trigger.insert("tgconstraint".to_string(), b"0".to_vec()); + trigger.insert("tgdeferrable".to_string(), b"f".to_vec()); + trigger.insert("tginitdeferred".to_string(), b"f".to_vec()); + trigger.insert("tgnargs".to_string(), b"0".to_vec()); + trigger.insert("tgattr".to_string(), b"".to_vec()); + trigger.insert("tgargs".to_string(), b"".to_vec()); + trigger.insert("tgqual".to_string(), b"".to_vec()); + trigger.insert("tgoldtable".to_string(), b"".to_vec()); + trigger.insert("tgnewtable".to_string(), b"".to_vec()); + + debug!("Found trigger {} on table {} with type {}", trigger_name, table_name, tgtype); + triggers.push(trigger); + } + + Ok(triggers) + } + + fn parse_trigger_sql(sql: &str) -> (String, String, String) { + let sql_upper = sql.to_uppercase(); + + let timing = if sql_upper.contains("BEFORE") { + "BEFORE".to_string() + } else if sql_upper.contains("AFTER") { + "AFTER".to_string() + } else if sql_upper.contains("INSTEAD OF") { + "INSTEAD OF".to_string() + } else { + "BEFORE".to_string() + }; + + let event = if let Some(on_pos) = sql_upper.find(" ON ") { + let before_on = &sql_upper[..on_pos]; + + if before_on.contains(" DELETE") || before_on.ends_with("DELETE") { + "DELETE".to_string() + } else if before_on.contains(" UPDATE") || before_on.ends_with("UPDATE") { + "UPDATE".to_string() + } else { + "INSERT".to_string() + } + } else { + if sql_upper.contains("DELETE") { + "DELETE".to_string() + } else if sql_upper.contains("UPDATE") { + "UPDATE".to_string() + } else { + "INSERT".to_string() + } + }; + + let orientation = "ROW".to_string(); + + (timing, event, orientation) + } + + fn calculate_tgtype(timing: &str, event: &str) -> i16 { + let mut tgtype: i16 = 0; + + tgtype |= 1; + + if timing == "BEFORE" { + tgtype |= 2; + } else if timing == "INSTEAD OF" { + tgtype |= 64; + } + + if event == "INSERT" { + tgtype |= 4; + } else if event == "DELETE" { + tgtype |= 8; + } else if event == "UPDATE" { + tgtype |= 16; + } + + tgtype + } + + fn generate_trigger_oid(trigger_name: &str) -> u32 { + let mut hash = 0u32; + for byte in trigger_name.bytes() { + hash = hash.wrapping_mul(31).wrapping_add(byte as u32); + } + 16384 + (hash % 65536) + } + + fn generate_table_oid(table_name: &str) -> u32 { + let mut hash = 0u32; + for byte in table_name.bytes() { + hash = hash.wrapping_mul(31).wrapping_add(byte as u32); + } + 16384 + (hash % 65536) + } + + fn apply_where_filter( + triggers: &[HashMap>], + where_clause: &Expr, + ) -> Result>>, PgSqliteError> { + let mut filtered = Vec::new(); + + for trigger in triggers { + let mut string_data = HashMap::new(); + for (key, value) in trigger { + if let Ok(string_val) = String::from_utf8(value.clone()) { + string_data.insert(key.clone(), string_val); + } + } + + let column_mapping = HashMap::new(); + if WhereEvaluator::evaluate(where_clause, &string_data, &column_mapping) { + filtered.push(trigger.clone()); + } + } + + Ok(filtered) + } +} diff --git a/src/catalog/query_interceptor.rs b/src/catalog/query_interceptor.rs index 3f7d94ba..e1f90546 100644 --- a/src/catalog/query_interceptor.rs +++ b/src/catalog/query_interceptor.rs @@ -8,7 +8,7 @@ use sqlparser::dialect::PostgreSqlDialect; use sqlparser::parser::Parser; use sqlparser::tokenizer::{Location, Span}; use tracing::{debug, info}; -use super::{pg_class::PgClassHandler, pg_attribute::PgAttributeHandler, pg_constraint::PgConstraintHandler, pg_depend::PgDependHandler, pg_enum::PgEnumHandler, pg_description::PgDescriptionHandler, 
+    fn generate_trigger_oid(trigger_name: &str) -> u32 {
+        let mut hash = 0u32;
+        for byte in trigger_name.bytes() {
+            hash = hash.wrapping_mul(31).wrapping_add(byte as u32);
+        }
+        16384 + (hash % 65536)
+    }
+
+    fn generate_table_oid(table_name: &str) -> u32 {
+        let mut hash = 0u32;
+        for byte in table_name.bytes() {
+            hash = hash.wrapping_mul(31).wrapping_add(byte as u32);
+        }
+        16384 + (hash % 65536)
+    }
+
+    fn apply_where_filter(
+        triggers: &[HashMap<String, Vec<u8>>],
+        where_clause: &Expr,
+    ) -> Result<Vec<HashMap<String, Vec<u8>>>, PgSqliteError> {
+        let mut filtered = Vec::new();
+
+        for trigger in triggers {
+            let mut string_data = HashMap::new();
+            for (key, value) in trigger {
+                if let Ok(string_val) = String::from_utf8(value.clone()) {
+                    string_data.insert(key.clone(), string_val);
+                }
+            }
+
+            let column_mapping = HashMap::new();
+            if WhereEvaluator::evaluate(where_clause, &string_data, &column_mapping) {
+                filtered.push(trigger.clone());
+            }
+        }
+
+        Ok(filtered)
+    }
+}
diff --git a/src/catalog/query_interceptor.rs b/src/catalog/query_interceptor.rs
index 3f7d94ba..e1f90546 100644
--- a/src/catalog/query_interceptor.rs
+++ b/src/catalog/query_interceptor.rs
@@ -8,7 +8,7 @@ use sqlparser::dialect::PostgreSqlDialect;
 use sqlparser::parser::Parser;
 use sqlparser::tokenizer::{Location, Span};
 use tracing::{debug, info};
-use super::{pg_class::PgClassHandler, pg_attribute::PgAttributeHandler, pg_constraint::PgConstraintHandler, pg_depend::PgDependHandler, pg_enum::PgEnumHandler, pg_description::PgDescriptionHandler, pg_roles::PgRolesHandler, pg_user::PgUserHandler, pg_stats::PgStatsHandler, system_functions::SystemFunctions, where_evaluator::WhereEvaluator};
+use super::{pg_class::PgClassHandler, pg_attribute::PgAttributeHandler, pg_constraint::PgConstraintHandler, pg_depend::PgDependHandler, pg_enum::PgEnumHandler, pg_description::PgDescriptionHandler, pg_roles::PgRolesHandler, pg_user::PgUserHandler, pg_stats::PgStatsHandler, pg_sequence::PgSequenceHandler, pg_trigger::PgTriggerHandler, pg_settings::PgSettingsHandler, system_functions::SystemFunctions, where_evaluator::WhereEvaluator};
 use std::sync::Arc;
 use std::pin::Pin;
 use std::future::Future;
@@ -55,7 +55,12 @@
                 lower_query.contains("pg_description") || lower_query.contains("pg_roles") ||
                 lower_query.contains("pg_user") || lower_query.contains("pg_authid") ||
                 lower_query.contains("pg_stats") || lower_query.contains("pg_constraint") ||
-                lower_query.contains("pg_depend") ||
+                lower_query.contains("pg_depend") || lower_query.contains("pg_sequence") ||
+                lower_query.contains("pg_trigger") || lower_query.contains("pg_settings") ||
+                lower_query.contains("pg_collation") ||
+                lower_query.contains("pg_replication_slots") ||
+                lower_query.contains("pg_shdepend") ||
+                lower_query.contains("pg_statistic") ||
                 lower_query.contains("information_schema") || lower_query.contains("pg_stat_") ||
                 lower_query.contains("pg_database") || lower_query.contains("pg_foreign_data_wrapper");
@@ -515,6 +520,26 @@
             return Some(Ok(Self::handle_pg_tablespace_query(select)));
         }
+
+        // Handle pg_collation queries
+        if table_name.contains("pg_collation") || table_name.contains("pg_catalog.pg_collation") {
+            return Some(Ok(Self::handle_pg_collation_query(select)));
+        }
+
+        // Handle pg_replication_slots queries (always empty - SQLite has no replication)
+        if table_name.contains("pg_replication_slots") || table_name.contains("pg_catalog.pg_replication_slots") {
+            return Some(Ok(Self::handle_pg_replication_slots_query(select)));
+        }
+
+        // Handle pg_shdepend queries (always empty - SQLite has no shared dependencies)
+        if table_name.contains("pg_shdepend") || table_name.contains("pg_catalog.pg_shdepend") {
+            return Some(Ok(Self::handle_pg_shdepend_query(select)));
+        }
+
+        // Handle pg_statistic queries (always empty - internal stats table)
+        if table_name.contains("pg_statistic") || table_name.contains("pg_catalog.pg_statistic") {
+            return Some(Ok(Self::handle_pg_statistic_query(select)));
+        }
+
         // Handle pg_class queries
         if table_name.contains("pg_class") || table_name.contains("pg_catalog.pg_class") {
             return Some(PgClassHandler::handle_query(select, &db).await);
@@ -614,6 +639,46 @@
             };
         }
+
+        // Handle pg_sequence queries
+        if table_name.contains("pg_sequence") || table_name.contains("pg_catalog.pg_sequence") {
+            info!("Routing to PgSequenceHandler for table: {}", table_name);
+            return match PgSequenceHandler::handle_query(select, &db).await {
+                Ok(response) => {
+                    debug!("PgSequenceHandler returned {} rows", response.rows.len());
+                    Some(Ok(response))
+                },
+                Err(_) => {
+                    None
+                },
+            };
+        }
+
+        if table_name.contains("pg_trigger") || table_name.contains("pg_catalog.pg_trigger") {
+            info!("Routing to PgTriggerHandler for table: {}", table_name);
+            return match PgTriggerHandler::handle_query(select, &db).await {
+                Ok(response) => {
+                    debug!("PgTriggerHandler returned {} rows", response.rows.len());
+                    Some(Ok(response))
+                },
+                Err(_) => {
+                    None
+                },
+            };
+        }
+
+        if table_name.contains("pg_settings") || table_name.contains("pg_catalog.pg_settings") {
+            info!("Routing to PgSettingsHandler for table: {}", table_name);
+            return match PgSettingsHandler::handle_query(select) {
+                Ok(response) => {
+                    debug!("PgSettingsHandler returned {} rows", response.rows.len());
+                    Some(Ok(response))
+                },
+                Err(_) => {
+                    None
+                },
+            };
+        }
+
         // Handle pg_depend queries
         if table_name.contains("pg_depend") || table_name.contains("pg_catalog.pg_depend") {
             info!("Routing to PgDependHandler for table: {}", table_name);
@@ -1063,6 +1128,191 @@ impl CatalogInterceptor {
         }
     }
+    pub fn handle_pg_collation_query(select: &Select) -> DbResponse {
+        // Define pg_collation columns (PostgreSQL standard)
+        let all_columns = vec![
+            "oid".to_string(),
+            "collname".to_string(),
+            "collnamespace".to_string(),
+            "collowner".to_string(),
+            "collprovider".to_string(),
+            "collisdeterministic".to_string(),
+            "collencoding".to_string(),
+            "collcollate".to_string(),
+            "collctype".to_string(),
+            "colliculocale".to_string(),
+            "collicurules".to_string(),
+            "collversion".to_string(),
+        ];
+
+        // Extract selected columns
+        let (selected_columns, column_indices) = Self::extract_selected_columns(select, &all_columns);
+
+        // Define standard collations
+        let collations = vec![
+            ("100", "default", "11", "10", "d", "t", "-1", "", "", "", "", ""),
+            ("950", "C", "11", "10", "c", "t", "-1", "C", "C", "", "", ""),
+            ("951", "POSIX", "11", "10", "c", "t", "-1", "POSIX", "POSIX", "", "", ""),
+        ];
+
+        // Check for WHERE clause filtering by collname
+        let name_filter = if let Some(ref where_clause) = select.selection {
+            Self::extract_collation_name_filter(where_clause)
+        } else {
+            None
+        };
+
+        let mut rows = Vec::new();
+        for (oid, collname, collnamespace, collowner, collprovider, collisdeterministic,
+             collencoding, collcollate, collctype, colliculocale, collicurules, collversion) in collations {
+
+            // Apply name filter if present
+            if let Some(ref filter) = name_filter {
+                if collname != filter {
+                    continue;
+                }
+            }
+
+            let full_row: Vec<Option<Vec<u8>>> = vec![
+                Some(oid.to_string().into_bytes()),
+                Some(collname.to_string().into_bytes()),
+                Some(collnamespace.to_string().into_bytes()),
+                Some(collowner.to_string().into_bytes()),
+                Some(collprovider.to_string().into_bytes()),
+                Some(collisdeterministic.to_string().into_bytes()),
+                Some(collencoding.to_string().into_bytes()),
+                if collcollate.is_empty() { None } else { Some(collcollate.to_string().into_bytes()) },
+                if collctype.is_empty() { None } else { Some(collctype.to_string().into_bytes()) },
+                if colliculocale.is_empty() { None } else { Some(colliculocale.to_string().into_bytes()) },
+                if collicurules.is_empty() { None } else { Some(collicurules.to_string().into_bytes()) },
+                if collversion.is_empty() { None } else { Some(collversion.to_string().into_bytes()) },
+            ];
+
+            let projected_row: Vec<Option<Vec<u8>>> = column_indices.iter()
+                .map(|&idx| full_row[idx].clone())
+                .collect();
+            rows.push(projected_row);
+        }
+
+        let rows_affected = rows.len();
+        DbResponse {
+            columns: selected_columns,
+            rows,
+            rows_affected,
+        }
+    }
+
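+    // Only the simple `collname = '<name>'` equality shape is recognized by the
+    // helper below; any other predicate yields None, in which case all collations
+    // are returned unfiltered.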
+    fn extract_collation_name_filter(where_clause: &Expr) -> Option<String> {
+        match where_clause {
+            Expr::BinaryOp { left, op, right } => {
+                if let (Expr::Identifier(ident), sqlparser::ast::BinaryOperator::Eq, Expr::Value(value_with_span)) =
+                    (left.as_ref(), op, right.as_ref())
+                    && ident.value.to_lowercase() == "collname"
+                    && let sqlparser::ast::Value::SingleQuotedString(value) = &value_with_span.value {
+                    return Some(value.clone());
+                }
+            }
+            _ => {}
+        }
+        None
+    }
+
+    pub fn handle_pg_replication_slots_query(select: &Select) -> DbResponse {
+        let all_columns = vec![
+            "slot_name".to_string(),
+            "plugin".to_string(),
+            "slot_type".to_string(),
+            "datoid".to_string(),
+            "database".to_string(),
+            "temporary".to_string(),
+            "active".to_string(),
+            "active_pid".to_string(),
+            "xmin".to_string(),
+            "catalog_xmin".to_string(),
+            "restart_lsn".to_string(),
+            "confirmed_flush_lsn".to_string(),
+            "wal_status".to_string(),
+            "safe_wal_size".to_string(),
+            "two_phase".to_string(),
+            "conflicting".to_string(),
+        ];
+
+        let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns);
+
+        // Always return empty - SQLite has no replication
+        DbResponse {
+            columns: selected_columns,
+            rows: vec![],
+            rows_affected: 0,
+        }
+    }
+
+    pub fn handle_pg_shdepend_query(select: &Select) -> DbResponse {
+        let all_columns = vec![
+            "dbid".to_string(),
+            "classid".to_string(),
+            "objid".to_string(),
+            "objsubid".to_string(),
+            "refclassid".to_string(),
+            "refobjid".to_string(),
+            "deptype".to_string(),
+        ];
+
+        let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns);
+
+        // Always return empty - SQLite has no shared dependencies
+        DbResponse {
+            columns: selected_columns,
+            rows: vec![],
+            rows_affected: 0,
+        }
+    }
+
+    pub fn handle_pg_statistic_query(select: &Select) -> DbResponse {
+        let all_columns = vec![
+            "starelid".to_string(),
+            "staattnum".to_string(),
+            "stainherit".to_string(),
+            "stanullfrac".to_string(),
+            "stawidth".to_string(),
+            "stadistinct".to_string(),
+            "stakind1".to_string(),
+            "stakind2".to_string(),
+            "stakind3".to_string(),
+            "stakind4".to_string(),
+            "stakind5".to_string(),
+            "staop1".to_string(),
+            "staop2".to_string(),
+            "staop3".to_string(),
+            "staop4".to_string(),
+            "staop5".to_string(),
+            "stacoll1".to_string(),
+            "stacoll2".to_string(),
+            "stacoll3".to_string(),
+            "stacoll4".to_string(),
+            "stacoll5".to_string(),
+            "stanumbers1".to_string(),
+            "stanumbers2".to_string(),
+            "stanumbers3".to_string(),
+            "stanumbers4".to_string(),
+            "stanumbers5".to_string(),
+            "stavalues1".to_string(),
+            "stavalues2".to_string(),
+            "stavalues3".to_string(),
+            "stavalues4".to_string(),
+            "stavalues5".to_string(),
+        ];
+
+        let (selected_columns, _) = Self::extract_selected_columns(select, &all_columns);
+
+        // Always return empty - internal stats table, use pg_stats view instead
+        DbResponse {
+            columns: selected_columns,
+            rows: vec![],
+            rows_affected: 0,
+        }
+    }
+
     fn handle_pg_type_join_query(select: &Select) -> DbResponse {
         // Handle the complex JOIN query that tokio-postgres uses
         // Extract which columns are being selected
diff --git a/src/security/audit_logger.rs b/src/security/audit_logger.rs
index 9679f339..71d62703 100644
--- a/src/security/audit_logger.rs
+++ b/src/security/audit_logger.rs
@@ -492,10 +492,7 @@ impl SecurityAuditLogger {
             event.client_ip.map(|ip| ip.to_string()).unwrap_or_else(|| "unknown".to_string()),
             event.message
         );
-
-        // Update alerting stats
-        let mut stats = self.stats.write();
-        stats.alerts_triggered += 1;
+        // Note: alerts_triggered is already counted in update_stats()
     }
 
     /// Buffer management for batching
diff --git a/src/session/db_handler.rs b/src/session/db_handler.rs
index 8a05042a..eb033fe9 100644
--- a/src/session/db_handler.rs
+++ b/src/session/db_handler.rs
@@ -597,6 +597,112 @@ impl DbHandler {
     pub async fn query(&self, query: &str) -> Result<DbResponse, rusqlite::Error> {
         // Check for pg_stats queries first - they should be intercepted regardless of database type
         let lower_query = query.to_lowercase();
+
+        // Handle pg_sequence queries
+        if lower_query.contains("pg_sequence") || lower_query.contains("pg_catalog.pg_sequence") {
+            use crate::catalog::pg_sequence::PgSequenceHandler;
+
+            // For aggregate queries (COUNT, AVG, etc), we need to materialize pg_sequence as a temp table
+            // and run the query against it
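+            // For example, `SELECT COUNT(*) FROM pg_sequence` goes through the temp
+            // table below, while `SELECT seqrelid FROM pg_sequence` is served
+            // directly by PgSequenceHandler in the else branch.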
+            if lower_query.contains("count(") || lower_query.contains("avg(") ||
+               lower_query.contains("sum(") || lower_query.contains("max(") ||
+               lower_query.contains("min(") {
+                // Create a temporary connection and materialize pg_sequence data
+                let temp_conn = rusqlite::Connection::open_in_memory().map_err(|e| {
+                    rusqlite::Error::SqliteFailure(
+                        rusqlite::ffi::Error::new(rusqlite::ffi::SQLITE_ERROR),
+                        Some(format!("Failed to create temp connection: {e}"))
+                    )
+                })?;
+
+                // Create temp table with pg_sequence schema
+                temp_conn.execute("
+                    CREATE TEMP TABLE pg_sequence (
+                        seqrelid INTEGER,
+                        seqtypid INTEGER,
+                        seqstart BIGINT,
+                        seqincrement BIGINT,
+                        seqmax BIGINT,
+                        seqmin BIGINT,
+                        seqcache BIGINT,
+                        seqcycle BOOLEAN
+                    )
+                ", []).ok();
+
+                // Get pg_sequence data and insert into temp table
+                let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, "SELECT * FROM pg_sequence");
+                if let Ok(mut statements) = parsed_query
+                    && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                    && let Some(select) = query_ast.body.as_select() {
+                    if let Ok(sequence_data) = PgSequenceHandler::handle_query(select, self).await {
+                        // Insert the data into temp table
+                        for row in &sequence_data.rows {
+                            let mut values = Vec::new();
+                            for col in row {
+                                if let Some(bytes) = col {
+                                    values.push(String::from_utf8_lossy(bytes).to_string());
+                                } else {
+                                    values.push(String::new());
+                                }
+                            }
+
+                            let insert_sql = format!(
+                                "INSERT INTO pg_sequence VALUES ({}, {}, {}, {}, {}, {}, {}, {})",
+                                values[0], values[1], values[2], values[3],
+                                values[4], values[5], values[6], if values[7] == "t" { "1" } else { "0" }
+                            );
+                            temp_conn.execute(&insert_sql, []).ok();
+                        }
+                    }
+                }
+
+                // Now execute the actual query against the temp table
+                let mut stmt = temp_conn.prepare(query)?;
+                let column_count = stmt.column_count();
+                let mut columns = Vec::new();
+                for i in 0..column_count {
+                    columns.push(stmt.column_name(i)?.to_string());
+                }
+
+                let rows_result: rusqlite::Result<Vec<Vec<Option<Vec<u8>>>>> = stmt.query_map([], |row| {
+                    let mut values = Vec::new();
+                    for i in 0..column_count {
+                        // Try different types since COUNT returns integer
+                        if let Ok(int_val) = row.get::<_, i64>(i) {
+                            values.push(Some(int_val.to_string().into_bytes()));
+                        } else if let Ok(Some(string_val)) = row.get::<_, Option<String>>(i) {
+                            values.push(Some(string_val.into_bytes()));
+                        } else {
+                            values.push(None);
+                        }
+                    }
+                    Ok(values)
+                })?.collect();
+
+                return match rows_result {
+                    Ok(rows) => {
+                        let rows_affected = rows.len();
+                        Ok(DbResponse {
+                            columns,
+                            rows,
+                            rows_affected,
+                        })
+                    }
+                    Err(e) => Err(e),
+                };
+            } else {
+                // For simple SELECT queries, handle directly via PgSequenceHandler
+                let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+                if let Ok(mut statements) = parsed_query
+                    && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                    && let Some(select) = query_ast.body.as_select() {
+                    if let Ok(response) = PgSequenceHandler::handle_query(select, self).await {
+                        return Ok(response);
+                    }
+                }
+            }
+        }
+
         if lower_query.contains("pg_stats") || lower_query.contains("pg_catalog.pg_stats") {
             use crate::catalog::pg_stats::PgStatsHandler;
@@ -721,6 +827,35 @@ impl DbHandler {
             });
         }
+
+        // Handle pg_settings queries
+        if lower_query.contains("pg_settings") || lower_query.contains("pg_catalog.pg_settings") {
+            use crate::catalog::pg_settings::PgSettingsHandler;
+
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                match PgSettingsHandler::handle_query(select) {
+                    Ok(response) => return Ok(response),
+                    Err(_) => {
+                        // Fallback to empty response
+                        return Ok(DbResponse {
+                            columns: vec!["name".to_string(), "setting".to_string()],
+                            rows: vec![],
+                            rows_affected: 0,
+                        });
+                    }
+                }
+            }
+
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["name".to_string(), "setting".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
         // Check if it's any form of memory database (including named shared memory)
         if self.db_path == ":memory:" || self.db_path.contains("mode=memory") {
             // For memory databases, use a temporary session connection
@@ -822,6 +957,74 @@ impl DbHandler {
             });
         }
+
+        // Handle pg_collation queries
+        if lower_query.contains("pg_collation") || lower_query.contains("pg_catalog.pg_collation") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_collation_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["oid".to_string(), "collname".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_replication_slots queries (always empty - SQLite has no replication)
+        if lower_query.contains("pg_replication_slots") || lower_query.contains("pg_catalog.pg_replication_slots") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_replication_slots_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["slot_name".to_string(), "plugin".to_string(), "slot_type".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_shdepend queries (always empty - SQLite has no shared dependencies)
+        if lower_query.contains("pg_shdepend") || lower_query.contains("pg_catalog.pg_shdepend") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_shdepend_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["dbid".to_string(), "classid".to_string(), "objid".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_statistic queries (always empty - internal stats table)
+        if lower_query.contains("pg_statistic") || lower_query.contains("pg_catalog.pg_statistic") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_statistic_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["starelid".to_string(), "staattnum".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
         // Handle pg_stats queries
         if lower_query.contains("pg_stats") || lower_query.contains("pg_catalog.pg_stats") {
             use crate::catalog::pg_stats::PgStatsHandler;
@@ -1101,6 +1304,74 @@ impl DbHandler {
             });
         }
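+        // The catalog short-circuits below repeat the set added earlier in this
+        // file for what appears to be a separate query path; when adding a new
+        // catalog table, both sets need the same treatment.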
+        // Handle pg_collation queries
+        if lower_query.contains("pg_collation") || lower_query.contains("pg_catalog.pg_collation") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_collation_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["oid".to_string(), "collname".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_replication_slots queries (always empty - SQLite has no replication)
+        if lower_query.contains("pg_replication_slots") || lower_query.contains("pg_catalog.pg_replication_slots") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_replication_slots_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["slot_name".to_string(), "plugin".to_string(), "slot_type".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_shdepend queries (always empty - SQLite has no shared dependencies)
+        if lower_query.contains("pg_shdepend") || lower_query.contains("pg_catalog.pg_shdepend") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_shdepend_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["dbid".to_string(), "classid".to_string(), "objid".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
+        // Handle pg_statistic queries (always empty - internal stats table)
+        if lower_query.contains("pg_statistic") || lower_query.contains("pg_catalog.pg_statistic") {
+            use crate::catalog::query_interceptor::CatalogInterceptor;
+            let parsed_query = sqlparser::parser::Parser::parse_sql(&sqlparser::dialect::PostgreSqlDialect {}, query);
+            if let Ok(mut statements) = parsed_query
+                && let Some(sqlparser::ast::Statement::Query(query_ast)) = statements.pop()
+                && let Some(select) = query_ast.body.as_select() {
+                return Ok(CatalogInterceptor::handle_pg_statistic_query(select));
+            }
+            // Fallback to empty response if parsing fails
+            return Ok(DbResponse {
+                columns: vec!["starelid".to_string(), "staattnum".to_string()],
+                rows: vec![],
+                rows_affected: 0,
+            });
+        }
+
         // Handle pg_stats queries
         if lower_query.contains("pg_stats") || lower_query.contains("pg_catalog.pg_stats") {
             use crate::catalog::pg_stats::PgStatsHandler;
diff --git a/tests/pg_collation_test.rs b/tests/pg_collation_test.rs
new file mode 100644
index 00000000..b7680336
--- /dev/null
+++ b/tests/pg_collation_test.rs
@@ -0,0 +1,31 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+use uuid::Uuid;
+
+#[tokio::test]
+async fn test_pg_collation_basic() {
+    // Create a temporary database
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_collation.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    // Create session
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    // Test basic query
+    let result = db_handler.query_with_session("SELECT oid, collname FROM pg_collation", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 2);
+    assert_eq!(result.columns[0], "oid");
+    assert_eq!(result.columns[1], "collname");
+    assert!(result.rows.len() >= 3, "Should have at least 3 collations (default, C, POSIX)");
+
+    // Test filtering by name
+    let result = db_handler.query_with_session("SELECT * FROM pg_collation WHERE collname = 'C'", &session_id).await.unwrap();
+    assert_eq!(result.rows.len(), 1);
+
+    let collname = String::from_utf8(result.rows[0][1].as_ref().unwrap().clone()).unwrap();
+    assert_eq!(collname, "C");
+}
+
diff --git a/tests/pg_replication_slots_test.rs b/tests/pg_replication_slots_test.rs
new file mode 100644
index 00000000..8c0a61cb
--- /dev/null
+++ b/tests/pg_replication_slots_test.rs
@@ -0,0 +1,43 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+use uuid::Uuid;
+
+#[tokio::test]
+async fn test_pg_replication_slots_empty() {
+    // Create a temporary database
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_replication_slots.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    // Create session
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    // Should return empty result with correct schema
+    let result = db_handler.query_with_session("SELECT slot_name, plugin, slot_type FROM pg_replication_slots", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 3);
+    assert_eq!(result.columns[0], "slot_name");
+    assert_eq!(result.columns[1], "plugin");
+    assert_eq!(result.columns[2], "slot_type");
+    assert_eq!(result.rows.len(), 0, "pg_replication_slots should be empty for SQLite");
+}
+
+#[tokio::test]
+async fn test_pg_replication_slots_all_columns() {
+    // Create a temporary database
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_replication_slots.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    // Create session
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    // Test SELECT * to ensure all columns work
+    let result = db_handler.query_with_session("SELECT * FROM pg_replication_slots", &session_id).await.unwrap();
+
+    // PostgreSQL 16 defines 16 columns; require at least 12 to tolerate minor version drift
+    assert!(result.columns.len() >= 12, "Should have at least 12 columns");
+    assert_eq!(result.rows.len(), 0, "pg_replication_slots should be empty for SQLite");
+}
diff --git a/tests/pg_sequence_test.rs b/tests/pg_sequence_test.rs
new file mode 100644
index 00000000..1ff1d336
--- /dev/null
+++ b/tests/pg_sequence_test.rs
@@ -0,0 +1,118 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+
+#[tokio::test]
+async fn test_pg_sequence_basic() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_sequence.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    db_handler.execute("CREATE TABLE users (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        name TEXT
+    )").await.unwrap();
+
+    db_handler.execute("INSERT INTO users (name) VALUES ('Alice')").await.unwrap();
+    db_handler.execute("INSERT INTO users (name) VALUES ('Bob')").await.unwrap();
+
+    let result = db_handler.query("SELECT * FROM pg_sequence").await.unwrap();
+
+    assert!(!result.rows.is_empty(), "Should find at least one sequence");
+
+    println!("✅ pg_sequence returns {} sequences", result.rows.len());
+}
+
+#[tokio::test]
+async fn test_pg_sequence_columns() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_sequence_columns.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    db_handler.execute("CREATE TABLE products (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        title TEXT
+    )").await.unwrap();
+
+    db_handler.execute("INSERT INTO products (title) VALUES ('Widget')").await.unwrap();
+
+    let result = db_handler.query("SELECT seqrelid, seqtypid, seqstart, seqincrement FROM pg_sequence").await.unwrap();
+
+    assert_eq!(result.columns.len(), 4, "Should have 4 columns");
+    assert_eq!(result.columns, vec!["seqrelid", "seqtypid", "seqstart", "seqincrement"]);
+    assert!(!result.rows.is_empty(), "Should have at least one sequence");
+
+    println!("✅ pg_sequence has correct column structure");
+}
+
+#[tokio::test]
+async fn test_pg_sequence_no_autoincrement() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_sequence_no_auto.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    db_handler.execute("CREATE TABLE simple_table (
+        id INTEGER PRIMARY KEY,
+        name TEXT
+    )").await.unwrap();
+
+    let result = db_handler.query("SELECT * FROM pg_sequence").await.unwrap();
+
+    assert_eq!(result.rows.len(), 0, "Should have no sequences without AUTOINCREMENT");
+
+    println!("✅ pg_sequence correctly returns empty for tables without AUTOINCREMENT");
+}
+
+#[tokio::test]
+async fn test_pg_sequence_current_value() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_sequence_value.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    db_handler.execute("CREATE TABLE orders (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        amount REAL
+    )").await.unwrap();
+
+    db_handler.execute("INSERT INTO orders (amount) VALUES (100.0)").await.unwrap();
+    db_handler.execute("INSERT INTO orders (amount) VALUES (200.0)").await.unwrap();
+    db_handler.execute("INSERT INTO orders (amount) VALUES (300.0)").await.unwrap();
+
+    let result = db_handler.query("SELECT seqrelid FROM pg_sequence WHERE seqrelid > 0").await.unwrap();
+
+    assert!(!result.rows.is_empty(), "Should find the orders sequence");
+
+    if let Some(Some(seqrelid_bytes)) = result.rows[0].first() {
+        let seqrelid_str = String::from_utf8(seqrelid_bytes.clone()).unwrap();
+        let seqrelid: u32 = seqrelid_str.parse().unwrap();
+        assert!(seqrelid >= 16384, "seqrelid should be >= 16384 (user OID range)");
+    }
+
+    println!("✅ pg_sequence returns valid sequence OIDs");
+}
+
+#[tokio::test]
+async fn test_pg_sequence_multiple_tables() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_sequence_multi.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    db_handler.execute("CREATE TABLE table1 (id INTEGER PRIMARY KEY AUTOINCREMENT, data TEXT)").await.unwrap();
+    db_handler.execute("CREATE TABLE table2 (id INTEGER PRIMARY KEY AUTOINCREMENT, info TEXT)").await.unwrap();
+    db_handler.execute("CREATE TABLE table3 (id INTEGER PRIMARY KEY, value TEXT)").await.unwrap();
+
+    db_handler.execute("INSERT INTO table1 (data) VALUES ('test1')").await.unwrap();
+    db_handler.execute("INSERT INTO table2 (info) VALUES ('test2')").await.unwrap();
+
+    let result = db_handler.query("SELECT COUNT(*) FROM pg_sequence").await.unwrap();
+
+    assert!(!result.rows.is_empty(), "Should have at least one row with count");
+    assert!(result.rows[0][0].is_some(), "Count result should not be None");
+
+    let count_bytes = result.rows[0][0].as_ref().unwrap();
+    let count_str = String::from_utf8(count_bytes.clone()).unwrap();
+    let count: i32 = count_str.parse().unwrap();
+
+    assert_eq!(count, 2, "Should have exactly 2 sequences (table1 and table2)");
+
+    println!("✅ pg_sequence correctly handles multiple tables with AUTOINCREMENT");
+}
diff --git a/tests/pg_settings_test.rs b/tests/pg_settings_test.rs
new file mode 100644
index 00000000..859ba44d
--- /dev/null
+++ b/tests/pg_settings_test.rs
@@ -0,0 +1,119 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+
+#[tokio::test]
+async fn test_pg_settings_basic() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_settings.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let result = db_handler.query("SELECT name, setting FROM pg_settings").await.unwrap();
+
+    assert!(result.rows.len() >= 10, "Should have many settings, got {}", result.rows.len());
+    assert_eq!(result.columns, vec!["name", "setting"]);
+
+    let names: Vec<String> = result
+        .rows
+        .iter()
+        .filter_map(|row| {
+            row.get(0)
+                .and_then(|opt| opt.as_ref())
+                .and_then(|bytes| String::from_utf8(bytes.clone()).ok())
+        })
+        .collect();
+
+    assert!(names.contains(&"server_version".to_string()), "Should contain server_version");
+    assert!(names.contains(&"server_encoding".to_string()), "Should contain server_encoding");
+
+    println!("✅ pg_settings returns {} settings", result.rows.len());
+}
+
+#[tokio::test]
+async fn test_pg_settings_filter_by_name() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_settings2.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let result = db_handler
+        .query("SELECT setting FROM pg_settings WHERE name = 'server_version'")
+        .await
+        .unwrap();
+
+    assert_eq!(result.rows.len(), 1, "Should have exactly one row");
+
+    let setting_bytes = result.rows[0][0].as_ref().unwrap();
+    let setting = String::from_utf8(setting_bytes.clone()).unwrap();
+    assert_eq!(setting, "16.0");
+
+    println!("✅ pg_settings WHERE filter works correctly");
+}
+
+#[tokio::test]
+async fn test_pg_settings_all_columns() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_settings3.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let result = db_handler
+        .query("SELECT name, setting, category, short_desc, context, vartype FROM pg_settings WHERE name = 'server_version'")
+        .await
+        .unwrap();
+
+    assert_eq!(result.rows.len(), 1);
+    assert_eq!(result.columns.len(), 6);
+
+    let name = String::from_utf8(result.rows[0][0].as_ref().unwrap().clone()).unwrap();
+    let setting = String::from_utf8(result.rows[0][1].as_ref().unwrap().clone()).unwrap();
+    let category = String::from_utf8(result.rows[0][2].as_ref().unwrap().clone()).unwrap();
+    let context = String::from_utf8(result.rows[0][4].as_ref().unwrap().clone()).unwrap();
+    let vartype = String::from_utf8(result.rows[0][5].as_ref().unwrap().clone()).unwrap();
+
+    assert_eq!(name, "server_version");
+    assert_eq!(setting, "16.0");
+    assert_eq!(category, "Preset Options");
+    assert_eq!(context, "internal");
+    assert_eq!(vartype, "string");
+
+    println!("✅ pg_settings returns correct column values");
+}
+
+#[tokio::test]
+async fn test_pg_settings_common_settings() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("test_pg_settings4.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let result = db_handler.query("SELECT name FROM pg_settings ORDER BY name").await.unwrap();
+
+    let names: Vec<String> = result
+        .rows
+        .iter()
+        .filter_map(|row| {
+            row.get(0)
+                .and_then(|opt| opt.as_ref())
+                .and_then(|bytes| String::from_utf8(bytes.clone()).ok())
+        })
+        .collect();
+
+    let expected_settings = vec![
+        "server_version",
+        "server_version_num",
+        "server_encoding",
+        "client_encoding",
+        "DateStyle",
+        "TimeZone",
+        "max_connections",
+        "standard_conforming_strings",
+        "integer_datetimes",
+    ];
+
+    for expected in expected_settings {
+        assert!(
+            names.contains(&expected.to_string()),
+            "Should contain setting: {}",
+            expected
+        );
+    }
+
+    println!("✅ pg_settings contains all common PostgreSQL settings");
+}
diff --git a/tests/pg_shdepend_test.rs b/tests/pg_shdepend_test.rs
new file mode 100644
index 00000000..e877cafe
--- /dev/null
+++ b/tests/pg_shdepend_test.rs
@@ -0,0 +1,43 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+use uuid::Uuid;
+
+#[tokio::test]
+async fn test_pg_shdepend_empty() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_shdepend.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    let result = db_handler.query_with_session("SELECT dbid, classid, objid FROM pg_shdepend", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 3);
+    assert_eq!(result.columns[0], "dbid");
+    assert_eq!(result.columns[1], "classid");
+    assert_eq!(result.columns[2], "objid");
+    assert_eq!(result.rows.len(), 0, "pg_shdepend should be empty for SQLite");
+}
+
+#[tokio::test]
+async fn test_pg_shdepend_all_columns() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_shdepend.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    let result = db_handler.query_with_session("SELECT * FROM pg_shdepend", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 7, "Should have 7 columns");
+    assert!(result.columns.contains(&"dbid".to_string()));
+    assert!(result.columns.contains(&"classid".to_string()));
+    assert!(result.columns.contains(&"objid".to_string()));
+    assert!(result.columns.contains(&"objsubid".to_string()));
+    assert!(result.columns.contains(&"refclassid".to_string()));
+    assert!(result.columns.contains(&"refobjid".to_string()));
+    assert!(result.columns.contains(&"deptype".to_string()));
+    assert_eq!(result.rows.len(), 0, "pg_shdepend should be empty for SQLite");
+}
diff --git a/tests/pg_statistic_test.rs b/tests/pg_statistic_test.rs
new file mode 100644
index 00000000..e861bafa
--- /dev/null
+++ b/tests/pg_statistic_test.rs
@@ -0,0 +1,46 @@
+use pgsqlite::session::db_handler::DbHandler;
+use std::sync::Arc;
+use uuid::Uuid;
+
+#[tokio::test]
+async fn test_pg_statistic_empty() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_statistic.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    let result = db_handler.query_with_session("SELECT starelid, staattnum FROM pg_statistic", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 2);
+    assert_eq!(result.columns[0], "starelid");
+    assert_eq!(result.columns[1], "staattnum");
+    assert_eq!(result.rows.len(), 0, "pg_statistic should be empty (use pg_stats view instead)");
+}
+
+#[tokio::test]
+async fn test_pg_statistic_all_columns() {
+    let temp_dir = tempfile::tempdir().unwrap();
+    let db_path = temp_dir.path().join("pg_statistic.db");
+    let db_handler = Arc::new(DbHandler::new(db_path.to_str().unwrap()).unwrap());
+
+    let session_id = Uuid::new_v4();
+    db_handler.create_session_connection(session_id).await.unwrap();
+
+    let result = db_handler.query_with_session("SELECT * FROM pg_statistic", &session_id).await.unwrap();
+
+    assert_eq!(result.columns.len(), 31, "Should have 31 columns");
+    assert!(result.columns.contains(&"starelid".to_string()));
+    assert!(result.columns.contains(&"staattnum".to_string()));
+    assert!(result.columns.contains(&"stainherit".to_string()));
+    assert!(result.columns.contains(&"stanullfrac".to_string()));
+    assert!(result.columns.contains(&"stawidth".to_string()));
+    assert!(result.columns.contains(&"stadistinct".to_string()));
+    assert!(result.columns.contains(&"stakind1".to_string()));
+    assert!(result.columns.contains(&"staop1".to_string()));
+    assert!(result.columns.contains(&"stacoll1".to_string()));
+    assert!(result.columns.contains(&"stanumbers1".to_string()));
+    assert!(result.columns.contains(&"stavalues1".to_string()));
+    assert_eq!(result.rows.len(), 0, "pg_statistic should be empty (use pg_stats view instead)");
+}
diff --git a/tests/pg_trigger_test.rs b/tests/pg_trigger_test.rs
new file mode 100644
index 00000000..fedae191
--- /dev/null
+++ b/tests/pg_trigger_test.rs
@@ -0,0 +1,175 @@
+use pgsqlite::session::db_handler::DbHandler;
+use pgsqlite::catalog::pg_trigger::PgTriggerHandler;
+use sqlparser::dialect::PostgreSqlDialect;
+use sqlparser::parser::Parser;
+use tempfile::TempDir;
+
+#[tokio::test]
+async fn test_pg_trigger_basic() {
+    let temp_dir = TempDir::new().unwrap();
+    let db_path = temp_dir.path().join("test_pg_trigger.db");
+    let db = DbHandler::new(db_path.to_str().unwrap()).unwrap();
+
+    db.execute("CREATE TABLE test_table (id INTEGER PRIMARY KEY, name TEXT)")
+        .await
+        .unwrap();
+
+    db.execute("CREATE TRIGGER test_trigger AFTER INSERT ON test_table BEGIN SELECT 1; END")
+        .await
+        .unwrap();
+
+    let sql = "SELECT tgname, tgrelid, tgtype, tgenabled FROM pg_trigger";
+    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap();
+    let statement = &ast[0];
+
+    if let sqlparser::ast::Statement::Query(query) = statement {
+        if let sqlparser::ast::SetExpr::Select(select) = &*query.body {
+            let result = PgTriggerHandler::handle_query(select, &db).await.unwrap();
+
+            assert_eq!(result.rows.len(), 1);
+
+            let tgname = String::from_utf8(result.rows[0][0].as_ref().unwrap().clone()).unwrap();
+            assert_eq!(tgname, "test_trigger");
+
+            let tgrelid_str = String::from_utf8(result.rows[0][1].as_ref().unwrap().clone()).unwrap();
+            let tgrelid: u32 = tgrelid_str.parse().unwrap();
+            assert!(tgrelid >= 16384);
+
+            let tgtype_str = String::from_utf8(result.rows[0][2].as_ref().unwrap().clone()).unwrap();
+            let tgtype: i16 = tgtype_str.parse().unwrap();
+            assert!(tgtype > 0);
+
+            let tgenabled = String::from_utf8(result.rows[0][3].as_ref().unwrap().clone()).unwrap();
+            assert_eq!(tgenabled, "O");
+        }
+    }
+}
+
+#[tokio::test]
+async fn test_pg_trigger_before_update() {
+    let temp_dir = TempDir::new().unwrap();
+    let db_path = temp_dir.path().join("test_pg_trigger_before.db");
+    let db = DbHandler::new(db_path.to_str().unwrap()).unwrap();
+
+    db.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, name TEXT)")
+        .await
+        .unwrap();
+
+    db.execute("CREATE TRIGGER audit_trigger BEFORE UPDATE ON users BEGIN SELECT 1; END")
+        .await
+        .unwrap();
+
+    let sql = "SELECT tgname, tgtype FROM pg_trigger WHERE tgname = 'audit_trigger'";
+    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap();
+    let statement = &ast[0];
+
+    if let sqlparser::ast::Statement::Query(query) = statement {
+        if let sqlparser::ast::SetExpr::Select(select) = &*query.body {
+            let result = PgTriggerHandler::handle_query(select, &db).await.unwrap();
+
+            assert_eq!(result.rows.len(), 1);
+
+            let tgname = String::from_utf8(result.rows[0][0].as_ref().unwrap().clone()).unwrap();
+            assert_eq!(tgname, "audit_trigger");
+
+            let tgtype_str = String::from_utf8(result.rows[0][1].as_ref().unwrap().clone()).unwrap();
+            let tgtype: i16 = tgtype_str.parse().unwrap();
+
+            assert_eq!(tgtype & 1, 1);
+            assert_eq!(tgtype & 2, 2);
+            assert_eq!(tgtype & 16, 16);
+        }
+    }
+}
+
+#[tokio::test]
+async fn test_pg_trigger_delete() {
+    let temp_dir = TempDir::new().unwrap();
+    let db_path = temp_dir.path().join("test_pg_trigger_delete.db");
+    let db = DbHandler::new(db_path.to_str().unwrap()).unwrap();
+
+    db.execute("CREATE TABLE products (id INTEGER PRIMARY KEY, price REAL)")
+        .await
+        .unwrap();
+
+    db.execute("CREATE TRIGGER cleanup_trigger AFTER DELETE ON products BEGIN SELECT 1; END")
+        .await
+        .unwrap();
+
+    let sql = "SELECT tgname, tgtype FROM pg_trigger WHERE tgname = 'cleanup_trigger'";
+    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap();
+    let statement = &ast[0];
+
+    if let sqlparser::ast::Statement::Query(query) = statement {
+        if let sqlparser::ast::SetExpr::Select(select) = &*query.body {
+            let result = PgTriggerHandler::handle_query(select, &db).await.unwrap();
+
+            assert_eq!(result.rows.len(), 1);
+
+            let tgtype_str = String::from_utf8(result.rows[0][1].as_ref().unwrap().clone()).unwrap();
+            let tgtype: i16 = tgtype_str.parse().unwrap();
+
+            assert_eq!(tgtype & 1, 1);
+            assert_eq!(tgtype & 8, 8);
+        }
+    }
+}
+
+#[tokio::test]
+async fn test_pg_trigger_empty() {
+    let temp_dir = TempDir::new().unwrap();
+    let db_path = temp_dir.path().join("test_pg_trigger_empty.db");
+    let db = DbHandler::new(db_path.to_str().unwrap()).unwrap();
+
+    db.execute("CREATE TABLE empty_table (id INTEGER PRIMARY KEY)")
+        .await
+        .unwrap();
+
+    let sql = "SELECT tgname FROM pg_trigger";
+    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap();
+    let statement = &ast[0];
+
+    if let sqlparser::ast::Statement::Query(query) = statement {
+        if let sqlparser::ast::SetExpr::Select(select) = &*query.body {
+            let result = PgTriggerHandler::handle_query(select, &db).await.unwrap();
+            assert_eq!(result.rows.len(), 0);
+        }
+    }
+}
+
+#[tokio::test]
+async fn test_pg_trigger_multiple() {
+    let temp_dir = TempDir::new().unwrap();
+    let db_path = temp_dir.path().join("test_pg_trigger_multiple.db");
+    let db = DbHandler::new(db_path.to_str().unwrap()).unwrap();
+
+    db.execute("CREATE TABLE orders (id INTEGER PRIMARY KEY, total REAL)")
+        .await
+        .unwrap();
+
+    db.execute("CREATE TRIGGER before_insert_trigger BEFORE INSERT ON orders BEGIN SELECT 1; END")
+        .await
+        .unwrap();
+
+    db.execute("CREATE TRIGGER after_insert_trigger AFTER INSERT ON orders BEGIN SELECT 2; END")
+        .await
+        .unwrap();
+
+    let sql = "SELECT tgname FROM pg_trigger ORDER BY tgname";
+    let ast = Parser::parse_sql(&PostgreSqlDialect {}, sql).unwrap();
+    let statement = &ast[0];
+
+    if let sqlparser::ast::Statement::Query(query) = statement {
+        if let sqlparser::ast::SetExpr::Select(select) = &*query.body {
+            let result = PgTriggerHandler::handle_query(select, &db).await.unwrap();
+
+            assert_eq!(result.rows.len(), 2);
+
+            let name1 = String::from_utf8(result.rows[0][0].as_ref().unwrap().clone()).unwrap();
+            let name2 = String::from_utf8(result.rows[1][0].as_ref().unwrap().clone()).unwrap();
+
+            assert_eq!(name1, "after_insert_trigger");
+            assert_eq!(name2, "before_insert_trigger");
+        }
+    }
+}