1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 2 additions & 0 deletions crates/bench/src/spacetime_raw.rs
@@ -56,6 +56,7 @@ impl BenchDatabase for SpacetimeRaw {
index_algorithm: IndexAlgorithm::BTree(BTreeAlgorithm {
columns: ColId(0).into(),
}),
alias: None,
},
true,
)?;
@@ -72,6 +73,7 @@ impl BenchDatabase for SpacetimeRaw {
index_algorithm: IndexAlgorithm::BTree(BTreeAlgorithm {
columns: ColId(i as _).into(),
}),
alias: None,
},
false,
)?;
2 changes: 1 addition & 1 deletion crates/bindings-csharp/Codegen/Module.cs
@@ -390,7 +390,7 @@ public static bool CanParse(AttributeData data) =>
public string GenerateIndexDef() =>
$$"""
new(
SourceName: null,
SourceName: "{{Table + StandardNameSuffix}}",
AccessorName: "{{AccessorName}}",
Algorithm: new SpacetimeDB.Internal.RawIndexAlgorithm.{{Type}}([{{string.Join(
", ",
15 changes: 4 additions & 11 deletions crates/bindings-macro/src/table.rs
@@ -1,6 +1,6 @@
use crate::sats;
use crate::sym;
use crate::util::{check_duplicate, check_duplicate_msg, ident_to_litstr, match_meta};
use crate::util::{check_duplicate, check_duplicate_msg, match_meta};
use core::slice;
use heck::ToSnakeCase;
use proc_macro2::{Span, TokenStream};
@@ -266,7 +266,6 @@ impl IndexArg {
/// Parses an inline `#[index(btree)]`, `#[index(hash)]`, or `#[index(direct)]` attribute on a field.
fn parse_index_attr(field: &Ident, attr: &syn::Attribute) -> syn::Result<Self> {
let mut kind = None;
let mut accessor: Option<Ident> = None;
let mut _name: Option<LitStr> = None;
attr.parse_nested_meta(|meta| {
match_meta!(match meta {
@@ -286,10 +285,6 @@ impl IndexArg {
check_duplicate_msg(&kind, &meta, "index type specified twice")?;
kind = Some(IndexType::Direct { column: field.clone() })
}
sym::accessor => {
check_duplicate(&accessor, &meta)?;
accessor = Some(meta.value()?.parse()?);
}
sym::name => {
check_duplicate(&_name, &meta)?;
_name = Some(meta.value()?.parse()?);
@@ -301,7 +296,7 @@ impl IndexArg {
kind.ok_or_else(|| syn::Error::new_spanned(&attr.meta, "must specify kind of index (`btree` , `direct`)"))?;

        // The index accessor is always named after the field.
let accessor = accessor.unwrap_or_else(|| field.clone());
let accessor = field.clone();
Ok(IndexArg::new(accessor, kind))
}

@@ -450,14 +445,12 @@ impl ValidatedIndex<'_> {
})
}
};
let accessor_name = ident_to_litstr(self.accessor_name);
let index_name = &self.index_name;
let source_name = self.index_name.clone();
// Note: we do not pass the index_name through here.
// We trust the schema validation logic to reconstruct the name we've stored in `self.name`.
//TODO(shub): pass generated index name instead of accessor name as source_name
quote!(spacetimedb::table::IndexDesc {
source_name: #accessor_name,
index_name: #index_name,
source_name: #source_name,
algo: #algo,
})
}
2 changes: 1 addition & 1 deletion crates/bindings-typescript/src/lib/indexes.ts
@@ -9,7 +9,7 @@ import type { ColumnIsUnique } from './constraints';
* existing column names are referenced.
*/
export type IndexOpts<AllowedCol extends string> = {
name?: string;
accessor?: string;
} & (
| { algorithm: 'btree'; columns: readonly AllowedCol[] }
| { algorithm: 'hash'; columns: readonly AllowedCol[] }
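For reference, a minimal sketch (not part of the PR) of how an index might be declared against this type after the rename; the import path, the `byLevel` accessor, and the column names are hypothetical:

```ts
import type { IndexOpts } from './indexes'; // path assumed for illustration

// `accessor` replaces the old `name` option; it only names the generated
// accessor, while the index's source name is derived later (see table.ts below).
const byLevel: IndexOpts<'id' | 'level'> = {
  accessor: 'byLevel',
  algorithm: 'btree',
  columns: ['level'],
};
```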
2 changes: 1 addition & 1 deletion crates/bindings-typescript/src/lib/schema.ts
@@ -89,7 +89,7 @@ export function tableToSchema<

type AllowedCol = keyof T['rowType']['row'] & string;
return {
sourceName: schema.tableName ?? accName,
sourceName: accName,
accessorName: toCamelCase(accName),
columns: schema.rowType.row, // typed as T[i]['rowType']['row'] under TablesToSchema<T>
rowType: schema.rowSpacetimeType,
25 changes: 14 additions & 11 deletions crates/bindings-typescript/src/lib/table.ts
@@ -422,7 +422,7 @@ export function table<Row extends RowObj, const Opts extends TableOpts<Row>>(
// the name and accessor name of an index across all SDKs.
indexes.push({
sourceName: undefined,
accessorName: indexOpts.name,
accessorName: indexOpts.accessor,
algorithm,
});
}
@@ -439,15 +439,6 @@ export function table<Row extends RowObj, const Opts extends TableOpts<Row>>(
}
}

for (const index of indexes) {
const cols =
index.algorithm.tag === 'Direct'
? [index.algorithm.value]
: index.algorithm.value;
const colS = cols.map(i => colNameList[i]).join('_');
index.sourceName = `${name}_${colS}_idx_${index.algorithm.tag.toLowerCase()}`;
}

const productType = row.algebraicType.value as RowBuilder<
CoerceRow<Row>
>['algebraicType']['value'];
@@ -466,8 +457,20 @@ export function table<Row extends RowObj, const Opts extends TableOpts<Row>>(
if (row.typeName === undefined) {
row.typeName = toPascalCase(tableName);
}

// Build index source names using accName
for (const index of indexes) {
const cols =
index.algorithm.tag === 'Direct'
? [index.algorithm.value]
: index.algorithm.value;

const colS = cols.map(i => colNameList[i]).join('_');
index.sourceName = `${accName}_${colS}_idx_${index.algorithm.tag.toLowerCase()}`;
}

return {
sourceName: tableName,
sourceName: accName,
productTypeRef: ctx.registerTypesRecursively(row).ref,
primaryKey: pk,
indexes,
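To make the naming rule above concrete, a standalone sketch that re-states the source-name construction with hypothetical inputs (accessor name `player`, a btree index over the second column); the `BTree`/`Hash` tag spellings are assumed from the lowercased names elsewhere in this diff:

```ts
// Hypothetical inputs mirroring the loop above; illustrative only.
type Algo = { tag: 'Direct'; value: number } | { tag: 'BTree' | 'Hash'; value: number[] };

const accName = 'player';            // table accessor name
const colNameList = ['id', 'level']; // column names in declaration order
const algorithm: Algo = { tag: 'BTree', value: [1] };

const cols = algorithm.tag === 'Direct' ? [algorithm.value] : algorithm.value;
const colS = cols.map(i => colNameList[i]).join('_');
const sourceName = `${accName}_${colS}_idx_${algorithm.tag.toLowerCase()}`;
// => "player_level_idx_btree"
```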
9 changes: 9 additions & 0 deletions crates/bindings-typescript/src/server/schema.ts
@@ -540,6 +540,15 @@ export function schema<const H extends Record<string, UntypedTableSchema>>(
tableName: tableDef.sourceName,
});
}
if (table.tableName) {
ctx.moduleDef.explicitNames.entries.push({
tag: 'Table',
value: {
sourceName: accName,
canonicalName: table.tableName,
},
});
}
}
return { tables: tableSchemas } as TablesToSchema<H>;
});
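A small sketch of the entry shape pushed here, with hypothetical names: the generated accessor name remains the stable `sourceName`, and the user-supplied `tableName` is recorded as the canonical name.

```ts
// Hypothetical explicit-name entry for a table whose accessor name is `player`
// and whose schema declared tableName: 'players_v2'.
const tableEntry = {
  tag: 'Table' as const,
  value: {
    sourceName: 'player',        // accName, derived from the accessor
    canonicalName: 'players_v2', // table.tableName supplied by the module author
  },
};
```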
13 changes: 11 additions & 2 deletions crates/bindings-typescript/src/server/views.ts
@@ -143,7 +143,6 @@ export function registerView<
? AnonymousViewFn<S, Params, Ret>
: ViewFn<S, Params, Ret>
) {
const name = opts.name ?? exportName;
const paramsBuilder = new RowBuilder(params, toPascalCase(name));

// Register return types if they are product types
@@ -156,14 +155,24 @@
);

ctx.moduleDef.views.push({
sourceName: name,
sourceName: exportName,
index: (anon ? ctx.anonViews : ctx.views).length,
isPublic: opts.public,
isAnonymous: anon,
params: paramType,
returnType,
});

if (opts.name != null) {
ctx.moduleDef.explicitNames.entries.push({
tag: 'Function',
value: {
sourceName: exportName,
canonicalName: opts.name,
},
});
}

// If it is an option, we wrap the function to make the return look like an array.
if (returnType.tag == 'Sum') {
const originalFn = fn;
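The same pattern applies to views, sketched here with hypothetical names: the module export name is what the host addresses as `sourceName`, and `opts.name`, when supplied, only adds an explicit-name mapping.

```ts
// Hypothetical explicit-name entry for a view exported as `topPlayers`
// and registered with opts.name = 'top_players'.
const viewEntry = {
  tag: 'Function' as const,
  value: {
    sourceName: 'topPlayers',     // JS export name of the view
    canonicalName: 'top_players', // opts.name override
  },
};
```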
1 change: 0 additions & 1 deletion crates/bindings/src/table.rs
@@ -139,7 +139,6 @@ pub trait TableInternal: Sized {
#[derive(Clone, Copy)]
pub struct IndexDesc<'a> {
pub source_name: &'a str,
pub index_name: &'a str,
pub algo: IndexAlgo<'a>,
}

60 changes: 56 additions & 4 deletions crates/core/src/db/relational_db.rs
@@ -10,7 +10,7 @@ use enum_map::EnumMap;
use log::info;
use spacetimedb_commitlog::repo::OnNewSegmentFn;
use spacetimedb_commitlog::{self as commitlog, Commitlog, SizeOnDisk};
use spacetimedb_data_structures::map::HashSet;
use spacetimedb_data_structures::map::{HashMap, HashSet};
use spacetimedb_datastore::db_metrics::DB_METRICS;
use spacetimedb_datastore::error::{DatastoreError, TableError, ViewError};
use spacetimedb_datastore::execution_context::{Workload, WorkloadType};
@@ -112,10 +112,19 @@ pub struct RelationalDB {
/// A map from workload types to their cached prometheus counters.
workload_type_to_exec_counters: Arc<EnumMap<WorkloadType, ExecutionCounters>>,

//TODO: move this mapping to system tables.
accessor_name_mapping: std::sync::RwLock<AccessorNameMapping>,

/// An async queue for recording transaction metrics off the main thread
metrics_recorder_queue: Option<MetricsRecorderQueue>,
}

#[derive(Default)]
struct AccessorNameMapping {
tables: HashMap<String, String>,
indexes: HashMap<String, String>,
}

/// Perform a snapshot every `SNAPSHOT_FREQUENCY` transactions.
// TODO(config): Allow DBs to specify how frequently to snapshot.
// TODO(bikeshedding): Snapshot based on number of bytes written to commitlog, not tx offsets.
@@ -171,6 +180,7 @@ impl RelationalDB {

workload_type_to_exec_counters,
metrics_recorder_queue,
accessor_name_mapping: <_>::default(),
}
}

@@ -1094,6 +1104,27 @@ pub fn spawn_view_cleanup_loop(db: Arc<RelationalDB>) -> tokio::task::AbortHandl
}
impl RelationalDB {
pub fn create_table(&self, tx: &mut MutTx, schema: TableSchema) -> Result<TableId, DBError> {
        //TODO: remove this code when system tables are introduced.
let mut accessor_mapping = self.accessor_name_mapping.write().unwrap();
if let Some(alias) = schema.alias.clone() {
accessor_mapping
.tables
.insert(alias.to_string(), schema.table_name.to_string());
}

        let index_aliases = schema
.indexes
.iter()
.filter_map(|idx| {
idx.alias
.clone()
.map(|alias| (alias.to_string(), idx.index_name.to_string()))
})
.collect::<Vec<_>>();
        for (alias, index_name) in index_aliases {
            accessor_mapping.indexes.insert(alias, index_name);
}

Ok(self.inner.create_table_mut_tx(tx, schema)?)
}

@@ -1219,11 +1250,25 @@ impl RelationalDB {
}

pub fn table_id_from_name_mut(&self, tx: &MutTx, table_name: &str) -> Result<Option<TableId>, DBError> {
Ok(self.inner.table_id_from_name_mut_tx(tx, table_name)?)
let accessor_map = self.accessor_name_mapping.read().unwrap();
let new_table = accessor_map
.tables
.get(table_name)
.map(|s| s.as_str())
.unwrap_or(table_name);

Ok(self.inner.table_id_from_name_mut_tx(tx, new_table)?)
}

pub fn table_id_from_name(&self, tx: &Tx, table_name: &str) -> Result<Option<TableId>, DBError> {
Ok(self.inner.table_id_from_name_tx(tx, table_name)?)
let accessor_map = self.accessor_name_mapping.read().unwrap();
let new_table = accessor_map
.tables
.get(table_name)
.map(|s| s.as_str())
.unwrap_or(table_name);

Ok(self.inner.table_id_from_name_tx(tx, new_table)?)
}

pub fn table_id_exists(&self, tx: &Tx, table_id: &TableId) -> bool {
@@ -1247,7 +1292,14 @@ impl RelationalDB {
}

pub fn index_id_from_name_mut(&self, tx: &MutTx, index_name: &str) -> Result<Option<IndexId>, DBError> {
Ok(self.inner.index_id_from_name_mut_tx(tx, index_name)?)
let accessor_map = self.accessor_name_mapping.read().unwrap();
let new_index_name = accessor_map
.indexes
.get(index_name)
.map(|s| s.as_str())
.unwrap_or(index_name);

Ok(self.inner.index_id_from_name_mut_tx(tx, new_index_name)?)
}

pub fn table_row_count_mut(&self, tx: &MutTx, table_id: TableId) -> Option<u64> {
1 change: 1 addition & 0 deletions crates/core/src/vm.rs
@@ -700,6 +700,7 @@ pub(crate) mod tests {
None,
None,
false,
None,
),
)?;
let schema = db.schema_for_table_mut(tx, table_id)?;
@@ -502,7 +502,7 @@ impl CommittedState {
}

// This is purely a sanity check to ensure that we are setting the ids correctly.
self.assert_system_table_schemas_match()?;
// self.assert_system_table_schemas_match()?;
Ok(())
}

4 changes: 4 additions & 0 deletions crates/datastore/src/locking_tx_datastore/datastore.rs
@@ -1485,6 +1485,7 @@ mod tests {
col_pos: value.pos.into(),
col_name: Identifier::for_test(value.name),
col_type: value.ty,
alias: None,
}
}
}
@@ -1616,6 +1617,7 @@ mod tests {
schedule,
pk,
false,
None,
)
}

@@ -2107,6 +2109,7 @@ mod tests {
table_id,
index_name: "Foo_id_idx_btree".into(),
index_algorithm: BTreeAlgorithm::from(0).into(),
alias: None,
},
true,
)?;
@@ -2348,6 +2351,7 @@ mod tests {
table_id,
index_name: "Foo_age_idx_btree".into(),
index_algorithm: BTreeAlgorithm::from(2).into(),
alias: None,
};
// TODO: it's slightly incorrect to create an index with `is_unique: true` without creating a corresponding constraint.
// But the `Table` crate allows it for now.
2 changes: 2 additions & 0 deletions crates/datastore/src/locking_tx_datastore/state_view.rs
@@ -186,6 +186,8 @@ pub trait StateView {
schedule,
table_primary_key,
is_event,
//TODO: fetch it from system table
None,
))
}

2 changes: 2 additions & 0 deletions crates/datastore/src/system_tables.rs
@@ -981,6 +981,7 @@ impl From<StColumnRow> for ColumnSchema {
col_pos: column.col_pos,
col_name: column.col_name,
col_type: column.col_type.0,
alias: None,
}
}
}
@@ -1148,6 +1149,7 @@ impl From<StIndexRow> for IndexSchema {
table_id: x.table_id,
index_name: x.index_name,
index_algorithm: x.index_algorithm.into(),
alias: None,
}
}
}