Skip to content
Merged
Show file tree
Hide file tree
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

35 changes: 35 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -948,6 +948,41 @@ Working Tauri demo apps are in the [`examples/`](examples) directory:
See the [toolkit crate README](crates/sqlx-sqlite-toolkit/README.md#examples)
for setup instructions.

## Security Considerations

### Cross-Window Shared State

Database instances are shared across all webviews/windows within the same Tauri
application. A database loaded in one window is accessible from any other window
without calling `load()` again. Writes from one window are immediately visible
to reads in another, and closing a database affects all windows.

### Resource Limits

The plugin enforces several resource limits to prevent denial-of-service from
untrusted or buggy frontend code:

* **Database count**: Maximum 50 concurrently loaded databases (configurable
via `Builder::max_databases()`)
* **Interruptible transaction timeout**: Transactions that exceed the
  configured timeout (default: 5 minutes) are automatically rolled back on the
  next access attempt (configurable via `Builder::transaction_timeout()`)
* **Observer channel capacity**: Capped at 10,000 entries (default 256)
* **Observed tables**: Maximum 100 tables per `observe()` call
* **Subscriptions**: Maximum 100 active subscriptions per database

### Unbounded Result Sets

`fetchAll()` returns the entire result set in a single response with no built-in
size limit. For large or unbounded queries, prefer `fetchPage()` with keyset
pagination to keep memory usage bounded on both the Rust and TypeScript sides.

### Path Validation

Database paths are validated to prevent directory traversal. Absolute paths,
`..` segments, and null bytes are rejected. All paths are resolved relative to
the app config directory.

## Development

This project follows
Expand Down
2 changes: 1 addition & 1 deletion crates/sqlx-sqlite-conn-mgr/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "sqlx-sqlite-conn-mgr"
# Sync major.minor with major.minor of SQLx crate
version = "0.8.6"
version = "0.8.7"
description = "Wraps SQLx for SQLite, enforcing pragmatic connection policies for mobile and desktop applications"
authors = ["Jeremy Thomerson"]
license = "MIT"
Expand Down
14 changes: 10 additions & 4 deletions crates/sqlx-sqlite-conn-mgr/src/attached.rs
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ impl AttachedReadConnection {
/// attached databases may persist when the connection is returned to the pool.
pub async fn detach_all(mut self) -> Result<()> {
for schema_name in &self.schema_names {
let detach_sql = format!("DETACH DATABASE {}", schema_name);
let detach_sql = format!("DETACH DATABASE \"{}\"", schema_name);
sqlx::query(&detach_sql).execute(&mut *self.conn).await?;
}
Ok(())
Expand Down Expand Up @@ -140,7 +140,7 @@ impl AttachedWriteGuard {
/// attached databases may persist when the connection is returned to the pool.
pub async fn detach_all(mut self) -> Result<()> {
for schema_name in &self.schema_names {
let detach_sql = format!("DETACH DATABASE {}", schema_name);
let detach_sql = format!("DETACH DATABASE \"{}\"", schema_name);
sqlx::query(&detach_sql).execute(&mut *self.writer).await?;
}
Ok(())
Expand Down Expand Up @@ -252,7 +252,10 @@ pub async fn acquire_reader_with_attached(
// Schema name is validated above to contain only safe identifier characters
let path = spec.database.path_str();
let escaped_path = path.replace("'", "''");
let attach_sql = format!("ATTACH DATABASE '{}' AS {}", escaped_path, spec.schema_name);
let attach_sql = format!(
"ATTACH DATABASE '{}' AS \"{}\"",
escaped_path, spec.schema_name
);
sqlx::query(&attach_sql).execute(&mut *conn).await?;

schema_names.push(spec.schema_name);
Expand Down Expand Up @@ -349,7 +352,10 @@ pub async fn acquire_writer_with_attached(
for spec in specs {
let path = spec.database.path_str();
let escaped_path = path.replace("'", "''");
let attach_sql = format!("ATTACH DATABASE '{}' AS {}", escaped_path, spec.schema_name);
let attach_sql = format!(
"ATTACH DATABASE '{}' AS \"{}\"",
escaped_path, spec.schema_name
);
sqlx::query(&attach_sql).execute(&mut *writer).await?;

schema_names.push(spec.schema_name);
Expand Down
4 changes: 2 additions & 2 deletions crates/sqlx-sqlite-observer/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "sqlx-sqlite-observer"
# Sync major.minor with major.minor of SQLx crate
version = "0.8.6"
version = "0.8.7"
license = "MIT"
edition = "2024"
rust-version = "1.89"
Expand Down Expand Up @@ -29,7 +29,7 @@ regex = "1.12.3"
sqlx = { version = "0.8.6", features = ["sqlite", "runtime-tokio"], default-features = false }
# Required for preupdate_hook - SQLite must be compiled with SQLITE_ENABLE_PREUPDATE_HOOK
libsqlite3-sys = { version = "0.30.1", features = ["preupdate_hook"] }
sqlx-sqlite-conn-mgr = { path = "../sqlx-sqlite-conn-mgr", version = "0.8.6", optional = true }
sqlx-sqlite-conn-mgr = { path = "../sqlx-sqlite-conn-mgr", version = "0.8.7", optional = true }

[dev-dependencies]
tokio = { version = "1.49.0", features = ["full", "macros"] }
Expand Down
9 changes: 9 additions & 0 deletions crates/sqlx-sqlite-observer/src/broker.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,16 @@ pub struct ObservationBroker {

impl ObservationBroker {
/// Creates a new broker with the specified broadcast channel capacity.
///
/// # Panics
///
/// Panics if `channel_capacity` is 0.
pub fn new(channel_capacity: usize, capture_values: bool) -> Arc<Self> {
// broadcast::channel panics on zero capacity. Assert here to surface a clear
// message rather than an internal tokio panic. Changing the return type to
// Result would ripple through every call site for a case that the plugin layer
// already validates before reaching this point.
assert!(channel_capacity > 0, "channel_capacity must be at least 1");
Comment thread
pmorris-dev marked this conversation as resolved.
let (change_tx, _) = broadcast::channel(channel_capacity);
Arc::new(Self {
buffer: Mutex::new(Vec::new()),
Expand Down
3 changes: 3 additions & 0 deletions crates/sqlx-sqlite-observer/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -90,6 +90,9 @@ impl ObserverConfig {

/// Sets the broadcast channel capacity for change notifications.
///
/// Capacity must be at least 1. A capacity of 0 will cause a panic when the
/// observer is initialized.
///
/// See [`channel_capacity`](Self::channel_capacity) for details on sizing.
pub fn with_channel_capacity(mut self, capacity: usize) -> Self {
self.channel_capacity = capacity;
Expand Down
28 changes: 10 additions & 18 deletions crates/sqlx-sqlite-observer/src/schema.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,11 +20,11 @@ pub async fn query_table_info(
// Check if table exists and get WITHOUT ROWID status
let without_rowid = is_without_rowid(conn, table_name).await?;

// Get primary key columns using PRAGMA table_info
// Get primary key columns using pragma_table_info()
let pk_columns = query_pk_columns(conn, table_name).await?;

// Determine if table exists:
// - If pk_columns is None, PRAGMA table_info returned no rows (table doesn't exist)
// - If pk_columns is None, pragma_table_info returned no rows (table doesn't exist)
// - If without_rowid is true, the table must exist (we found it in sqlite_master)
// - A table with no explicit PK returns Some([]), not None
if pk_columns.is_none() && !without_rowid {
Expand Down Expand Up @@ -78,15 +78,20 @@ fn has_without_rowid_clause(create_sql: &str) -> bool {
/// Returns column indices in the order they appear in the PRIMARY KEY definition.
/// For composite primary keys, the `pk` column in PRAGMA table_info indicates
/// the position (1-indexed) within the PK.
///
/// Uses the `pragma_table_info()` table-valued function (available since SQLite
/// 3.16.0) so the table name can be bound as a parameter instead of interpolated
/// into the SQL string.
async fn query_pk_columns(
conn: &mut SqliteConnection,
table_name: &str,
) -> crate::Result<Option<Vec<usize>>> {
// PRAGMA table_info returns: cid, name, type, notnull, dflt_value, pk
// pragma_table_info returns: cid, name, type, notnull, dflt_value, pk
// pk is 0 for non-PK columns, or 1-indexed position for PK columns
let pragma = format!("PRAGMA table_info({})", quote_identifier(table_name));
let sql = "SELECT cid, name, type, \"notnull\", dflt_value, pk FROM pragma_table_info(?1)";

let rows = sqlx::query(&pragma)
let rows = sqlx::query(sql)
.bind(table_name)
.fetch_all(&mut *conn)
.await
.map_err(crate::Error::Sqlx)?;
Expand Down Expand Up @@ -116,23 +121,10 @@ async fn query_pk_columns(
Ok(Some(pk_columns.into_iter().map(|(cid, _)| cid).collect()))
}

/// Escapes a SQLite identifier so it can be embedded safely in SQL text.
///
/// Follows SQLite's double-quote rule: the name is wrapped in `"` and any
/// embedded `"` character is doubled.
fn quote_identifier(name: &str) -> String {
    // Build the quoted form manually: open quote, copy each character
    // (doubling embedded quotes), then close quote.
    let mut quoted = String::with_capacity(name.len() + 2);
    quoted.push('"');
    for ch in name.chars() {
        if ch == '"' {
            quoted.push('"');
        }
        quoted.push(ch);
    }
    quoted.push('"');
    quoted
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_quote_identifier() {
    // Each case pairs an input identifier with its expected quoted form:
    // plain names, names with spaces, and names containing double quotes
    // (which must be doubled to stay valid SQL).
    let cases = [
        ("users", "\"users\""),
        ("my table", "\"my table\""),
        ("foo\"bar", "\"foo\"\"bar\""),
    ];
    for (input, expected) in cases {
        assert_eq!(quote_identifier(input), expected);
    }
}

#[test]
fn test_has_without_rowid_clause() {
// Positive cases
Expand Down
2 changes: 1 addition & 1 deletion crates/sqlx-sqlite-toolkit/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
[package]
name = "sqlx-sqlite-toolkit"
# Sync major.minor with major.minor of SQLx crate
version = "0.8.6"
version = "0.8.7"
license = "MIT"
edition = "2024"
rust-version = "1.89"
Expand Down
12 changes: 12 additions & 0 deletions crates/sqlx-sqlite-toolkit/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,10 @@ pub enum Error {
#[error("invalid transaction token")]
InvalidTransactionToken,

/// Transaction timed out (exceeded the configured timeout).
#[error("transaction timed out for database: {0}")]
TransactionTimedOut(String),

/// Error from the observer (change notifications).
#[cfg(feature = "observer")]
#[error(transparent)]
Expand Down Expand Up @@ -115,6 +119,7 @@ impl Error {
Error::TransactionAlreadyActive(_) => "TRANSACTION_ALREADY_ACTIVE".to_string(),
Error::NoActiveTransaction(_) => "NO_ACTIVE_TRANSACTION".to_string(),
Error::InvalidTransactionToken => "INVALID_TRANSACTION_TOKEN".to_string(),
Error::TransactionTimedOut(_) => "TRANSACTION_TIMED_OUT".to_string(),
#[cfg(feature = "observer")]
Error::Observer(_) => "OBSERVER_ERROR".to_string(),
Error::Io(_) => "IO_ERROR".to_string(),
Expand Down Expand Up @@ -194,6 +199,13 @@ mod tests {
assert_eq!(err.error_code(), "IO_ERROR");
}

#[test]
fn test_error_code_transaction_timed_out() {
    // The timed-out variant must map to its dedicated machine-readable code
    // and surface the offending database name in its display message.
    let timed_out = Error::TransactionTimedOut(String::from("test.db"));
    assert_eq!(timed_out.error_code(), "TRANSACTION_TIMED_OUT");
    assert!(format!("{timed_out}").contains("test.db"));
}

#[test]
fn test_error_code_other() {
let err = Error::Other("something went wrong".into());
Expand Down
Loading