diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 00000000..303e71a4 Binary files /dev/null and b/.DS_Store differ diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml index 3d1908fb..b9c501b8 100644 --- a/.github/workflows/code-quality.yml +++ b/.github/workflows/code-quality.yml @@ -56,7 +56,7 @@ jobs: strategy: fail-fast: false matrix: - crate: [canyon_connection, canyon_crud, canyon_macros, canyon_migrations] + crate: [canyon_core, canyon_crud, canyon_macros, canyon_entities, canyon_migrations] steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 91520071..0e0d093d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: Generate Canyon-SQL release on: push: - tags: + tags: - 'v[0-9]+.[0-9]+.[0-9]+' - 'v[0-9]+.[0-9]+.[0-9]+rc[0-9]+' diff --git a/.gitignore b/.gitignore index 056b3728..a751ab5c 100755 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,5 @@ Cargo.lock /tester_canyon_sql/ canyon_tester/ macro_utils.rs -.vscode/ postgres-data/ mysql-data/ \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..1a510cc9 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "rust-analyzer.cargo.features": ["postgres, mssql, mysql, migrations"], + "rust-analyzer.check.workspace": true, + "rust-analyzer.cargo.buildScripts.enable": true, + "rust-analyzer.procMacro.enable": true, + "rust-analyzer.diagnostics.disabled": ["unresolved-proc-macro"], + "rust-analyzer.linkedProjects": [ + "./Cargo.toml" + ] + } + diff --git a/Cargo.toml b/Cargo.toml index d9bfd724..4499391d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,17 +11,17 @@ description.workspace = true [workspace] members = [ - "canyon_connection", + "canyon_core", "canyon_crud", "canyon_entities", "canyon_migrations", "canyon_macros", - "tests" + "tests", ] [dependencies] # Project crates 
-canyon_connection = { workspace = true } +canyon_core = { workspace = true } canyon_crud = { workspace = true } canyon_entities = { workspace = true } canyon_migrations = { workspace = true, optional = true } @@ -35,8 +35,8 @@ mysql_common = { workspace = true, optional = true } [workspace.dependencies] +canyon_core = { version = "0.5.1", path = "canyon_core" } canyon_crud = { version = "0.5.1", path = "canyon_crud" } -canyon_connection = { version = "0.5.1", path = "canyon_connection" } canyon_entities = { version = "0.5.1", path = "canyon_entities" } canyon_migrations = { version = "0.5.1", path = "canyon_migrations"} canyon_macros = { version = "0.5.1", path = "canyon_macros" } @@ -44,19 +44,16 @@ canyon_macros = { version = "0.5.1", path = "canyon_macros" } tokio = { version = "1.27.0", features = ["full"] } tokio-util = { version = "0.7.4", features = ["compat"] } tokio-postgres = { version = "0.7.2", features = ["with-chrono-0_4"] } -tiberius = { version = "0.12.1", features = ["tds73", "chrono", "integrated-auth-gssapi"] } -mysql_async = { version = "0.32.2" } -mysql_common = { version = "0.30.6", features = [ "chrono" ]} +tiberius = { version = "0.12.3", features = ["tds73", "chrono", "integrated-auth-gssapi"] } +mysql_async = { version = "0.36.1" } +mysql_common = { version = "0.35.4", features = [ "chrono" ]} chrono = { version = "0.4", features = ["serde"] } # Just from TP better? 
serde = { version = "1.0.138", features = ["derive"] } futures = "0.3.25" -indexmap = "1.9.1" async-std = "1.12.0" -lazy_static = "1.4.0" toml = "0.7.3" -async-trait = "0.1.68" walkdir = "2.3.3" regex = "1.9.3" partialdebug = "0.2.0" @@ -66,8 +63,8 @@ proc-macro2 = "1.0.27" [workspace.package] version = "0.5.1" -edition = "2021" -authors = ["Alex Vergara, Gonzalo Busto Musi"] +edition = "2024" +authors = ["Alex Vergara, Gonzalo Busto Musi"] documentation = "https://zerodaycode.github.io/canyon-book/" homepage = "https://github.com/zerodaycode/Canyon-SQL" readme = "README.md" @@ -75,7 +72,7 @@ license = "MIT" description = "A Rust ORM and QueryBuilder" [features] -postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres", "canyon_migrations/postgres", "canyon_macros/postgres"] -mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql", "canyon_migrations/mssql", "canyon_macros/mssql"] -mysql = ["mysql_async", "mysql_common", "canyon_connection/mysql", "canyon_crud/mysql", "canyon_migrations/mysql", "canyon_macros/mysql"] +postgres = ["tokio-postgres", "canyon_core/postgres", "canyon_crud/postgres", "canyon_migrations/postgres", "canyon_macros/postgres"] +mssql = ["tiberius", "canyon_core/mssql", "canyon_crud/mssql", "canyon_migrations/mssql", "canyon_macros/mssql"] +mysql = ["mysql_async", "mysql_common", "canyon_core/mysql", "canyon_crud/mysql", "canyon_migrations/mysql", "canyon_macros/mysql"] migrations = ["canyon_migrations", "canyon_macros/migrations"] diff --git a/README.md b/README.md index 96e3b90b..09fae0b8 100755 --- a/README.md +++ b/README.md @@ -84,7 +84,7 @@ assert_eq!( ); ``` -Note the leading reference on the `find_by_pk(...)` parameter. This associated function receives an `&dyn QueryParameter<'_>` as argument, not a value. +Note the leading reference on the `find_by_pk(...)` parameter. This associated function receives an `&dyn QueryParameter` as argument, not a value. 
### :wrench: Building more complex queries diff --git a/bash_aliases.sh b/bash_aliases.sh index 64b40415..3c3aeed9 100755 --- a/bash_aliases.sh +++ b/bash_aliases.sh @@ -14,6 +14,10 @@ alias DockerDown='docker-compose -f ./docker/docker-compose.yml down' # Cleans the generated cache folder for the postgres in the docker alias CleanPostgres='rm -rf ./docker/postgres-data' +# Code Quality +alias Clippy='cargo clippy --all-targets --all-features --workspace -- -D warnings' +alias Fmt='cargo fmt --all -- --check' + # Build the project for Windows targets alias BuildCanyonWin='cargo build --all-features --target=x86_64-pc-windows-msvc' alias BuildCanyonWinFull='cargo clean && cargo build --all-features --target=x86_64-pc-windows-msvc' @@ -37,10 +41,11 @@ alias IntegrationTestsLinux='cargo test --all-features --no-fail-fast -p tests - alias ITIncludeIgnoredLinux='cargo test --all-features --no-fail-fast -p tests --target=x86_64-unknown-linux-gnu -- --show-output --test-threads=1 --nocapture --test-threads=1 --include-ignored' alias SqlServerInitializationLinux='cargo test initialize_sql_server_docker_instance -p tests --all-features --no-fail-fast --target=x86_64-unknown-linux-gnu -- --show-output --test-threads=1 --nocapture --include-ignored' - +# ----- # Publish Canyon-SQL to the registry with its dependencies alias PublishCanyon='cargo publish -p canyon_connection && cargo publish -p canyon_crud && cargo publish -p canyon_migrations && cargo publish -p canyon_macros && cargo publish -p canyon_sql_root' +# ----- # Collects the code coverage for the project (tests must run before this) alias CcEnvVars='export CARGO_INCREMENTAL=0 export RUSTFLAGS="-Zprofile -Ccodegen-units=1 -Copt-level=0 -Clink-dead-code -Coverflow-checks=off -Zpanic_abort_tests -Cpanic=abort" diff --git a/canyon_connection/src/canyon_database_connector.rs b/canyon_connection/src/canyon_database_connector.rs deleted file mode 100644 index 11530a7d..00000000 --- 
a/canyon_connection/src/canyon_database_connector.rs +++ /dev/null @@ -1,322 +0,0 @@ -use serde::Deserialize; - -#[cfg(feature = "mssql")] -use async_std::net::TcpStream; -#[cfg(feature = "mysql")] -use mysql_async::Pool; -#[cfg(feature = "mssql")] -use tiberius::{AuthMethod, Config}; -#[cfg(feature = "postgres")] -use tokio_postgres::{Client, NoTls}; - -use crate::datasources::{Auth, DatasourceConfig}; - -/// Represents the current supported databases by Canyon -#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy)] -pub enum DatabaseType { - #[serde(alias = "postgres", alias = "postgresql")] - #[cfg(feature = "postgres")] - PostgreSql, - #[serde(alias = "sqlserver", alias = "mssql")] - #[cfg(feature = "mssql")] - SqlServer, - #[serde(alias = "mysql")] - #[cfg(feature = "mysql")] - MySQL, -} - -impl From<&Auth> for DatabaseType { - fn from(value: &Auth) -> Self { - match value { - #[cfg(feature = "postgres")] - crate::datasources::Auth::Postgres(_) => DatabaseType::PostgreSql, - #[cfg(feature = "mssql")] - crate::datasources::Auth::SqlServer(_) => DatabaseType::SqlServer, - #[cfg(feature = "mysql")] - crate::datasources::Auth::MySQL(_) => DatabaseType::MySQL, - } - } -} - -/// A connection with a `PostgreSQL` database -#[cfg(feature = "postgres")] -pub struct PostgreSqlConnection { - pub client: Client, - // pub connection: Connection, // TODO Hold it, or not to hold it... that's the question! -} - -/// A connection with a `SqlServer` database -#[cfg(feature = "mssql")] -pub struct SqlServerConnection { - pub client: &'static mut tiberius::Client, -} - -/// A connection with a `Mysql` database -#[cfg(feature = "mysql")] -pub struct MysqlConnection { - pub client: Pool, -} - -/// The Canyon database connection handler. When the client's program -/// starts, Canyon gets the information about the desired datasources, -/// process them and generates a pool of 1 to 1 database connection for -/// every datasource defined. 
-pub enum DatabaseConnection { - #[cfg(feature = "postgres")] - Postgres(PostgreSqlConnection), - #[cfg(feature = "mssql")] - SqlServer(SqlServerConnection), - #[cfg(feature = "mysql")] - MySQL(MysqlConnection), -} - -unsafe impl Send for DatabaseConnection {} -unsafe impl Sync for DatabaseConnection {} - -impl DatabaseConnection { - pub async fn new( - datasource: &DatasourceConfig, - ) -> Result> { - match datasource.get_db_type() { - #[cfg(feature = "postgres")] - DatabaseType::PostgreSql => { - let (username, password) = match &datasource.auth { - crate::datasources::Auth::Postgres(postgres_auth) => match postgres_auth { - crate::datasources::PostgresAuth::Basic { username, password } => { - (username.as_str(), password.as_str()) - } - }, - #[cfg(feature = "mssql")] - crate::datasources::Auth::SqlServer(_) => { - panic!("Found SqlServer auth configuration for a PostgreSQL datasource") - } - #[cfg(feature = "mysql")] - crate::datasources::Auth::MySQL(_) => { - panic!("Found MySql auth configuration for a PostgreSQL datasource") - } - }; - let (new_client, new_connection) = tokio_postgres::connect( - &format!( - "postgres://{user}:{pswd}@{host}:{port}/{db}", - user = username, - pswd = password, - host = datasource.properties.host, - port = datasource.properties.port.unwrap_or_default(), - db = datasource.properties.db_name - )[..], - NoTls, - ) - .await?; - - tokio::spawn(async move { - if let Err(e) = new_connection.await { - eprintln!("An error occurred while trying to connect to the PostgreSQL database: {e}"); - } - }); - - Ok(DatabaseConnection::Postgres(PostgreSqlConnection { - client: new_client, - // connection: new_connection, - })) - } - #[cfg(feature = "mssql")] - DatabaseType::SqlServer => { - let mut config = Config::new(); - - config.host(&datasource.properties.host); - config.port(datasource.properties.port.unwrap_or_default()); - config.database(&datasource.properties.db_name); - - // Using SQL Server authentication. 
- config.authentication(match &datasource.auth { - #[cfg(feature = "postgres")] - crate::datasources::Auth::Postgres(_) => { - panic!("Found PostgreSQL auth configuration for a SqlServer database") - } - crate::datasources::Auth::SqlServer(sql_server_auth) => match sql_server_auth { - crate::datasources::SqlServerAuth::Basic { username, password } => { - AuthMethod::sql_server(username, password) - } - crate::datasources::SqlServerAuth::Integrated => AuthMethod::Integrated, - }, - #[cfg(feature = "mysql")] - crate::datasources::Auth::MySQL(_) => { - panic!("Found PostgreSQL auth configuration for a SqlServer database") - } - }); - - // on production, it is not a good idea to do this. We should upgrade - // Canyon in future versions to allow the user take care about this - // configuration - config.trust_cert(); - - // Taking the address from the configuration, using async-std's - // TcpStream to connect to the server. - let tcp = TcpStream::connect(config.get_addr()) - .await - .expect("Error instantiating the SqlServer TCP Stream"); - - // We'll disable the Nagle algorithm. Buffering is handled - // internally with a `Sink`. - tcp.set_nodelay(true) - .expect("Error in the SqlServer `nodelay` config"); - - // Handling TLS, login and other details related to the SQL Server. 
- let client = tiberius::Client::connect(config, tcp).await; - - Ok(DatabaseConnection::SqlServer(SqlServerConnection { - client: Box::leak(Box::new( - client.expect("A failure happened connecting to the database"), - )), - })) - } - #[cfg(feature = "mysql")] - DatabaseType::MySQL => { - let (user, password) = match &datasource.auth { - #[cfg(feature = "mssql")] - crate::datasources::Auth::SqlServer(_) => { - panic!("Found SqlServer auth configuration for a PostgreSQL datasource") - } - #[cfg(feature = "postgres")] - crate::datasources::Auth::Postgres(_) => { - panic!("Found MySql auth configuration for a PostgreSQL datasource") - } - #[cfg(feature = "mysql")] - crate::datasources::Auth::MySQL(mysql_auth) => match mysql_auth { - crate::datasources::MySQLAuth::Basic { username, password } => { - (username, password) - } - }, - }; - - //TODO add options to optionals params in url - - let url = format!( - "mysql://{}:{}@{}:{}/{}", - user, - password, - datasource.properties.host, - datasource.properties.port.unwrap_or_default(), - datasource.properties.db_name - ); - let mysql_connection = Pool::from_url(url)?; - - Ok(DatabaseConnection::MySQL(MysqlConnection { - client: { mysql_connection }, - })) - } - } - } - - #[cfg(feature = "postgres")] - pub fn postgres_connection(&self) -> &PostgreSqlConnection { - match self { - DatabaseConnection::Postgres(conn) => conn, - #[cfg(all(feature = "postgres", feature = "mssql", feature = "mysql"))] - _ => panic!(), - } - } - - #[cfg(feature = "mssql")] - pub fn sqlserver_connection(&mut self) -> &mut SqlServerConnection { - match self { - DatabaseConnection::SqlServer(conn) => conn, - #[cfg(all(feature = "postgres", feature = "mssql", feature = "mysql"))] - _ => panic!(), - } - } - - #[cfg(feature = "mysql")] - pub fn mysql_connection(&self) -> &MysqlConnection { - match self { - DatabaseConnection::MySQL(conn) => conn, - #[cfg(all(feature = "postgres", feature = "mssql", feature = "mysql"))] - _ => panic!(), - } - } -} - 
-#[cfg(test)] -mod database_connection_handler { - use super::*; - use crate::CanyonSqlConfig; - - /// Tests the behaviour of the `DatabaseType::from_datasource(...)` - #[test] - fn check_from_datasource() { - #[cfg(all(feature = "postgres", feature = "mssql", feature = "mysql"))] - { - const CONFIG_FILE_MOCK_ALT_ALL: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, - {name = 'MysqlDS', auth = { mysql = { basic = { username = "root", password = "root" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_ALL) - .expect("A failure happened retrieving the [canyon_sql] section"); - assert_eq!( - config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::PostgreSql - ); - assert_eq!( - config.canyon_sql.datasources[1].get_db_type(), - DatabaseType::SqlServer - ); - assert_eq!( - config.canyon_sql.datasources[2].get_db_type(), - DatabaseType::MySQL - ); - } - - #[cfg(feature = "postgres")] - { - const CONFIG_FILE_MOCK_ALT_PG: &str = r#" - [canyon_sql] - datasources = [ - {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, - ] - "#; - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) - .expect("A failure happened retrieving the [canyon_sql] section"); - assert_eq!( - 
config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::PostgreSql - ); - } - - #[cfg(feature = "mssql")] - { - const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" - [canyon_sql] - datasources = [ - {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) - .expect("A failure happened retrieving the [canyon_sql] section"); - assert_eq!( - config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::SqlServer - ); - } - - #[cfg(feature = "mysql")] - { - const CONFIG_FILE_MOCK_ALT_MYSQL: &str = r#" - [canyon_sql] - datasources = [ - {name = 'MysqlDS', auth = { mysql = { basic = { username = "root", password = "root" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } - ] - "#; - - let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MYSQL) - .expect("A failure happened retrieving the [canyon_sql] section"); - assert_eq!( - config.canyon_sql.datasources[0].get_db_type(), - DatabaseType::MySQL - ); - } - } -} diff --git a/canyon_connection/src/lib.rs b/canyon_connection/src/lib.rs deleted file mode 100644 index 5bd7a232..00000000 --- a/canyon_connection/src/lib.rs +++ /dev/null @@ -1,123 +0,0 @@ -#[cfg(feature = "mssql")] -pub extern crate async_std; -pub extern crate futures; -pub extern crate lazy_static; -#[cfg(feature = "mysql")] -pub extern crate mysql_async; -#[cfg(feature = "mssql")] -pub extern crate tiberius; -pub extern crate tokio; -#[cfg(feature = "postgres")] -pub extern crate tokio_postgres; -pub extern crate tokio_util; - -pub mod canyon_database_connector; -pub mod datasources; - -use std::fs; -use std::path::PathBuf; - -use crate::datasources::{CanyonSqlConfig, DatasourceConfig}; -use 
canyon_database_connector::DatabaseConnection; -use indexmap::IndexMap; -use lazy_static::lazy_static; -use tokio::sync::{Mutex, MutexGuard}; -use walkdir::WalkDir; - -lazy_static! { - pub static ref CANYON_TOKIO_RUNTIME: tokio::runtime::Runtime = - tokio::runtime::Runtime::new() // TODO Make the config with the builder - .expect("Failed initializing the Canyon-SQL Tokio Runtime"); - - static ref RAW_CONFIG_FILE: String = fs::read_to_string(find_canyon_config_file()) - .expect("Error opening or reading the Canyon configuration file"); - static ref CONFIG_FILE: CanyonSqlConfig = toml::from_str(RAW_CONFIG_FILE.as_str()) - .expect("Error generating the configuration for Canyon-SQL"); - - pub static ref DATASOURCES: Vec = - CONFIG_FILE.canyon_sql.datasources.clone(); - - pub static ref CACHED_DATABASE_CONN: Mutex> = - Mutex::new(IndexMap::new()); -} - -fn find_canyon_config_file() -> PathBuf { - for e in WalkDir::new(".") - .max_depth(2) - .into_iter() - .filter_map(|e| e.ok()) - { - let filename = e.file_name().to_str().unwrap(); - if e.metadata().unwrap().is_file() - && filename.starts_with("canyon") - && filename.ends_with(".toml") - { - return e.path().to_path_buf(); - } - } - - panic!() -} - -/// Convenient free function to initialize a kind of connection pool based on the datasources present defined -/// in the configuration file. -/// -/// This avoids Canyon to create a new connection to the database on every query, potentially avoiding bottlenecks -/// coming from the instantiation of that new conn every time. -/// -/// Note: We noticed with the integration tests that the [`tokio_postgres`] crate (PostgreSQL) is able to work in an async environment -/// with a new connection per query without no problem, but the [`tiberius`] crate (MSSQL) suffers a lot when it has continuous -/// statements with multiple queries, like and insert followed by a find by id to check if the insert query has done its -/// job done. 
-pub async fn init_connections_cache() { - for datasource in DATASOURCES.iter() { - CACHED_DATABASE_CONN.lock().await.insert( - &datasource.name, - DatabaseConnection::new(datasource) - .await - .unwrap_or_else(|_| { - panic!( - "Error pooling a new connection for the datasource: {:?}", - datasource.name - ) - }), - ); - } -} - -pub fn get_database_connection<'a>( - datasource_name: &str, - guarded_cache: &'a mut MutexGuard>, -) -> &'a mut DatabaseConnection { - if datasource_name.is_empty() { - guarded_cache - .get_mut( - DATASOURCES - .first() - .expect("We didn't found any valid datasource configuration. Check your `canyon.toml` file") - .name - .as_str() - ).unwrap_or_else(|| panic!("No default datasource found. Check your `canyon.toml` file")) - } else { - guarded_cache.get_mut(datasource_name) - .unwrap_or_else(|| - panic!("Canyon couldn't find a datasource in the pool with the argument provided: {datasource_name}") - ) - } -} - -pub fn get_database_config<'a>( - datasource_name: &str, - datasources_config: &'a [DatasourceConfig], -) -> &'a DatasourceConfig { - if datasource_name.is_empty() { - datasources_config - .first() - .unwrap_or_else(|| panic!("Not exist datasource")) - } else { - datasources_config - .iter() - .find(|dc| dc.name == datasource_name) - .unwrap_or_else(|| panic!("Not found datasource expected {datasource_name}")) - } -} diff --git a/canyon_connection/Cargo.toml b/canyon_core/Cargo.toml similarity index 74% rename from canyon_connection/Cargo.toml rename to canyon_core/Cargo.toml index fac88ef5..65063bb3 100644 --- a/canyon_connection/Cargo.toml +++ b/canyon_core/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "canyon_connection" +name = "canyon_core" version.workspace = true edition.workspace = true authors.workspace = true @@ -10,27 +10,28 @@ license.workspace = true description.workspace = true [dependencies] -tokio = { workspace = true } -tokio-util = { workspace = true } - tokio-postgres = { workspace = true, optional = true } tiberius 
= { workspace = true, optional = true } mysql_async = { workspace = true, optional = true } mysql_common = { workspace = true, optional = true } +chrono = { workspace = true } +async-std = { workspace = true, optional = true } +regex = { workspace = true } + +tokio = { workspace = true, features = ["sync"] } +tokio-util = { workspace = true } futures = { workspace = true } -indexmap = { workspace = true } -lazy_static = { workspace = true } toml = { workspace = true } serde = { workspace = true } -async-std = { workspace = true, optional = true } walkdir = { workspace = true } - +cfg-if = "1.0.0" +bb8-postgres = "0.9.0" +bb8-tiberius = "0.16.0" +bb8 = "0.9.0" [features] postgres = ["tokio-postgres"] mssql = ["tiberius", "async-std"] -mysql = ["mysql_async","mysql_common"] - - +mysql = ["mysql_async", "mysql_common"] \ No newline at end of file diff --git a/canyon_core/src/canyon.rs b/canyon_core/src/canyon.rs new file mode 100644 index 00000000..16707680 --- /dev/null +++ b/canyon_core/src/canyon.rs @@ -0,0 +1,252 @@ +use crate::connection::conn_errors::DatasourceNotFound; +use crate::connection::database_type::DatabaseType; +use crate::connection::datasources::{CanyonSqlConfig, DatasourceConfig, Datasources}; +use crate::connection::{CANYON_INSTANCE, db_connector, get_canyon_tokio_runtime}; +use db_connector::DatabaseConnector; +use std::collections::HashMap; +use std::{error::Error, fs}; + +/// The `Canyon` struct provides the main entry point for interacting with the Canyon-SQL context. +/// +/// This struct is responsible for managing database connections, configuration, and datasources. +/// It acts as a singleton, ensuring that only one instance of the Canyon context exists throughout +/// the application lifecycle. The `Canyon` struct provides methods for initializing the context, +/// accessing datasources, and retrieving database connections. +/// +/// # Features +/// - Singleton access to the Canyon context. 
+/// - Automatic discovery and loading of configuration files. +/// - Management of multiple database connections. +/// - Support for retrieving connections by name or default. +/// +/// # Examples +/// ```ignore +/// #[tokio::main] +/// async fn main() -> Result<(), Box> { +/// // Initialize the Canyon context +/// let canyon = Canyon::init().await?; +/// +/// // Access datasources +/// let datasources = canyon.datasources(); +/// for ds in datasources { +/// println!("Datasource: {}", ds.name); +/// } +/// +/// // Retrieve a connection by name +/// let connection = canyon.get_connection("MyDatasource").await?; +/// // Use the connection... +/// +/// Ok(()) +/// } +/// ``` +/// +/// # Methods +/// - `init`: Initializes the Canyon context by loading configuration and setting up connections. +/// - `instance`: Provides singleton access to the Canyon context. +/// - `datasources`: Returns a list of configured datasources. +/// - `find_datasource_by_name_or_default`: Finds a datasource by name or returns the default. +/// - `get_connection`: Retrieves a read-only connection from the cache. +/// - `get_mut_connection`: Retrieves a mutable connection from the cache. +pub struct Canyon { + config: Datasources, + connections: HashMap<&'static str, DatabaseConnector>, + default_connection: Option, + default_db_type: Option, +} + +impl Canyon { + /// Returns the global singleton instance of `Canyon`. + /// + /// This function allows access to the singleton instance of the Canyon engine + /// after it has been initialized through [`Canyon::init`]. It returns a shared, + /// read-only reference to the internal `Canyon` state. + /// + /// # Errors + /// + /// Returns an error if the `Canyon` instance has not yet been initialized. + /// In that case, the user must call [`Canyon::init`] before accessing the singleton. + pub fn instance() -> Result<&'static Self, Box> { + Ok(CANYON_INSTANCE.get().ok_or_else(|| { + // TODO: just call Canyon::init()? Why should we raise this error? 
+ // I guess that there's no point in making it fail for the user to manually start Canyon when we can handle everything + // internally + Box::new(std::io::Error::other( + "Canyon not initialized. Call `Canyon::init()` first.", + )) + })?) + } + + /// Initializes the global `Canyon` instance from a configuration file. + /// + /// Loads the `Datasources` configuration from the expected `canyon.toml` file (or another + /// discoverable location), establishes one or more database connections, and sets up the default + /// connection and database type. + /// + /// This function is idempotent: calling it multiple times will reuse the already-initialized instance. + /// + /// # Errors + /// + /// - If the configuration file is missing or malformed. + /// - If deserialization into `CanyonSqlConfig` fails. + /// - If any configured datasource fails to initialize. + /// + /// # Example + /// + /// ```ignore + /// #[tokio::main] + /// async fn main() -> Result<(), Box> { + /// let canyon = Canyon::init().await?; + /// Ok(()) + /// } + /// ``` + pub async fn init() -> Result<&'static Self, Box> { + if CANYON_INSTANCE.get().is_some() { + return Canyon::instance(); // Already initialized, no need to do it again + } + + let path = __impl::find_config_path()?; + let config_content = fs::read_to_string(&path)?; + let config: Datasources = toml::from_str::(&config_content)?.canyon_sql; + + let mut connections: HashMap<&str, DatabaseConnector> = HashMap::new(); + let mut default_connection: Option = None; + let mut default_db_type: Option = None; + + for ds in config.datasources.iter() { + __impl::process_new_conn_by_datasource( + ds, + &mut connections, + &mut default_connection, + &mut default_db_type, + ) + .await?; + } + + let canyon = Canyon { + config, + connections, + default_connection, + default_db_type, + }; + + get_canyon_tokio_runtime(); // Just ensuring that is initialized in manual-mode + Ok(CANYON_INSTANCE.get_or_init(|| canyon)) + } + + /// Returns an immutable 
slice containing all configured datasources. + /// + /// This slice represents the datasources defined in your `canyon.toml` configuration. + /// + /// # Example + /// + /// ``` + /// use canyon_core::canyon::Canyon; + /// for ds in Canyon::instance()?.datasources() { + /// println!("Datasource name: {}", ds.name); + /// } + /// ``` + #[inline(always)] + pub fn datasources(&self) -> &[DatasourceConfig] { + &self.config.datasources + } + + // Retrieve a datasource by name or returns the first one declared in the configuration file + // or added by the user via the builder interface as the default one (if exists at least one) + pub fn find_datasource_by_name_or_default( + &self, + name: &str, + ) -> Result<&DatasourceConfig, DatasourceNotFound> { + if name.is_empty() { + self.datasources() + .first() + .ok_or_else(|| DatasourceNotFound::from(None)) + } else { + self.datasources() + .iter() + .find(|ds| ds.name == name) + .ok_or_else(|| DatasourceNotFound::from(Some(name))) + } + } + + pub fn get_default_db_type(&self) -> Result { + self.default_db_type + .ok_or_else(|| DatasourceNotFound::from(None)) + } + + // Retrieves a connector to the configured connection as the default connection by the user + // (the first defined in the configuration file) + pub fn get_default_connection(&self) -> Result<&DatabaseConnector, DatasourceNotFound> { + self.default_connection + .as_ref() + .ok_or_else(|| DatasourceNotFound::from(None)) + } + + // Retrieve a read-only connection from the cache + pub fn get_connection(&self, name: &str) -> Result<&DatabaseConnector, DatasourceNotFound> { + if name.is_empty() { + return self.get_default_connection(); + } + + let conn = self + .connections + .get(name) + .ok_or_else(|| DatasourceNotFound::from(Some(name)))?; + + Ok(conn) + } +} + +mod __impl { + use crate::connection::database_type::DatabaseType; + use crate::connection::datasources::DatasourceConfig; + use crate::connection::db_connector::DatabaseConnector; + use 
std::collections::HashMap; + use std::error::Error; + use std::path::PathBuf; + use walkdir::WalkDir; + + // Internal helper to locate the config file + pub(crate) fn find_config_path() -> Result { + WalkDir::new(".") + .max_depth(2) + .into_iter() + .filter_map(Result::ok) + .find_map(|e| { + let filename = e.file_name().to_string_lossy().to_lowercase(); + if e.metadata().ok()?.is_file() + && filename.starts_with("canyon") + && filename.ends_with(".toml") + { + Some(e.path().to_path_buf()) + } else { + None + } + }) + .ok_or_else(|| { + std::io::Error::new(std::io::ErrorKind::NotFound, "No Canyon config found") + }) + } + + pub(crate) async fn process_new_conn_by_datasource( + ds: &DatasourceConfig, + connections: &mut HashMap<&str, DatabaseConnector>, + default: &mut Option, + default_db_type: &mut Option, + ) -> Result<(), Box> { + if default.is_none() { + let cloned_ds_for_default = ds.clone(); + *default = Some(DatabaseConnector::new(&cloned_ds_for_default).await?); // Only cloning the smart pointer + } + let conn = DatabaseConnector::new(ds).await?; + let name: &'static str = Box::leak(ds.name.clone().into_boxed_str()); + + if default_db_type.is_none() { + *default_db_type = Some(conn.get_db_type()); + } + + let connection_sp = conn; + connections.insert(name, connection_sp); + + Ok(()) + } +} diff --git a/canyon_core/src/column.rs b/canyon_core/src/column.rs new file mode 100644 index 00000000..502b9a35 --- /dev/null +++ b/canyon_core/src/column.rs @@ -0,0 +1,62 @@ +use std::{any::Any, borrow::Cow}; + +#[cfg(feature = "mysql")] +use mysql_async::{self}; +#[cfg(feature = "mssql")] +use tiberius::{self}; +#[cfg(feature = "postgres")] +use tokio_postgres::{self}; + +/// Generic abstraction for hold a Column type that will be one of the Column +/// types present in the dependent crates +// #[derive(Copy, Clone)] +pub struct Column<'a> { + pub(crate) name: Cow<'a, str>, + pub(crate) type_: ColumnType, +} +impl Column<'_> { + pub fn name(&self) -> &str { + 
&self.name + } + pub fn column_type(&self) -> &ColumnType { + &self.type_ + } + // pub fn type_(&'a self) -> &'_ dyn Type { + // match (*self).type_ { + // #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, + // #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, + // } + // } +} + +pub trait ColType { + fn as_any(&self) -> &dyn Any; +} +#[cfg(feature = "postgres")] +impl ColType for tokio_postgres::types::Type { + fn as_any(&self) -> &dyn Any { + self + } +} +#[cfg(feature = "mssql")] +impl ColType for tiberius::ColumnType { + fn as_any(&self) -> &dyn Any { + self + } +} +#[cfg(feature = "mysql")] +impl ColType for mysql_async::consts::ColumnType { + fn as_any(&self) -> &dyn Any { + self + } +} + +/// Wrapper over the dependencies Column's types +pub enum ColumnType { + #[cfg(feature = "postgres")] + Postgres(tokio_postgres::types::Type), + #[cfg(feature = "mssql")] + SqlServer(tiberius::ColumnType), + #[cfg(feature = "mysql")] + MySQL(mysql_async::consts::ColumnType), +} diff --git a/canyon_core/src/connection/clients/mod.rs b/canyon_core/src/connection/clients/mod.rs new file mode 100644 index 00000000..366249d5 --- /dev/null +++ b/canyon_core/src/connection/clients/mod.rs @@ -0,0 +1,6 @@ +#[cfg(feature = "mssql")] +pub mod mssql; +#[cfg(feature = "mysql")] +pub mod mysql; +#[cfg(feature = "postgres")] +pub mod postgresql; diff --git a/canyon_core/src/connection/clients/mssql.rs b/canyon_core/src/connection/clients/mssql.rs new file mode 100644 index 00000000..e603ef7e --- /dev/null +++ b/canyon_core/src/connection/clients/mssql.rs @@ -0,0 +1,289 @@ +use crate::connection::clients::mssql::sqlserver_query_launcher::execute_query; +use crate::connection::contracts::DbConnection; +use crate::connection::database_type::DatabaseType; +use crate::connection::datasources::DatasourceConfig; +use crate::mapper::RowMapper; +use crate::query::parameters::QueryParameter; +use crate::rows::{CanyonRows, FromSqlOwnedValue}; +use 
bb8::PooledConnection; +use bb8_tiberius::ConnectionManager as TiberiusConnectionManager; +use std::error::Error; +use std::sync::Arc; +use tiberius::Query; + +type SqlServerConnectionPool = Arc>; + +/// A connector for a `SqlServer` database +pub struct SqlServerConnector(SqlServerConnectionPool); + +impl SqlServerConnector { + pub async fn new(config: &DatasourceConfig) -> Result> { + Ok(Self(__impl::create_sqlserver_connector(config).await?)) + } + pub async fn get_pooled( + &self, + ) -> Result, Box> { + Ok(self.0.get().await?) + } +} + +impl DbConnection for SqlServerConnector { + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let mut conn = self.get_pooled().await?; + let result = execute_query(stmt, params, &mut conn) + .await? + .into_results() + .await? + .into_iter() + .flatten() + .collect(); + + Ok(CanyonRows::Tiberius(result)) + } + + async fn query( + &self, + stmt: S, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>, + { + let mut conn = self.get_pooled().await?; + Ok(execute_query(stmt.as_ref(), params, &mut conn) + .await? + .into_results() + .await? + .into_iter() + .flatten() + .flat_map(|row| R::deserialize_sqlserver(&row)) + .collect::>()) + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + R: RowMapper, + { + let mut conn = self.get_pooled().await?; + + let result = execute_query(stmt, params, &mut conn) + .await? + .into_row() + .await?; + + match result { + Some(r) => Ok(Some(R::deserialize_sqlserver(&r)?)), + None => Ok(None), + } + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let mut conn = self.get_pooled().await?; + let row = crate::connection::clients::mssql::sqlserver_query_launcher::execute_query( + stmt, params, &mut conn, + ) + .await? + .into_row() + .await? 
+ .ok_or_else(|| { + format!( + "Failure executing 'query_one_for' while retrieving the first row with stmt: {:?}", + stmt + ) + })?; + + Ok(row + .into_iter() + .map(T::from_sql_owned) + .collect::>() + .remove(0)? + .ok_or_else(|| format!("Failure executing 'query_one_for' while retrieving the first column value on the first row with stmt: {:?}", stmt))? + ) + } + + async fn execute( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let mssql_query = crate::connection::clients::mssql::sqlserver_query_launcher::generate_mssql_query_client(stmt, params).await; + let mut conn = self.get_pooled().await?; + + mssql_query + .execute(&mut conn) + .await + .map(|r| r.total()) + .map_err(From::from) + } + + fn get_database_type(&self) -> Result> { + Ok(DatabaseType::SqlServer) + } +} + +pub(crate) mod sqlserver_query_launcher { + use super::*; + use tiberius::QueryStream; + + pub(crate) async fn execute_query<'a>( + stmt: &str, + params: &[&dyn QueryParameter], + conn: &'a mut bb8::PooledConnection<'_, bb8_tiberius::ConnectionManager>, + ) -> Result, Box> { + let mssql_query = generate_mssql_query_client(stmt, params).await; + mssql_query.query(conn).await.map_err(From::from) + } + + pub(crate) async fn generate_mssql_query_client<'a>( + stmt: &str, + params: &[&'a dyn QueryParameter], + ) -> Query<'a> { + let mut stmt = String::from(stmt); + if stmt.contains("RETURNING") { + // TODO: when the InsertQuerybuilder with a api on the builder for the returning clause + let c = stmt.clone(); + let temp = c.split_once("RETURNING").unwrap(); + let temp2 = temp.0.split_once("VALUES").unwrap(); + + stmt = format!( + "{} OUTPUT inserted.{} VALUES {}", + temp2.0.trim(), + temp.1.trim(), + temp2.1.trim() + ); + } + + let stmt = stmt.replace('$', "@P"); // TODO: this should be solved by the querybuilder + generate_query_and_bind_params(stmt, params) + } + + // Query and parameters are generated in this procedure together to avoid lifetime errors + fn 
generate_query_and_bind_params<'a>( + stmt: String, + params: &[&'a (dyn QueryParameter + 'a)], + ) -> Query<'a> { + let mut mssql_query = Query::new(stmt); + params.iter().for_each(|param| { + mssql_query.bind(*param); + }); + mssql_query + } +} + +pub(crate) mod __impl { + use super::*; + use crate::connection::datasources::{Auth, SqlServerAuth}; + use bb8::Pool; + use std::sync::Arc; + use tiberius::Config; + + pub(crate) async fn create_sqlserver_connector( + datasource: &DatasourceConfig, + ) -> Result>, Box> { + let sqlserver_config = sqlserver_config_from_datasource(datasource)?; + + let manager = TiberiusConnectionManager::new(sqlserver_config); + let pool = bb8::Pool::builder().max_size(10u32).build(manager).await?; + + Ok(SqlServerConnectionPool::from(pool)) + } + + pub(crate) fn sqlserver_config_from_datasource( + datasource: &DatasourceConfig, + ) -> Result> { + let mut tiberius_config = tiberius::Config::new(); + + tiberius_config.host(&datasource.properties.host); + tiberius_config.port(datasource.get_port_or_default_by_db()); + tiberius_config.database(&datasource.properties.db_name); + + let auth_config = extract_mssql_auth(&datasource.auth)?; + tiberius_config.authentication(auth_config); + tiberius_config.trust_cert(); // TODO: this should be specifically set via user input + tiberius_config.encryption(tiberius::EncryptionLevel::NotSupported); // TODO: user input + // TODO: in MacOS 15, this is the actual workaround. 
We need to investigate further + // https://github.com/prisma/tiberius/issues/364 + + Ok(tiberius_config) + } + + pub(crate) fn extract_mssql_auth( + auth: &Auth, + ) -> Result> { + match auth { + Auth::SqlServer(sql_server_auth) => match sql_server_auth { + SqlServerAuth::Basic { username, password } => { + Ok(tiberius::AuthMethod::sql_server(username, password)) + } + SqlServerAuth::Integrated => Ok(tiberius::AuthMethod::Integrated), + }, + #[cfg(any(feature = "postgres", feature = "mysql"))] + _ => Err("Invalid auth configuration for a SqlServer datasource.".into()), + } + } +} +#[cfg(test)] +mod tests { + use super::__impl; + use crate::connection::datasources::{ + Auth, DatasourceConfig, DatasourceProperties, SqlServerAuth, + }; + use tiberius::AuthMethod; + + #[test] + fn test_extract_mssql_auth_basic() { + let auth = Auth::SqlServer(SqlServerAuth::Basic { + username: "sa".to_string(), + password: "password123".to_string(), + }); + + let result = __impl::extract_mssql_auth(&auth).unwrap(); + + match result { + // We can only check the variant, not its internals (private fields) + AuthMethod::SqlServer(_) => {} // success + _ => panic!("Expected AuthMethod::SqlServer variant"), + } + } + + #[test] + fn test_extract_mssql_auth_integrated() { + let auth = Auth::SqlServer(SqlServerAuth::Integrated); + let result = __impl::extract_mssql_auth(&auth).unwrap(); + assert!(matches!(result, AuthMethod::Integrated)); + } + + #[test] + fn test_sqlserver_config_from_datasource_basic() { + let datasource = DatasourceConfig { + name: "test_source".into(), + properties: DatasourceProperties { + host: "localhost".into(), + db_name: "test_db".into(), + port: None, // default + migrations: None, + }, + auth: Auth::SqlServer(SqlServerAuth::Basic { + username: "sa".into(), + password: "pass123".into(), + }), + }; + + let config = __impl::sqlserver_config_from_datasource(&datasource).unwrap(); + assert_eq!(config.get_addr(), "localhost:1433"); + } +} diff --git 
a/canyon_core/src/connection/clients/mysql.rs b/canyon_core/src/connection/clients/mysql.rs new file mode 100644 index 00000000..960b520a --- /dev/null +++ b/canyon_core/src/connection/clients/mysql.rs @@ -0,0 +1,225 @@ +use crate::connection::clients::mysql::mysql_query_launcher::{execute_query, generate_mysql_stmt}; +use crate::connection::contracts::DbConnection; +use crate::connection::database_type::DatabaseType; +use crate::connection::datasources::DatasourceConfig; +use crate::mapper::RowMapper; +use crate::rows::FromSqlOwnedValue; +use crate::{query::parameters::QueryParameter, rows::CanyonRows}; +use mysql_async::Row; +use mysql_async::prelude::Query; +use mysql_common::constants::ColumnType; +use mysql_common::row; +use std::error::Error; + +/// A connection with a `Mysql` database +pub struct MySQLConnector(mysql_async::Pool); + +impl MySQLConnector { + pub async fn new(config: &DatasourceConfig) -> Result> { + Ok(Self(__impl::load_mysql_config(config).await?)) + } +} + +impl DbConnection for MySQLConnector { + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + Ok(CanyonRows::MySQL(execute_query(stmt, params, self).await?)) + } + + async fn query( + &self, + stmt: S, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>, + { + Ok(execute_query(stmt, params, self) + .await? + .iter() + .flat_map(|row| R::deserialize_mysql(row)) + .collect()) + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + R: RowMapper, + { + let result = execute_query(stmt, params, self).await?; + + match result.first() { + Some(r) => Ok(Some(R::deserialize_mysql(r)?)), + None => Ok(None), + } + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + Ok(execute_query(stmt, params, self) + .await? 
+ .first() + .ok_or_else(|| format!("Failure executing 'query_one_for' while retrieving the first row with stmt: {:?}", stmt))? + .get::(0) + .ok_or_else(|| format!("Failure executing 'query_one_for' while retrieving the first column value on the first row with stmt: {:?}", stmt))? + ) + } + + async fn execute( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let mysql_connection = self.0.get_conn().await?; + let mysql_stmt = generate_mysql_stmt(stmt.as_ref(), params)?; + + Ok(mysql_stmt.run(mysql_connection).await?.affected_rows()) + } + + fn get_database_type(&self) -> Result> { + Ok(DatabaseType::MySQL) + } +} + +pub(crate) mod mysql_query_launcher { + pub const DETECT_PARAMS_IN_QUERY: &str = r"\$([\d])+"; + pub const DETECT_QUOTE_IN_QUERY: &str = r#"\"|\\"#; + + use super::*; + + use mysql_async::QueryWithParams; + use mysql_async::Value; + use mysql_async::prelude::Query; + use regex::Regex; + use std::sync::Arc; + + pub(crate) async fn execute_query( + stmt: S, + params: &[&'_ dyn QueryParameter], + conn: &MySQLConnector, + ) -> Result, Box> + where + S: AsRef + Send, + { + let mysql_connection = conn.0.get_conn().await?; + let is_insert = stmt.as_ref().find(" RETURNING"); + let mysql_stmt = generate_mysql_stmt(stmt.as_ref(), params)?; + + let mut query_result = mysql_stmt.run(mysql_connection).await?; + let result_rows = if is_insert.is_some() { + let last_insert = query_result + .last_insert_id() + .map(Value::UInt) + .ok_or("MySQL: Error getting the id in insert")?; + + vec![row::new_row( + vec![last_insert], + Arc::new([mysql_async::Column::new(ColumnType::MYSQL_TYPE_UNKNOWN)]), + )] + } else { + query_result.collect::().await? 
+ }; + + Ok(result_rows) + } + + pub(crate) fn generate_mysql_stmt( + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result>, Box> { + let stmt_with_escape_characters = regex::escape(stmt); + let query_string = + Regex::new(DETECT_PARAMS_IN_QUERY)?.replace_all(&stmt_with_escape_characters, "?"); + + let mut query_string = Regex::new(DETECT_QUOTE_IN_QUERY)? + .replace_all(&query_string, "") + .to_string(); + + if let Some(index_start_clausule_returning) = query_string.find(" RETURNING") { + query_string.truncate(index_start_clausule_returning); + } + + let params_query: Vec = + reorder_params(stmt, params, |f| (*f).as_mysql_param().to_value())?; + + Ok(QueryWithParams { + query: query_string, + params: params_query, + }) + } + + fn reorder_params( + stmt: &str, + params: &[&'_ dyn QueryParameter], + fn_parser: impl Fn(&&dyn QueryParameter) -> T, + ) -> Result, Box> { + use mysql_query_launcher::DETECT_PARAMS_IN_QUERY; + + let mut ordered_params = vec![]; + let rg = Regex::new(DETECT_PARAMS_IN_QUERY) + .expect("Error create regex with detect params pattern expression"); + + for positional_param in rg.find_iter(stmt) { + let pp: &str = positional_param.as_str(); + let pp_index = pp[1..] // param $1 -> get 1 + .parse::()? 
+ - 1; + + let element = params + .get(pp_index) + .expect("Error obtaining the element of the mapping against parameters."); + ordered_params.push(fn_parser(element)); + } + + Ok(ordered_params) + } +} + +pub(crate) mod __impl { + use crate::connection::datasources::{Auth, DatasourceConfig, MySQLAuth}; + use mysql_async::Pool; + use std::error::Error; + + pub(crate) async fn load_mysql_config( + datasource: &DatasourceConfig, + ) -> Result> { + let (user, password) = extract_mysql_auth(&datasource.auth)?; + + // TODO: the pool constrains must be adquired from the datasource config + let pool_constraints = + mysql_async::PoolConstraints::new(2, 10).ok_or("Failure launching the MySQL pool")?; + + let mysql_opts_builder = mysql_async::OptsBuilder::default() + .pool_opts(mysql_async::PoolOpts::default().with_constraints(pool_constraints)) + .user(Some(user)) + .pass(Some(password)) + .db_name(Some(&datasource.properties.db_name)) + .ip_or_hostname(&datasource.properties.host) + .tcp_port(datasource.get_port_or_default_by_db()); + + Ok(mysql_async::Pool::new(mysql_opts_builder)) + } + + pub(crate) fn extract_mysql_auth( + auth: &Auth, + ) -> Result<(&str, &str), Box> { + match auth { + Auth::MySQL(mysql_auth) => match mysql_auth { + MySQLAuth::Basic { username, password } => Ok((username, password)), + }, + #[cfg(any(feature = "postgres", feature = "mssql"))] + _ => Err("Invalid auth configuration for a MySQL datasource.".into()), + } + } +} diff --git a/canyon_core/src/connection/clients/postgresql.rs b/canyon_core/src/connection/clients/postgresql.rs new file mode 100644 index 00000000..ce098cd9 --- /dev/null +++ b/canyon_core/src/connection/clients/postgresql.rs @@ -0,0 +1,263 @@ +use crate::connection::contracts::DbConnection; +use crate::connection::database_type::DatabaseType; +use crate::connection::datasources::{Auth, DatasourceConfig, PostgresAuth}; +use crate::mapper::RowMapper; +use crate::rows::FromSqlOwnedValue; +use 
crate::{query::parameters::QueryParameter, rows::CanyonRows}; +use bb8::{Pool, PooledConnection}; +use bb8_postgres::PostgresConnectionManager; +use std::error::Error; +use std::sync::Arc; +use tokio_postgres::types::ToSql; +use tokio_postgres::{Config, NoTls}; + +type PgManager = PostgresConnectionManager; +type PostgresConnectionPool = Arc>; + +/// A connector with a `PostgreSQL` database +pub struct PostgresConnector(PostgresConnectionPool); +impl PostgresConnector { + pub async fn new(datasource: &DatasourceConfig) -> Result> { + Ok(Self(create_postgres_connector(datasource).await?)) + } + + pub async fn get_pooled( + &self, + ) -> Result, Box> { + Ok(self.0.get().await?) + } +} + +impl DbConnection for PostgresConnector { + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let r = self + .get_pooled() + .await? + .query(stmt, &get_psql_params(params)) + .await?; + Ok(CanyonRows::Postgres(r)) + } + + async fn query( + &self, + stmt: S, + params: &[&dyn QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>, + { + Ok(self + .get_pooled() + .await? + .query(stmt.as_ref(), &get_psql_params(params)) + .await? + .iter() + .flat_map(|row| R::deserialize_postgresql(row)) + .collect()) + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + R: RowMapper, + { + let result = self + .get_pooled() + .await? + .query_one(stmt, &get_psql_params(params)) + .await; + + match result { + Ok(row) => Ok(Some(R::deserialize_postgresql(&row)?)), + Err(e) => match e.to_string().contains("unexpected number of rows") { + true => Ok(None), + _ => Err(e)?, + }, + } + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + let r = self + .get_pooled() + .await? 
+ .query_one(stmt, &get_psql_params(params)) + .await?; + r.try_get::(0).map_err(From::from) + } + + async fn execute( + &self, + stmt: &str, + params: &[&dyn QueryParameter], + ) -> Result> { + self.get_pooled() + .await? + .execute(stmt, &get_psql_params(params)) + .await + .map_err(From::from) + } + + fn get_database_type(&self) -> Result> { + Ok(DatabaseType::PostgreSql) + } +} + +fn get_psql_params<'a>(params: &'a [&'a dyn QueryParameter]) -> Vec<&'a (dyn ToSql + Sync)> { + params + .iter() + .map(|param| param.as_postgres_param()) + .collect::>() +} + +// Façade helper to create a new postgres connector +async fn create_postgres_connector( + datasource: &DatasourceConfig, +) -> Result>, Box> { + let (user, password) = __impl::extract_postgres_auth(&datasource.auth)?; + let config = __impl::set_tokio_postgres_configs(datasource, user, password); + let conn_pool = __impl::create_postgres_connection_pool(config).await?; + + Ok(PostgresConnectionPool::from(conn_pool)) +} + +mod __impl { + use super::*; + + pub(crate) fn set_tokio_postgres_configs( + datasource_config: &DatasourceConfig, + user: &str, + password: &str, + ) -> Config { + let mut config = tokio_postgres::Config::new(); + config.host(&datasource_config.properties.host); + config.port(datasource_config.get_port_or_default_by_db()); + config.dbname(&datasource_config.properties.db_name); + config.user(user); + config.password(password); + + // Optimize connection settings for better performance + config.connect_timeout(std::time::Duration::from_secs(5)); + config.keepalives_idle(std::time::Duration::from_secs(30)); + config.keepalives_interval(std::time::Duration::from_secs(10)); + config.keepalives_retries(3); + + config + } + + pub(crate) fn extract_postgres_auth( + auth: &Auth, + ) -> Result<(&str, &str), Box> { + match auth { + Auth::Postgres(pg_auth) => match pg_auth { + PostgresAuth::Basic { username, password } => Ok((username, password)), + }, + #[cfg(any(feature = "mssql", feature = "mysql"))] 
+ _ => Err("Invalid auth configuration for a Postgres datasource.".into()), + } + } + + pub(crate) async fn create_postgres_connection_pool( + config: Config, + ) -> Result, Box> { + let manager = PgManager::new(config, NoTls); + let pool = bb8::Pool::builder().max_size(10u32).build(manager).await?; + Ok(pool) + } +} + +#[cfg(test)] +mod tests { + use super::__impl; + use crate::connection::datasources::{ + Auth, DatasourceConfig, DatasourceProperties, PostgresAuth, + }; + + #[test] + fn test_extract_postgres_auth_basic() { + let auth = Auth::Postgres(PostgresAuth::Basic { + username: "pguser".into(), + password: "pgpass".into(), + }); + + let (user, pass) = __impl::extract_postgres_auth(&auth).unwrap(); + assert_eq!(user, "pguser"); + assert_eq!(pass, "pgpass"); + } + + #[test] + fn test_set_tokio_postgres_configs_basic() { + let datasource = DatasourceConfig { + name: "pg_test".into(), + properties: DatasourceProperties { + host: "localhost".into(), + db_name: "pg_db".into(), + port: Some(5433), + migrations: None, + }, + auth: Auth::Postgres(PostgresAuth::Basic { + username: "pguser".into(), + password: "pgpass".into(), + }), + }; + + let config = __impl::set_tokio_postgres_configs(&datasource, "pguser", "pgpass"); + + assert_eq!( + config.get_hosts(), + vec![tokio_postgres::config::Host::Tcp("localhost".into())] + ); + assert_eq!(config.get_dbname(), Some("pg_db")); + assert_eq!(config.get_user(), Some("pguser")); + assert_eq!(*config.get_ports().first().unwrap(), 5433); + + // sanity check for configured timeouts and keepalives + assert_eq!( + config.get_connect_timeout(), + Some(std::time::Duration::from_secs(5)).as_ref() + ); + assert_eq!( + config.get_keepalives_idle(), + std::time::Duration::from_secs(30) + ); + assert_eq!( + config.get_keepalives_interval(), + Some(std::time::Duration::from_secs(10)) + ); + assert_eq!(config.get_keepalives_retries(), Some(3)); + } + + #[test] + fn test_set_tokio_postgres_configs_default_port() { + let datasource = 
DatasourceConfig { + name: "pg_test_default".into(), + properties: DatasourceProperties { + host: "127.0.0.1".into(), + db_name: "default_db".into(), + port: None, + migrations: None, + }, + auth: Auth::Postgres(PostgresAuth::Basic { + username: "user".into(), + password: "pass".into(), + }), + }; + + let config = __impl::set_tokio_postgres_configs(&datasource, "user", "pass"); + assert_eq!(*config.get_ports().first().unwrap(), 5432); // default Postgres port + assert_eq!(config.get_dbname(), Some("default_db")); + assert_eq!(config.get_user(), Some("user")); + } +} diff --git a/canyon_core/src/connection/conn_errors.rs b/canyon_core/src/connection/conn_errors.rs new file mode 100644 index 00000000..3b2ed455 --- /dev/null +++ b/canyon_core/src/connection/conn_errors.rs @@ -0,0 +1,26 @@ +//! Defines the Canyon-SQL custom connection error types + +/// Raised when a [`crate::connection::datasources::DatasourceConfig`] isn't found given a user input +#[derive(Debug, Clone)] +pub struct DatasourceNotFound { + pub datasource_name: String, +} +impl From> for DatasourceNotFound { + fn from(value: Option<&str>) -> Self { + DatasourceNotFound { + datasource_name: value + .map(String::from) + .unwrap_or_else(|| String::from("No datasource name was provided")), + } + } +} +impl std::fmt::Display for DatasourceNotFound { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "Unable to found a datasource that matches: {:?}", + self.datasource_name + ) + } +} +impl std::error::Error for DatasourceNotFound {} diff --git a/canyon_core/src/connection/contracts/mod.rs b/canyon_core/src/connection/contracts/mod.rs new file mode 100644 index 00000000..7ade67ad --- /dev/null +++ b/canyon_core/src/connection/contracts/mod.rs @@ -0,0 +1,118 @@ +use crate::connection::database_type::DatabaseType; +use crate::mapper::RowMapper; +use crate::query::parameters::QueryParameter; +use crate::rows::{CanyonRows, FromSqlOwnedValue}; +use std::error::Error; +use 
std::future::Future; + +/// The `DbConnection` trait defines the core functionality required for interacting with a database connection. +/// It provides methods for executing queries, retrieving rows, and obtaining metadata about the database type. +/// +/// This trait is designed to be implemented by various database connection types, enabling a unified interface +/// for database operations. Each method is asynchronous and returns a `Future` to support non-blocking operations. +/// +/// # Examples +/// +/// ```ignore +/// use crate::connection::DbConnection; +/// +/// async fn execute_query(conn: &C) { +/// let result = conn.execute("INSERT INTO users (name) VALUES ($1)", &[&"John"]).await; +/// match result { +/// Ok(rows_affected) => println!("Rows affected: {}", rows_affected), +/// Err(e) => eprintln!("Error executing query: {}", e), +/// } +/// } +/// ``` +/// +/// # Required Methods +/// Each method in this trait must be implemented by the implementor. +pub trait DbConnection { + /// Executes a query and retrieves multiple rows from the database. + /// + /// # Arguments + /// * `stmt` - A SQL statement to execute. + /// * `params` - A slice of query parameters to bind to the statement. + /// + /// # Returns + /// A [Future] that resolves to a [Result] containing [`CanyonRows`] on success or an error on failure. + fn query_rows( + &self, + stmt: &str, + params: &[&dyn QueryParameter], + ) -> impl Future>> + Send; + + /// Executes a query and maps the result to a collection of rows of type `R`. + /// + /// # Arguments + /// * `stmt` - A SQL statement to execute. + /// * `params` - A slice of query parameters to bind to the statement. + /// + /// # Returns + /// A [Future] that resolves to a [Result] containing a `Vec` on success or an error on failure. + /// + /// The `R` type must implement the [`RowMapper`] trait. 
+ fn query( + &self, + stmt: S, + params: &[&dyn QueryParameter], + ) -> impl Future, Box>> + Send + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>; + + /// Executes a query and retrieves a single row mapped to type `R`. + /// + /// # Arguments + /// * `stmt` - A SQL statement to execute. + /// * `params` - A slice of query parameters to bind to the statement. + /// + /// # Returns + /// A [Future] that resolves to a [Result] containing an `Option` on success or an error on failure. + /// + /// The `R` type must implement the [`RowMapper`] trait. + fn query_one( + &self, + stmt: &str, + params: &[&dyn QueryParameter], + ) -> impl Future, Box>> + Send + where + R: RowMapper; + + /// Executes a query and retrieves a single value of type `T`. + /// + /// # Arguments + /// * `stmt` - A SQL statement to execute. + /// * `params` - A slice of query parameters to bind to the statement. + /// + /// # Returns + /// A [Future] that resolves to a [Result] containing the value of type `T` on success or an error on failure. + /// + /// The `T` type must implement the [`FromSqlOwnedValue`] trait. + fn query_one_for>( + &self, + stmt: &str, + params: &[&dyn QueryParameter], + ) -> impl Future>> + Send; + + /// Executes a SQL statement and returns the number of affected rows. + /// + /// # Arguments + /// * `stmt` - A SQL statement to execute. + /// * `params` - A slice of query parameters to bind to the statement. + /// + /// # Returns + /// A [Future] that resolves to a [Result] containing the number of affected rows on success or an error on failure. + fn execute( + &self, + stmt: &str, + params: &[&dyn QueryParameter], + ) -> impl Future>> + Send; + + /// Retrieves the type of the database associated with the connection. + /// + /// # Returns + /// A `Result` containing the [`DatabaseType`] on success or an error on failure. 
+ fn get_database_type(&self) -> Result>; +} diff --git a/canyon_core/src/connection/database_type.rs b/canyon_core/src/connection/database_type.rs new file mode 100644 index 00000000..d3915d71 --- /dev/null +++ b/canyon_core/src/connection/database_type.rs @@ -0,0 +1,96 @@ +use super::datasources::Auth; +use crate::canyon::Canyon; +use serde::Deserialize; +use std::error::Error; +use std::fmt::Display; + +/// Represents the supported database backends in **Canyon-SQL**. +/// +/// This enum abstracts over the specific database dialects supported by Canyon, +/// allowing queries and builders to adapt automatically to the correct SQL syntax +/// and placeholder conventions (`$1`, `?`, `@P1`, etc.) according to the active +/// [`DatabaseType`]. +/// +/// The variant used at runtime is determined either: +/// - Explicitly, when passed to a [`QueryBuilder`] constructor, or +/// - Implicitly, from the first configured data source via +/// [`Canyon::get_default_db_type()`]. +/// +/// # Example +/// ```rust,ignore +/// use canyon_core::connection::database_type::DatabaseType; +/// +/// // Create a query builder explicitly targeting PostgreSQL: +/// let builder = QueryBuilder::new_for(table, columns, DatabaseType::PostgreSql)?; +/// +/// // Or defer the database type resolution until runtime: +/// let builder = QueryBuilder::new_for(table, columns, DatabaseType::Deferred)?; +/// let query = builder.build()?; // will resolve to the default DB type +/// ``` +#[derive(Deserialize, Debug, Eq, PartialEq, Clone, Copy)] +pub enum DatabaseType { + /// The Postgres database backend. + #[cfg(feature = "postgres")] + #[serde(alias = "postgres", alias = "postgresql")] + PostgreSql, + + /// The Microsoft SQL Server backend. + #[cfg(feature = "mssql")] + #[serde(alias = "sqlserver", alias = "mssql")] + SqlServer, + + /// The MySQL or MariaDB backend. 
+ #[cfg(feature = "mysql")] + #[serde(alias = "mysql")] + MySQL, + + /// A **placeholder variant** used when the database dialect + /// cannot be determined at compile time. + /// + /// The [`Deferred`](Self::Deferred) variant allows you to construct a query + /// (for example, through a procedural macro like `CanyonCrud`) before the + /// actual database type is known — typically at compile-time code generation. + /// + /// When using this variant, the [`QueryBuilder::build()`] method will automatically + /// attempt to resolve the concrete database type from the active [`Canyon`] + /// instance at runtime. + /// + /// # Use case + /// This is particularly useful for macros and compile-time query generation, + /// where it’s desirable to emit code that’s agnostic of the target database. + /// The macro can safely emit `DatabaseType::Deferred` in the generated code, + /// and Canyon will resolve it dynamically when executing queries. + /// + /// # Example + /// ```rust,ignore + /// let query = SelectQueryBuilder::new_for( + /// &table_metadata, + /// DatabaseType::Deferred + /// )? + /// .where_("id", Comp::Eq, &42) + /// .build()?; // resolved dynamically to the active database type + /// ``` + Deferred, +} + +impl Display for DatabaseType { + fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(fmt, "{:?}", self) + } +} + +impl From<&Auth> for DatabaseType { + fn from(value: &Auth) -> Self { + value.get_db_type() + } +} + +/// The default implementation for [`DatabaseType`] returns the database type for the first +/// datasource configured +impl DatabaseType { + pub fn default_type() -> Result> { + Canyon::instance()? 
+ .get_default_db_type() + .map_err(|err| Box::new(err) as Box) + } +} diff --git a/canyon_connection/src/datasources.rs b/canyon_core/src/connection/datasources.rs similarity index 54% rename from canyon_connection/src/datasources.rs rename to canyon_core/src/connection/datasources.rs index 11edcd31..a1b39a93 100644 --- a/canyon_connection/src/datasources.rs +++ b/canyon_core/src/connection/datasources.rs @@ -1,8 +1,14 @@ +//! The datasources module of Canyon-SQL. +//! +//! This module defines the configuration and authentication mechanisms for database datasources. +//! It includes support for multiple database backends and provides utilities for managing +//! datasource properties. + use serde::Deserialize; -use crate::canyon_database_connector::DatabaseType; +use super::database_type::DatabaseType; -/// ``` +/// ```rust #[test] fn load_ds_config_from_array() { #[cfg(feature = "postgres")] @@ -112,14 +118,27 @@ pub struct DatasourceConfig { impl DatasourceConfig { pub fn get_db_type(&self) -> DatabaseType { - match self.auth { + self.auth.get_db_type() + } + + pub fn has_migrations_enabled(&self) -> bool { + if let Some(migrations) = self.properties.migrations { + migrations.has_migrations_enabled() + } else { + false + } + } + + pub fn get_port_or_default_by_db(&self) -> u16 { + self.properties.port.unwrap_or(match self.get_db_type() { #[cfg(feature = "postgres")] - Auth::Postgres(_) => DatabaseType::PostgreSql, + DatabaseType::PostgreSql => 5432, #[cfg(feature = "mssql")] - Auth::SqlServer(_) => DatabaseType::SqlServer, + DatabaseType::SqlServer => 1433, #[cfg(feature = "mysql")] - Auth::MySQL(_) => DatabaseType::MySQL, - } + DatabaseType::MySQL => 3306, + _ => todo!("Non legal port cfg"), + }) } } @@ -136,6 +155,19 @@ pub enum Auth { MySQL(MySQLAuth), } +impl Auth { + pub fn get_db_type(&self) -> DatabaseType { + match self { + #[cfg(feature = "postgres")] + Auth::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "mssql")] + Auth::SqlServer(_) => 
DatabaseType::SqlServer, + #[cfg(feature = "mysql")] + Auth::MySQL(_) => DatabaseType::MySQL, + } + } +} + #[derive(Deserialize, Debug, Clone, PartialEq)] #[cfg(feature = "postgres")] pub enum PostgresAuth { @@ -175,3 +207,93 @@ pub enum Migrations { #[serde(alias = "Disabled", alias = "disabled")] Disabled, } + +impl Migrations { + pub fn has_migrations_enabled(&self) -> bool { + matches!(self, Migrations::Enabled) + } +} + +#[cfg(test)] +mod datasources_tests { + use super::*; + + /// Tests the behaviour of the `DatabaseType::from_datasource(...)` + #[test] + fn check_from_datasource() { + #[cfg(all(feature = "postgres", feature = "mssql", feature = "mysql"))] + { + const CONFIG_FILE_MOCK_ALT_ALL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' }, + {name = 'MysqlDS', auth = { mysql = { basic = { username = "root", password = "root" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_ALL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + assert_eq!( + config.canyon_sql.datasources[1].get_db_type(), + DatabaseType::SqlServer + ); + assert_eq!( + config.canyon_sql.datasources[2].get_db_type(), + DatabaseType::MySQL + ); + } + + #[cfg(feature = "postgres")] + { + const CONFIG_FILE_MOCK_ALT_PG: &str = r#" + [canyon_sql] + datasources = [ + {name = 
'PostgresDS', auth = { postgresql = { basic = { username = "postgres", password = "postgres" } } }, properties.host = 'localhost', properties.db_name = 'triforce', properties.migrations='enabled' }, + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_PG) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::PostgreSql + ); + } + + #[cfg(feature = "mssql")] + { + const CONFIG_FILE_MOCK_ALT_MSSQL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'SqlServerDS', auth = { sqlserver = { basic = { username = "sa", password = "SqlServer-10" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MSSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::SqlServer + ); + } + + #[cfg(feature = "mysql")] + { + const CONFIG_FILE_MOCK_ALT_MYSQL: &str = r#" + [canyon_sql] + datasources = [ + {name = 'MysqlDS', auth = { mysql = { basic = { username = "root", password = "root" } } }, properties.host = '192.168.0.250.1', properties.port = 3340, properties.db_name = 'triforce2', properties.migrations='disabled' } + ] + "#; + + let config: CanyonSqlConfig = toml::from_str(CONFIG_FILE_MOCK_ALT_MYSQL) + .expect("A failure happened retrieving the [canyon_sql] section"); + assert_eq!( + config.canyon_sql.datasources[0].get_db_type(), + DatabaseType::MySQL + ); + } + } +} diff --git a/canyon_core/src/connection/db_connector.rs b/canyon_core/src/connection/db_connector.rs new file mode 100644 index 00000000..73e3b9af --- /dev/null +++ b/canyon_core/src/connection/db_connector.rs @@ -0,0 +1,66 @@ +#[cfg(feature = "mssql")] +use crate::connection::clients::mssql::SqlServerConnector; +#[cfg(feature = "mysql")] +use 
crate::connection::clients::mysql::MySQLConnector; +#[cfg(feature = "postgres")] +use crate::connection::clients::postgresql::PostgresConnector; + +use crate::connection::database_type::DatabaseType; +use crate::connection::datasources::DatasourceConfig; +use crate::mapper::RowMapper; +use crate::query::parameters::QueryParameter; +use crate::rows::{CanyonRows, FromSqlOwnedValue}; +use std::error::Error; + +/// The Canyon database connection handler. When the client's program +/// starts, Canyon gets the information about the desired datasources, +/// process them and generates a pool of connections for +/// every datasource defined. +pub enum DatabaseConnector { + #[cfg(feature = "postgres")] + Postgres(PostgresConnector), + #[cfg(feature = "mssql")] + SqlServer(SqlServerConnector), + #[cfg(feature = "mysql")] + MySQL(MySQLConnector), +} + +unsafe impl Send for DatabaseConnector {} +unsafe impl Sync for DatabaseConnector {} + +crate::impl_db_connection_for_db_connector!(DatabaseConnector); +crate::impl_db_connection_for_db_connector!(&DatabaseConnector); +crate::impl_db_connection_for_db_connector!(&mut DatabaseConnector); + +impl DatabaseConnector { + pub async fn new(datasource: &DatasourceConfig) -> Result> { + // Add connection pooling at the client level for better performance + match datasource.get_db_type() { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => { + Ok(Self::Postgres(PostgresConnector::new(datasource).await?)) + } + + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => { + Ok(Self::SqlServer(SqlServerConnector::new(datasource).await?)) + } + + #[cfg(feature = "mysql")] + DatabaseType::MySQL => Ok(Self::MySQL(MySQLConnector::new(datasource).await?)), + + DatabaseType::Deferred => panic!("Deferred connection"), + } + } + + pub fn get_db_type(&self) -> DatabaseType { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(_) => DatabaseType::PostgreSql, + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(_) 
=> DatabaseType::SqlServer, + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(_) => DatabaseType::MySQL, + } + } +} diff --git a/canyon_core/src/connection/impl_db_connection_macro.rs b/canyon_core/src/connection/impl_db_connection_macro.rs new file mode 100644 index 00000000..c7c024cd --- /dev/null +++ b/canyon_core/src/connection/impl_db_connection_macro.rs @@ -0,0 +1,183 @@ +//! This module contains macros for helping us to reduce boilerplate implementation code of the +//! [`crate::connection::DbConnection`] + +#[macro_export] +macro_rules! impl_db_connection_for_db_connector { + ($type:ty) => { + impl $crate::connection::contracts::DbConnection for $type { + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(client) => client.query_rows(stmt, params).await, + + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(client) => client.query_rows(stmt, params).await, + + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(client) => client.query_rows(stmt, params).await, + } + } + + async fn query( + &self, + stmt: S, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>, + { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(client) => client.query(stmt, params).await, + + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(client) => client.query(stmt, params).await, + + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(client) => client.query(stmt, params).await, + } + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + R: RowMapper, + { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(client) => { + client.query_one::(stmt, params).await + } + + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(client) => { + 
client.query_one::(stmt, params).await + } + + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(client) => client.query_one::(stmt, params).await, + } + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(client) => client.query_one_for(stmt, params).await, + + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(client) => { + client.query_one_for(stmt, params).await + } + + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(client) => client.query_one_for(stmt, params).await, + } + } + + async fn execute( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + match self { + #[cfg(feature = "postgres")] + DatabaseConnector::Postgres(client) => client.execute(stmt, params).await, + + #[cfg(feature = "mssql")] + DatabaseConnector::SqlServer(client) => client.execute(stmt, params).await, + + #[cfg(feature = "mysql")] + DatabaseConnector::MySQL(client) => client.execute(stmt, params).await, + } + } + + fn get_database_type(&self) -> Result> { + Ok(self.get_db_type()) + } + } + }; +} + +#[macro_export] +macro_rules! 
impl_db_connection_for_str { + ($type:ty) => { + impl $crate::connection::contracts::DbConnection for $type { + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn $crate::query::parameters::QueryParameter], + ) -> Result<$crate::rows::CanyonRows, Box> { + let conn = $crate::connection::Canyon::instance()?.get_connection(self)?; + conn.query_rows(stmt, params).await + } + + async fn query( + &self, + stmt: S, + params: &[&'_ dyn $crate::query::parameters::QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: $crate::mapper::RowMapper, + Vec: std::iter::FromIterator<::Output>, + { + let conn = $crate::connection::Canyon::instance()?.get_connection(self)?; + conn.query(stmt, params).await + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn $crate::query::parameters::QueryParameter], + ) -> Result, Box> + where + R: $crate::mapper::RowMapper, + { + let conn = $crate::connection::Canyon::instance()?.get_connection(self)?; + conn.query_one::(stmt, params).await + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn $crate::query::parameters::QueryParameter], + ) -> Result> { + let conn = $crate::connection::Canyon::instance()?.get_connection(self)?; + conn.query_one_for(stmt, params).await + } + + async fn execute( + &self, + stmt: &str, + params: &[&'_ dyn $crate::query::parameters::QueryParameter], + ) -> Result> { + let conn = $crate::connection::Canyon::instance()?.get_connection(self)?; + conn.execute(stmt, params).await + } + + fn get_database_type( + &self, + ) -> Result< + $crate::connection::database_type::DatabaseType, + Box, + > { + Ok($crate::connection::Canyon::instance()? + .find_datasource_by_name_or_default(self)? + .get_db_type()) + } + } + }; +} diff --git a/canyon_core/src/connection/mod.rs b/canyon_core/src/connection/mod.rs new file mode 100644 index 00000000..432fe27a --- /dev/null +++ b/canyon_core/src/connection/mod.rs @@ -0,0 +1,123 @@ +//! The connection module of Canyon-SQL. 
+//! +//! This module handles database connections, including connection pooling and configuration. +//! It provides abstractions for managing multiple datasources and supports asynchronous operations. + +#[cfg(feature = "postgres")] +pub extern crate tokio_postgres; + +#[cfg(feature = "mssql")] +pub extern crate async_std; +#[cfg(feature = "mssql")] +pub extern crate tiberius; + +#[cfg(feature = "mysql")] +pub extern crate mysql_async; + +pub extern crate futures; +pub extern crate tokio; +pub extern crate tokio_util; + +#[macro_use] +pub mod impl_db_connection_macro; + +pub mod clients; +pub mod conn_errors; +pub mod contracts; +pub mod database_type; +pub mod datasources; +pub mod db_connector; + +use crate::canyon::Canyon; +use crate::connection::contracts::DbConnection; +use crate::connection::database_type::DatabaseType; + +use std::error::Error; +use std::sync::{Arc, OnceLock}; + +use tokio::runtime::Runtime; +use tokio::sync::Mutex; + +// // TODO's: DatabaseConnector and DataSource can implement default, so there's no need to use str and &str +// // as defaults anymore, since the can load as the default the first one defined in the config file, or have more +// // complex workflows that are deferred to initialization time +// +// // TODO: Crud Operations should be split into two different derives, splitting the automagic from the _with ones + +pub(crate) static CANYON_INSTANCE: OnceLock = OnceLock::new(); + +// Use OnceLock for the Tokio runtime +static CANYON_TOKIO_RUNTIME: OnceLock = OnceLock::new(); + +// Function to get the runtime (lazy initialization) +pub fn get_canyon_tokio_runtime() -> &'static Runtime { + CANYON_TOKIO_RUNTIME + .get_or_init(|| Runtime::new().expect("Failed initializing the Canyon-SQL Tokio Runtime")) +} + +use crate::mapper::RowMapper; +use crate::query::parameters::QueryParameter; +use crate::rows::{CanyonRows, FromSqlOwnedValue}; + +// Apply the macro to implement DbConnection for &str and str +use 
crate::impl_db_connection_for_str; +impl_db_connection_for_str!(str); +impl_db_connection_for_str!(&str); + +impl DbConnection for Arc> +where + T: DbConnection + Send, + Self: Clone, +{ + async fn query_rows( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + self.lock().await.query_rows(stmt, params).await + } + + async fn query( + &self, + stmt: S, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator, + { + self.lock().await.query(stmt, params).await + } + + async fn query_one( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result, Box> + where + R: RowMapper, + { + self.lock().await.query_one::(stmt, params).await + } + + async fn query_one_for>( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + self.lock().await.query_one_for::(stmt, params).await + } + + async fn execute( + &self, + stmt: &str, + params: &[&'_ dyn QueryParameter], + ) -> Result> { + self.lock().await.execute(stmt, params).await + } + + fn get_database_type(&self) -> Result> { + todo!() + } +} diff --git a/canyon_core/src/lib.rs b/canyon_core/src/lib.rs new file mode 100644 index 00000000..01a90728 --- /dev/null +++ b/canyon_core/src/lib.rs @@ -0,0 +1,28 @@ +//! The core module of Canyon-SQL. +//! +//! This module provides the foundational components for database connections, query execution, +//! and data mapping. It includes support for multiple database backends such as PostgreSQL, +//! MySQL, and SQL Server, and defines traits and utilities for interacting with these databases. 
+ +#[cfg(feature = "postgres")] +pub extern crate tokio_postgres; + +#[cfg(feature = "mssql")] +pub extern crate async_std; +#[cfg(feature = "mssql")] +pub extern crate tiberius; + +#[cfg(feature = "mysql")] +pub extern crate mysql_async; + +extern crate core; + +pub mod canyon; + +pub mod column; +pub mod connection; +pub mod mapper; +pub mod query; +pub mod row; +pub mod rows; +pub mod transaction; diff --git a/canyon_core/src/mapper.rs b/canyon_core/src/mapper.rs new file mode 100644 index 00000000..ba0af768 --- /dev/null +++ b/canyon_core/src/mapper.rs @@ -0,0 +1,45 @@ +//! The mapper module of Canyon-SQL. +//! +//! This module defines traits and utilities for mapping database query results to user-defined +//! types. It includes the `RowMapper` trait and related functionality for deserialization. + +/// Declares functions that takes care to deserialize data incoming +/// from some supported database in Canyon-SQL into a user's defined +/// type `T` +pub trait RowMapper: Sized { + type Output; + + #[cfg(feature = "postgres")] + fn deserialize_postgresql( + row: &tokio_postgres::Row, + ) -> Result<::Output, CanyonError>; + #[cfg(feature = "mssql")] + fn deserialize_sqlserver( + row: &tiberius::Row, + ) -> Result<::Output, CanyonError>; + #[cfg(feature = "mysql")] + fn deserialize_mysql( + row: &mysql_async::Row, + ) -> Result<::Output, CanyonError>; +} + +pub trait DefaultRowMapper { + type Mapper: RowMapper; +} + +// Blanket impl to make `Mapper = Self` for any `T: RowMapper` +impl DefaultRowMapper for T +where + T: RowMapper, +{ + type Mapper = T; +} + +pub type CanyonError = Box; // TODO: convert this into a +// real error +pub trait IntoResults { + fn into_results(self) -> Result, CanyonError> + where + R: RowMapper, + Vec: FromIterator<::Output>; +} diff --git a/canyon_core/src/query/bounds.rs b/canyon_core/src/query/bounds.rs new file mode 100644 index 00000000..867ad303 --- /dev/null +++ b/canyon_core/src/query/bounds.rs @@ -0,0 +1,110 @@ +use 
crate::query::parameters::QueryParameter; +use crate::rows::FromSqlOwnedValue; + +/// Contract that provides a way to Canyon to inspect certain property or values at runtime. +/// +/// Typically, these will be used by the macros to gather some information or to create some user code +/// in more complex scenarios, like when insert an entity, when we need to know the value of the fields of +/// the current instance that we'd like to insert +pub trait Inspectionable<'a> { + type PrimaryKeyType: FromSqlOwnedValue; + + /// Returns an allocated linear collection with the current values of all the fields declared + /// for the implementor, as the result of the evaluation of the &self.#field expression, iteratively + /// over every type member, but if the type contains in some field the #[primary_key] annotation, + /// this will be skipped!! + /// + /// This is mostly because this operation now is only useful on the insert_entity family operations, + /// and is a fixed invariant in our logic nowadays. + /// + /// # Warning + /// This may change in the future, so that's why this operation shouldn't be used, nor it's + /// recommended to use it publicly as an end-user. 
+ fn fields_actual_values(&self) -> Vec<&dyn QueryParameter>; + + /// Returns a linear collection with the names of every field for the implementor as a String + fn fields_names(&self) -> &[&'static str]; + fn fields_as_comma_sep_string(&self) -> &'static str; + + fn queries_placeholders(&self) -> &'static str; + + fn primary_key(&self) -> Option<&'static str>; + fn primary_key_st() -> Option<&'static str>; + fn primary_key_actual_value(&self) -> &'_ (dyn QueryParameter + '_); + fn set_primary_key_actual_value( + &mut self, + value: Self::PrimaryKeyType, + ) -> Result<(), Box>; +} + +pub trait TableMetadata: std::fmt::Display { + fn as_str(&self) -> &'static str; +} + +/// Created for retrieve the field's name of a field of a struct, giving +/// the Canyon's autogenerated enum with the variants that maps this +/// fields. +/// +/// ``` +/// pub struct Struct<'a> { +/// pub some_field: &'a str +/// } +/// +/// // Autogenerated enum +/// #[derive(Debug)] +/// #[allow(non_camel_case_types)] +/// pub enum StructField { +/// some_field +/// } +/// ``` +/// So, to retrieve the field's name, something like this w'd be used on some part +/// of the Canyon's Manager crate, to wire the necessary code to pass the field +/// name, retrieved from the enum variant, to a called. +/// +/// // Something like: +/// `let struct_field_name_from_variant = StructField::some_field.field_name_as_str();` +pub trait FieldIdentifier: std::fmt::Display { + fn as_str(&self) -> &'static str; + + /// Returns a formatted string as `{.}`. + /// + /// This is useful during queries generations for example, in join statements, when you + /// alias other defined names, etc. + fn table_and_column_name(&self) -> String; +} + +/// Represents some kind of introspection to make the implementors +/// able to retrieve a value inside some variant of an associated enum type. 
+/// and convert it to a tuple struct formed by the column name as an String, +/// and the dynamic value of the [`QueryParameter`] trait object contained +/// inside the variant requested, +/// enabling a conversion of that value into something +/// that can be part of an SQL query. +/// +/// +/// Ex: +/// `SELECT * FROM some_table WHERE id = 2` +/// +/// That '2' it's extracted from some enum that implements [`FieldValueIdentifier`], +/// where usually the variant w'd be something like: +/// +/// ``` +/// pub enum Enum { +/// IntVariant(i32) +/// } +/// ``` +pub trait FieldValueIdentifier { + fn value(&self) -> (&'static str, &dyn QueryParameter); +} + +/// Bounds to some type T in order to make it callable over some fn parameter T +/// +/// Represents the ability of an struct to be considered as candidate to perform +/// actions over it as it holds the 'parent' side of a foreign key relation. +/// +/// Usually, it's used on the Canyon macros to retrieve the column that +/// this side of the relation it's representing +pub trait ForeignKeyable { + /// Returns the actual value of the field related to the column passed in + fn get_fk_column(&self, column: &str) -> Option<&dyn QueryParameter>; +} diff --git a/canyon_core/src/query/mod.rs b/canyon_core/src/query/mod.rs new file mode 100644 index 00000000..332fbafb --- /dev/null +++ b/canyon_core/src/query/mod.rs @@ -0,0 +1,7 @@ +#![allow(clippy::module_inception)] +pub mod query; + +pub mod bounds; +pub mod operators; +pub mod parameters; +pub mod querybuilder; diff --git a/canyon_crud/src/query_elements/operators.rs b/canyon_core/src/query/operators.rs similarity index 63% rename from canyon_crud/src/query_elements/operators.rs rename to canyon_core/src/query/operators.rs index 015ced03..91c46e90 100644 --- a/canyon_crud/src/query_elements/operators.rs +++ b/canyon_core/src/query/operators.rs @@ -1,6 +1,7 @@ -use canyon_connection::canyon_database_connector::DatabaseType; +use 
crate::connection::database_type::DatabaseType; +use std::fmt::{Display, Formatter}; -pub trait Operator { +pub trait Operator: Display { fn as_str(&self, placeholder_counter: usize, datasource_type: &DatabaseType) -> String; } @@ -21,8 +22,22 @@ pub enum Comp { LtEq, } +impl Display for Comp { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let op = match *self { + Self::Eq => "=", + Self::Neq => "<>", + Self::Gt => ">", + Self::GtEq => ">=", + Self::Lt => "<", + Self::LtEq => "<=", + }; + write!(f, "{}", op) + } +} + impl Operator for Comp { - fn as_str(&self, placeholder_counter: usize, _datasource_type: &DatabaseType) -> String { + fn as_str(&self, placeholder_counter: usize, _with_type: &DatabaseType) -> String { match *self { Self::Eq => format!(" = ${placeholder_counter}"), Self::Neq => format!(" <> ${placeholder_counter}"), @@ -52,11 +67,14 @@ impl Operator for Like { DatabaseType::SqlServer => "VARCHAR", #[cfg(feature = "mysql")] DatabaseType::MySQL => "CHAR", + _ => panic!("Provisional LIKE"), }; match *self { Like::Full => { - format!(" LIKE CONCAT('%', CAST(${placeholder_counter} AS {type_data_to_cast_str}) ,'%')") + format!( + " LIKE CONCAT('%', CAST(${placeholder_counter} AS {type_data_to_cast_str}) ,'%')" + ) } Like::Left => format!( " LIKE CONCAT('%', CAST(${placeholder_counter} AS {type_data_to_cast_str}))" @@ -67,3 +85,17 @@ impl Operator for Like { } } } + +impl Display for Like { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{}", + match *self { + Like::Full => "Like::Full", + Like::Left => "Like::Left", + Like::Right => "Like::Right", + } + ) + } +} diff --git a/canyon_core/src/query/parameters.rs b/canyon_core/src/query/parameters.rs new file mode 100644 index 00000000..7094da3c --- /dev/null +++ b/canyon_core/src/query/parameters.rs @@ -0,0 +1,630 @@ +#[cfg(feature = "mysql")] +use mysql_async::{self, prelude::ToValue}; +use std::any::Any; +use std::fmt::Debug; +#[cfg(feature = 
"mssql")] +use tiberius::{self, ColumnData, IntoSql}; +#[cfg(feature = "postgres")] +use tokio_postgres::{self, types::ToSql}; + +// TODO: cfg feature for this re-exports, as date-time or something +use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; + +pub trait QueryParameterValue<'a> { + fn downcast_ref(&'a self) -> Option<&'a T>; + fn to_owned_any(&'a self) -> Box; +} +impl<'a> QueryParameterValue<'a> for dyn QueryParameter { + fn downcast_ref(&'a self) -> Option<&'a T> { + self.as_any().downcast_ref() + } + + fn to_owned_any(&'a self) -> Box { + Box::new(self.downcast_ref::().cloned().unwrap()) + } +} +impl<'a> QueryParameterValue<'a> for &'a dyn QueryParameter { + fn downcast_ref(&'a self) -> Option<&'a T> { + self.as_any().downcast_ref() + } + + fn to_owned_any(&self) -> Box { + todo!() + } +} + +// Define a zero-sized type to represent the absence of a primary key +// #[derive(Debug, Clone, Copy)] +// pub struct NoPrimaryKey; +// +// // Implement the QueryParameter trait for the zero-sized type +// impl QueryParameter for NoPrimaryKey { +// fn as_any(&'a self) -> &'a dyn Any { +// todo!() +// } +// +// fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { +// todo!() +// } +// +// fn as_sqlserver_param(&self) -> ColumnData<'_> { +// todo!() +// } +// +// fn as_mysql_param(&self) -> &dyn ToValue { +// todo!() +// } +// } +// + +/// Defines a trait for represent type bounds against the allowed +/// data types supported by Canyon to be used as query parameters. +pub trait QueryParameter: Debug + Send + Sync { + fn as_any(&self) -> &dyn Any; + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync); + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_>; + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue; +} + +/// The implementation of the [`crate::connection::tiberius`] [`IntoSql`] for the +/// query parameters. 
+/// +/// This implementation is necessary because of the generic amplitude +/// of the arguments of the [`crate::transaction::Transaction::query`], that should work with +/// a collection of [`QueryParameter`], in order to allow a workflow +/// that is not dependent of the specific type of the argument that holds +/// the query parameters of the database connectors +#[cfg(feature = "mssql")] +impl<'b> IntoSql<'b> for &'b dyn QueryParameter { + fn into_sql(self) -> ColumnData<'b> { + self.as_sqlserver_param() + } +} + +//TODO Pending to review and see if it is necessary to apply something similar to the previous implementation. + +impl QueryParameter for bool { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::Bit(Some(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for i16 { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I16(Option::from(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option<&'static i16> { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I16(Some(*self.unwrap())) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for i32 { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature 
= "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I32(Some(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I32(*self) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for u32 { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + panic!("Unsupported sqlserver parameter type "); + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + panic!("Unsupported sqlserver parameter type "); + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for f32 { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::F32(Some(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + 
ColumnData::F32(*self) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for f64 { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::F64(Some(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::F64(*self) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for i64 { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I64(Some(*self)) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::I64(*self) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for String { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) + } + #[cfg(feature = "mysql")] + fn 
as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + match self { + Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), + None => ColumnData::String(None), + } + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option<&'static String> { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + match self { + Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), + None => ColumnData::String(None), + } + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for &'static str { + fn as_any(&self) -> &dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option<&'static str> { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + match *self { + Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), + None => ColumnData::String(None), + } + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { + self + } +} + +impl 
QueryParameter for NaiveDate { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for NaiveTime { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +impl QueryParameter for NaiveDateTime { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { + self + } +} + +impl QueryParameter for Option { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = 
"postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + self + } +} + +//TODO pending +impl QueryParameter for DateTime { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + todo!() + } +} + +impl QueryParameter for Option> { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + todo!() + } +} + +impl QueryParameter for DateTime { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + todo!() + } +} + +impl QueryParameter for Option> { + fn as_any(&'_ self) -> &'_ dyn Any { + self + } + + #[cfg(feature = "postgres")] + fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { + self + } + #[cfg(feature = "mssql")] + fn as_sqlserver_param(&self) -> ColumnData<'_> { + self.into_sql() + } + #[cfg(feature = "mysql")] + fn as_mysql_param(&self) -> &dyn ToValue { + todo!() + } +} diff --git a/canyon_core/src/query/query.rs b/canyon_core/src/query/query.rs new file mode 100644 index 00000000..85e28c9c --- /dev/null +++ b/canyon_core/src/query/query.rs @@ -0,0 +1,58 @@ +use 
crate::canyon::Canyon; +use crate::connection::contracts::DbConnection; +use crate::mapper::RowMapper; +use crate::query::parameters::QueryParameter; +use crate::transaction::Transaction; +use std::error::Error; +use std::fmt::Debug; + +// TODO: query should implement ToStatement (as the drivers underneath Canyon) or similar +// to be usable directly in the input of Transaction and DbConnenction +/// Holds a sql sentence details +/// +/// Plan: The MacroTokens struct gets some generic bounds to retrieve the fields names at compile +/// time (already does it) and the querybuilder uses it with const_format to introduce the names of the +/// columns instead of just using * (in this case, is the same, unless we introduce new annotations like #[skip_mapping] +#[derive(Debug)] +pub struct Query<'a> { + pub sql: String, + pub params: Vec<&'a dyn QueryParameter>, +} + +impl AsRef for Query<'_> { + fn as_ref(&self) -> &str { + self.sql.as_str() + } +} + +impl<'a> Query<'a> { + /// Constructs a new [`Self`] but receiving the number of expected query parameters, allowing + /// to pre-allocate the underlying linear collection that holds the arguments to the exact capacity, + /// potentially saving re-allocations when the query is created + pub fn new(sql: String, params: Vec<&'a dyn QueryParameter>) -> Query<'a> { + Self { sql, params } + } + + /// Launches the generated query against the database assuming the default + /// [`DbConnection`] + pub async fn launch_default( + self, + ) -> Result, Box> + where + Vec: FromIterator<::Output>, + { + let default_conn = Canyon::instance()?.get_default_connection()?; + ::query(&self.sql, &self.params, default_conn).await + } + + /// Launches the generated query against the database with the selected [`DbConnection`] + pub async fn launch_with( + self, + input: I, + ) -> Result, Box> + where + Vec: FromIterator<::Output>, + { + input.query(&self.sql, &self.params).await + } +} diff --git 
a/canyon_core/src/query/querybuilder/contracts/mod.rs b/canyon_core/src/query/querybuilder/contracts/mod.rs new file mode 100644 index 00000000..8f86c83b --- /dev/null +++ b/canyon_core/src/query/querybuilder/contracts/mod.rs @@ -0,0 +1,202 @@ +//! Contains the elements that makes part of the formal declaration +//! of the behaviour of the Canyon-SQL QueryBuilder + +use std::error::Error; +use crate::query::bounds::{FieldIdentifier, FieldValueIdentifier, TableMetadata}; +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; + +pub trait DeleteQueryBuilderOps<'a>: QueryBuilderOps<'a> {} + +pub trait UpdateQueryBuilderOps<'a>: QueryBuilderOps<'a> { + /// Creates an SQL `SET` clause by specifying the columns that must be updated in the sentence, + /// but without adding any [`QueryParameter`] value to the internal querybuilder + fn set(self, columns: &'a [String]) -> Result> + where Self: Sized; + + /// Similar to [`Self::set`] but storing the underlying update values for each column in the + /// internal values collection of the [`crate::query::querybuilder::QueryBuilder`] + fn set_with_values(self, columns: &'a [(Z, Q)]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Self: Sized, + Vec<&'a dyn QueryParameter>: Extend<&'a Q>; +} + +pub trait SelectQueryBuilderOps<'a>: QueryBuilderOps<'a> { + /// Adds the column names that must be added to the query in order to retrieve the correct mapped fields + /// If this method isn't invoked, the querybuilder will create a SELECT * FROM query + fn with_columns(self, columns: &'a [String]) -> Self; + + /// Adds a *LEFT JOIN* SQL statement to the underlying + /// `Sql Statement` held by the [`QueryBuilder`], where: + /// + /// * `join_table` - The table target of the join operation + /// * `col1` - The left side of the ON operator for the join + /// * `col2` - The right side of the ON operator for the join + /// + /// > Note: The order on the column parameters is irrelevant + fn 
left_join( + self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self; + + /// Adds a *INNER JOIN* SQL statement to the underlying + /// `Sql Statement` held by the [`QueryBuilder`], where: + /// + /// * `join_table` - The table target of the join operation + /// * `col1` - The left side of the ON operator for the join + /// * `col2` - The right side of the ON operator for the join + /// + /// > Note: The order on the column parameters is irrelevant + fn inner_join( + self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self; + + /// Adds a *RIGHT JOIN* SQL statement to the underlying + /// `Sql Statement` held by the [`QueryBuilder`], where: + /// + /// * `join_table` - The table target of the join operation + /// * `col1` - The left side of the ON operator for the join + /// * `col2` - The right side of the ON operator for the join + /// + /// > Note: The order on the column parameters is irrelevant + fn right_join( + self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self; + + /// Adds a *FULL JOIN* SQL statement to the underlying + /// `Sql Statement` held by the [`QueryBuilder`], where: + /// + /// * `join_table` - The table target of the join operation + /// * `col1` - The left side of the ON operator for the join + /// * `col2` - The right side of the ON operator for the join + /// + /// > Note: The order on the column parameters is irrelevant + fn full_join( + self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self; +} + +/// The [`QueryBuilder`] trait is the root of a kind of hierarchy +/// on more specific [`super::QueryBuilder`], that are: +/// +/// * [`super::SelectQueryBuilder`] +/// * [`super::UpdateQueryBuilder`] +/// * [`super::DeleteQueryBuilder`] +/// +/// This trait provides the formal declaration of the behaviour that the +/// 
implementors must provide in their public interfaces, grouping +/// the common elements between every element down in that +/// hierarchy. +/// +/// For example, the [`super::QueryBuilder`] type holds the data +/// necessary for track the SQL sentence while it's being generated +/// thought the fluent builder, and provides the behaviour of +/// the common elements defined in this trait. +/// +/// The more concrete types represents a wrapper over a raw +/// [`super::QueryBuilder`], offering all the elements declared +/// in this trait in its public interface, and which implementation +/// only consists of call the same method on the wrapped +/// [`super::QueryBuilder`]. +/// +/// This allows us to declare in their public interface their +/// specific operations, like, for example, join operations +/// on the [`super::SelectQueryBuilder`], and the usage +/// of the `SET` clause on a [`super::UpdateQueryBuilder`], +/// without mixing types or polluting everything into +/// just one type. +pub trait QueryBuilderOps<'a> { + /// Returns a read-only reference to the underlying SQL sentence, + /// with the same lifetime as self + fn read_sql(&'a self) -> &'a str; + + /// Public interface for append the content of a slice to the end of + /// the underlying SQL sentence. + /// + /// This mutator will allow the user to wire SQL code to the already + /// generated one + /// + /// * `sql` - The [`&str`] to be wired in the SQL + fn push_sql(self, sql: &str); + + /// Generates a `WHERE` SQL clause for constraint the query. + /// + /// * `column` - An [`&str`] that will provide the target column name + /// * `op` - Any element that implements [`Operator`] for create the comparison + /// or equality binary operator + /// * `value` - Any implementor of [`QueryParameter] that will be the value to filter + fn r#where(self, column: &'a str, op: Comp, value: &'a dyn QueryParameter) -> Self; + + /// Generates a `WHERE` SQL clause for constraint the query. 
+ /// + /// * `column` - A [`FieldValueIdentifier`] that will provide the target + /// column name and the value for the filter + /// * `op` - Any element that implements [`Operator`] for create the comparison + /// or equality binary operator + fn where_value(self, column: &'a Z, op: Comp) -> Self; + + /// Generates an `AND` SQL clause for constraint the query. + /// + /// * `column` - A [`FieldValueIdentifier`] that will provide the target + /// column name and the value for the filter + /// * `op` - Any element that implements [`Operator`] for create the comparison + /// or equality binary operator + fn and(self, column: &'a Z, op: Comp) -> Self; + + /// Generates an `AND` SQL clause for constraint the query that's being constructed + /// + /// * `column` - A [`FieldIdentifier`] that will provide the target + /// column name for the filter, based on the variant that represents + /// the field name that maps the targeted column name + /// * `values` - An array of [`QueryParameter`] with the values to filter + /// inside the `IN` operator + fn and_values_in<'b, Z, Q>(self, column: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + Self: std::marker::Sized; + + /// Generates an `OR` SQL clause for constraint the query that will create + /// the filter in conjunction with an `IN` operator that will ac + /// + /// * `column` - A [`FieldIdentifier`] that will provide the target + /// column name for the filter, based on the variant that represents + /// the field name that maps the targeted column name + /// * `values` - An array of [`QueryParameter`] with the values to filter + /// inside the `IN` operator + fn or_values_in<'b, Z, Q>(self, r#or: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + Self: std::marker::Sized; + + /// Generates an `OR` SQL clause for constraint the query. 
+ /// + /// * `column` - A [`FieldValueIdentifier`] that will provide the target + /// column name and the value for the filter + /// * `op` - Any element that implements [`Operator`] for create the comparison + /// or equality binary operator + fn or(self, column: &'a Z, op: Comp) -> Self; + + /// Generates a `ORDER BY` SQL clause for constraint the query. + /// + /// * `order_by` - A [`FieldIdentifier`] that will provide the target column name + /// * `desc` - a boolean indicating if the generated `ORDER_BY` must be in ascending or descending order + fn order_by(self, order_by: Z, desc: bool) -> Self; +} diff --git a/canyon_core/src/query/querybuilder/mod.rs b/canyon_core/src/query/querybuilder/mod.rs new file mode 100644 index 00000000..6484710f --- /dev/null +++ b/canyon_core/src/query/querybuilder/mod.rs @@ -0,0 +1,5 @@ +pub mod contracts; +pub mod types; +pub mod syntax; + +pub use self::{contracts::*, types::*}; diff --git a/canyon_core/src/query/querybuilder/syntax/clause.rs b/canyon_core/src/query/querybuilder/syntax/clause.rs new file mode 100644 index 00000000..30d42204 --- /dev/null +++ b/canyon_core/src/query/querybuilder/syntax/clause.rs @@ -0,0 +1,30 @@ +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; +use crate::query::querybuilder::syntax::tokens::SqlToken; + +pub struct ConditionClause<'a> { + // TODO: where are missing complex where usages, like in joins, so we should consider to add the table + // to the column like where table.column = ... + pub(crate) kind: ConditionClauseKind, + pub(crate) column_name: SqlToken<'a>, + pub(crate) operator: Comp, + pub(crate) value: &'a dyn QueryParameter +} +#[derive(Eq, PartialEq)] +pub enum ConditionClauseKind { + Where, + And, + Or, + In // TODO: should this one be a Comp instead? 
+} + +impl<'a> AsRef for ConditionClauseKind { + fn as_ref(&self) -> &str { + match self { + ConditionClauseKind::Where => "WHERE", + ConditionClauseKind::And => "AND", + ConditionClauseKind::In => "IN", + ConditionClauseKind::Or => "OR", + } + } +} \ No newline at end of file diff --git a/canyon_core/src/query/querybuilder/syntax/mod.rs b/canyon_core/src/query/querybuilder/syntax/mod.rs new file mode 100644 index 00000000..c063b12d --- /dev/null +++ b/canyon_core/src/query/querybuilder/syntax/mod.rs @@ -0,0 +1,4 @@ +pub(crate) mod query_kind; +pub(crate) mod table_metadata; +pub(crate) mod tokens; +pub(crate) mod clause; diff --git a/canyon_core/src/query/querybuilder/syntax/query_kind.rs b/canyon_core/src/query/querybuilder/syntax/query_kind.rs new file mode 100644 index 00000000..381a00d0 --- /dev/null +++ b/canyon_core/src/query/querybuilder/syntax/query_kind.rs @@ -0,0 +1,27 @@ +use std::borrow::Cow; +use crate::query::querybuilder::syntax::tokens::{SqlToken, ToSqlTokens}; + +pub enum QueryKind { + Select, + Update, + Delete, +} + +impl<'a> ToSqlTokens<'a> for QueryKind { + fn to_tokens(&self) -> SqlToken<'a> { + match self { + QueryKind::Select => SqlToken::Keyword(Cow::from("SELECT")), + QueryKind::Update => SqlToken::Keyword(Cow::from("UPDATE")), + QueryKind::Delete => SqlToken::Keyword(Cow::from("DELETE")), + } + } +} +impl AsRef for QueryKind { + fn as_ref(&self) -> &str { + match self { + QueryKind::Select => { "SELECT" } + QueryKind::Update => { "UPDATE " } + QueryKind::Delete => { "DELETE " } + } + } +} diff --git a/canyon_core/src/query/querybuilder/syntax/table_metadata.rs b/canyon_core/src/query/querybuilder/syntax/table_metadata.rs new file mode 100644 index 00000000..a4bbc58e --- /dev/null +++ b/canyon_core/src/query/querybuilder/syntax/table_metadata.rs @@ -0,0 +1,79 @@ +use std::fmt::{Display, Formatter}; +use crate::query::querybuilder::syntax::tokens::{SqlToken, ToSqlTokens}; + +#[derive(Clone, Default, Debug)] +pub struct TableMetadata { + 
pub schema: Option, + pub name: String, +} // TODO: we can have those fields as Cow<'_> for max performance + +impl<'a> ToSqlTokens<'a> for TableMetadata { + fn to_tokens(&self) -> SqlToken<'a> { + match &self.schema { + Some(s) => { + out.push(SqlToken::Ident(s)); + out.push(SqlToken::Symbol('.')); + out.push(SqlToken::Ident(&self.name)); + } + None => { + out.push(SqlToken::Ident(&self.name)); + } + } + } +} +impl From<&str> for TableMetadata { + /// Creates a new [`TableMetadata`] from a string slice. + /// + /// If the slice contains a dot, we assume that is a schema.table_name format, otherwise, + /// we assume that the client is just creating a [`Self`] from the passed in string + fn from(value: &str) -> Self { + if let Some((schema, table)) = value.split_once('.') { + TableMetadata { + schema: Some(schema.to_string()), + name: table.to_string(), + } + } else { + TableMetadata { + schema: None, + name: value.to_string(), + } + } + } +} + + +impl<'a> TableMetadata { + pub fn new(schema: &'a str, name: &'a str) -> Self { + Self { schema: Some(schema.to_string()), name: name.to_string() } + } + pub fn schema(&mut self, schema: String) { self.schema = Some(schema); } + pub fn table_name(&mut self, table_name: String) { self.name = table_name } + + /// Returns an already formatted version of the schema and table of a target database table + /// ready to be used in a SQL statement. + /// + /// This method allocates a new string, so it returns an owned one to the callee. 
+ /// Just take it in consideration if someday someone uses it outside the macro generation + /// and there's some heavy callee procedure + pub fn sql(&self) -> String { + match &self.schema { + Some(schema_name) => {format!("{}.{}", schema_name, self.name)} + None => self.name.to_string() + } + } +} + +impl Display for TableMetadata { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + match &self.schema { + Some(schema_name) => {write!(f, "{}.{}", schema_name, self.name)} + None => {write!(f, "{}", self.name)} + } + } +} + +impl AsRef for TableMetadata { + fn as_ref(&self) -> &str { + self.schema.as_ref().unwrap() + } +} diff --git a/canyon_core/src/query/querybuilder/syntax/tokens.rs b/canyon_core/src/query/querybuilder/syntax/tokens.rs new file mode 100644 index 00000000..115e93b7 --- /dev/null +++ b/canyon_core/src/query/querybuilder/syntax/tokens.rs @@ -0,0 +1,49 @@ +use std::borrow::Cow; +use std::fmt::Write; +use crate::connection::database_type::DatabaseType; + +pub trait ToSqlTokens<'a> { + fn to_tokens(&self) -> SqlToken<'a>; +} + +pub enum SqlToken<'a> { + Keyword(Cow<'a, str>), // SELECT, WHERE, AND, OR, FROM, UPDATE, DELETE + WhiteSpace, + Ident(Cow<'a, str>), // table, column + Symbol(char), // = , ( ) , . + Placeholder(usize), // $1, ? , @P1 +} + +pub struct TokenWriter<'a> { + pub tokens: Vec>, +} + +impl<'a> TokenWriter<'a> { + pub fn new() -> Self { + Self { tokens: Vec::new() } + } + + pub fn push(&mut self, token: SqlToken<'a>) { + self.tokens.push(token); + } + + pub fn render(self, db: DatabaseType) -> String { + let mut out = String::new(); + + for t in self.tokens { + match t { + SqlToken::Keyword(k) => write!(out, " {}", k).unwrap(), + SqlToken::Ident(i) => write!(out, " {}", i).unwrap(), + SqlToken::Symbol(c) => write!(out, " {}", c).unwrap(), + SqlToken::Placeholder(p) => match db { // TODO: invent something like placeholder kind, so we can avoid to match it on every pplaceholder? 
+ DatabaseType::PostgreSql => write!(out, " ${}", p).unwrap(), + DatabaseType::SqlServer => write!(out, " @P{}", p).unwrap(), + DatabaseType::MySQL => write!(out, " ?").unwrap(), + DatabaseType::Deferred => write!(out, " ?").unwrap(), + }, + } + } + + out.trim_start().to_string() + } +} \ No newline at end of file diff --git a/canyon_core/src/query/querybuilder/types/delete.rs b/canyon_core/src/query/querybuilder/types/delete.rs new file mode 100644 index 00000000..0de2120a --- /dev/null +++ b/canyon_core/src/query/querybuilder/types/delete.rs @@ -0,0 +1,106 @@ +use crate::connection::database_type::DatabaseType; +use crate::query::bounds::{FieldIdentifier, FieldValueIdentifier}; +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; +use crate::query::query::Query; +use crate::query::querybuilder::types::TableMetadata; +use crate::query::querybuilder::{DeleteQueryBuilderOps, QueryBuilder, QueryBuilderOps}; +use crate::query::querybuilder::syntax::query_kind::QueryKind; +use std::error::Error; + +/// Contains the specific database operations associated with the +/// *DELETE* SQL statements. 
+/// +/// * `set` - To construct a new `SET` clause to determine the columns to +/// update with the provided values +pub struct DeleteQueryBuilder<'a> { + pub(crate) _inner: QueryBuilder<'a>, +} + +impl<'a> DeleteQueryBuilder<'a> { + /// Generates a new public instance of the [`DeleteQueryBuilder`] + pub fn new( + table_schema_data: impl Into, + ) -> Result> { + Self::new_for(table_schema_data, DatabaseType::Deferred) + } + + pub fn new_for( + table_schema_data: impl Into, + database_type: DatabaseType, + ) -> Result> { + Ok(Self { + _inner: QueryBuilder::new(table_schema_data, QueryKind::Delete, database_type)?, + }) + } + + pub fn build(self) -> Result, Box> { + self._inner.build() + } +} + +impl<'a> DeleteQueryBuilderOps<'a> for DeleteQueryBuilder<'a> {} // NOTE: for now, this is just a type formalism + +impl<'a> QueryBuilderOps<'a> for DeleteQueryBuilder<'a> { + #[inline] + fn read_sql(&'a self) -> &'a str { + self._inner.sql.as_str() + } + + #[inline(always)] + fn push_sql(mut self, sql: &str) { + self._inner.sql.push_str(sql); + } + + #[inline] + fn r#where(mut self, column_name: &'a str, operator: Comp, value: &'a dyn QueryParameter) -> Self { + self._inner.r#where(column_name, operator, value); + self + } + + #[inline] + fn where_value(mut self, r#where: &'a Z, op: Comp) -> Self { + self._inner.where_value(r#where, op); + self + } + + #[inline] + fn and(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.and(column, op); + self + } + + #[inline] + fn and_values_in<'b, Z, Q>(mut self, r#and: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + { + self._inner.and_values_in(and, values)?; + Ok(self) + } + + #[inline] + fn or_values_in<'b, Z, Q>(mut self, r#or: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + { + self._inner.or_values_in(or, values)?; + Ok(self) + } + + #[inline] + 
fn or(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.or(column, op); + self + } + + #[inline] + fn order_by(mut self, order_by: Z, desc: bool) -> Self { + self._inner.order_by(order_by, desc); + self + } +} diff --git a/canyon_core/src/query/querybuilder/types/mod.rs b/canyon_core/src/query/querybuilder/types/mod.rs new file mode 100644 index 00000000..bebf078e --- /dev/null +++ b/canyon_core/src/query/querybuilder/types/mod.rs @@ -0,0 +1,324 @@ +pub mod delete; +pub mod select; +pub mod update; + +use crate::query::querybuilder::syntax::table_metadata::TableMetadata; +pub use self::{delete::*, select::*, update::*}; +use crate::connection::database_type::DatabaseType; +use crate::query::bounds::{FieldIdentifier, FieldValueIdentifier}; +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; +use crate::query::query::Query; +use std::error::Error; +use std::fmt::Write; +use crate::query::querybuilder::syntax::clause::{ConditionClause, ConditionClauseKind}; +use crate::query::querybuilder::syntax::query_kind::QueryKind; +use crate::query::querybuilder::syntax::tokens::SqlToken; + +/// Type for construct more complex queries than the classical CRUD ones. 
+pub struct QueryBuilder<'a> { + pub(crate) meta: TableMetadata, + pub(crate) kind: QueryKind, + pub(crate) params: Vec<&'a dyn QueryParameter>, + pub(crate) database_type: DatabaseType, + pub(crate) condition_clauses: Vec>, +} + +unsafe impl Send for QueryBuilder<'_> {} +unsafe impl Sync for QueryBuilder<'_> {} + +impl<'a> QueryBuilder<'a> { + pub fn new( + table_metadata: impl Into, + kind: QueryKind, + database_type: DatabaseType, + ) -> Result> + { + Ok(Self { + meta: table_metadata.into(), + kind, + sql: String::new(), + params: Vec::new(), + database_type, + condition_clauses: Vec::new(), + }) + } + + /// Convenient SQL writer that starts all the appended SQL sentences by adding an initial empty + /// whitespace + pub fn push_sql(&mut self, part: &str) -> Result<(), Box> { + write!(self.sql, " {}", part)?; + Ok(()) + } + + /// Same as [Self::push_sql] but for adding char values to the underlying buffer + pub fn push_sql_char(&mut self, part: char) -> Result<(), Box> { + write!(self.sql, " {}", part)?; + Ok(()) + } + + // pub fn build(mut self) -> Result, Box> { + // self.sql.push_str(self.kind.as_ref()); + // + // let __self = __impl::check_invariants_over_condition_clauses(self)?; + // let mut __self = __impl::write_from_clause(__self)?; + // + // __self.sql.push(';'); + // Ok(Query::new(__self.sql, __self.params)) + // } + + pub fn build(self) -> Result, Box> { + let qb = __impl::check_invariants_over_condition_clauses(self)?; + let mut tokens = Vec::::new(); + + __impl::emit_kind(&qb, &mut tokens); + __impl::emit_from(&qb, &mut tokens); + __impl::emit_conditions(&qb, &mut tokens); + + tokens.push(SqlToken::Symbol(';')); + + let sql = SqlToken::render_all(&tokens); + Ok(Query::new(sql, qb.params)) + } + + fn r#where(&mut self, column_name: &'a str, operator: Comp, value: &'a dyn QueryParameter) { + __impl::create_condition_clause(self, ConditionClauseKind::Where, column_name, operator, value); + } + + pub fn where_value(&mut self, r#where: &'a Z, operator: 
Comp) { + let (column_name, value) = r#where.value(); + self.params.push(value); + __impl::create_condition_clause(self, ConditionClauseKind::Where, column_name, operator, value); + } + + pub fn and(&mut self, r#and: &'a Z, operator: Comp) { + let (column_name, value) = r#and.value(); + self.params.push(value); + __impl::create_condition_clause(self, ConditionClauseKind::And, column_name, operator, value); + } + + pub fn and_values_in<'b, Z, Q>(&mut self, field: Z, values: &'a [Q]) + -> Result<(), Box> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q> + { + __impl::generate_values_in_for_and_or_or_clause(self, ConditionClauseKind::And, field, values)?; + Ok(()) + } + + pub fn or_values_in<'b, Z, Q>(&mut self, r#or: Z, values: &'a [Q]) + -> Result<(), Box> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q> + { + __impl::generate_values_in_for_and_or_or_clause(self, ConditionClauseKind::Or, r#or, values)?; + Ok(()) + } + + pub fn or(&mut self, r#or: &'a Z, operator: Comp) { + let (column_name, value) = r#or.value(); + self.params.push(value); + __impl::create_condition_clause(self, ConditionClauseKind::And, column_name, operator, value); + } + + #[inline] + pub fn order_by(&mut self, order_by: Z, desc: bool) { + self.sql.push_str( + &(format!( + " ORDER BY {}{}", + order_by.as_str(), + if desc { " DESC " } else { "" } + )), + ); + } + +} + + +mod __impl { + use std::borrow::Cow; + use std::error::Error; + use crate::query::querybuilder::types::__detail::write_param_placeholder; + use crate::query::querybuilder::QueryBuilder; + use std::fmt::Write; + use crate::query::bounds::FieldIdentifier; + use crate::query::operators::Comp; + use crate::query::parameters::QueryParameter; + use crate::query::querybuilder::syntax::clause::{ConditionClause, ConditionClauseKind}; + use crate::query::querybuilder::syntax::query_kind::QueryKind; + use 
crate::query::querybuilder::syntax::tokens::{SqlToken, ToSqlTokens}; + use crate::query::querybuilder::types::__validators; + + pub(crate) fn emit_kind<'a>(qb: &'a QueryBuilder<'a>, out: &mut Vec>) { + out.push(qb.kind.to_tokens()); + } + + pub(crate) fn emit_from<'a>(qb: &QueryBuilder<'a>, out: &mut Vec>) { + match qb.kind { + QueryKind::Select => { + out.push(SqlToken::Symbol('*')); + out.push(SqlToken::Keyword(Cow::from("FROM"))); + qb.meta.to_tokens(out); + } + QueryKind::Delete => { + out.push(SqlToken::Keyword(std::borrow::Cow::Borrowed("FROM"))); + qb.meta.to_tokens(out); + } + QueryKind::Update => { + qb.meta.to_tokens(out); + } + } + } + + pub(crate) fn emit_conditions<'a>( + qb: &QueryBuilder<'a>, + out: &mut Vec> + ) { + for (i, c) in qb.condition_clauses.iter().enumerate() { + c.to_tokens(out); + out.push(SqlToken::Placeholder(i + 1)); + } + } + + pub(crate) fn write_from_clause<'a>(mut _self: QueryBuilder<'a>) -> Result, Box> { + if let Some(where_clause) = &_self.condition_clauses.first() { + write!(_self.sql, + " WHERE {} {}", + where_clause.column_name, + where_clause.operator + )?; + write_param_placeholder(_self.database_type, &mut _self.sql, _self.params.iter())?; + } + Ok(_self) + } + + + pub(crate) fn generate_values_in_for_and_or_or_clause<'a, Z, Q>( + _self: &mut QueryBuilder<'a>, + conjunction_clause_kind: ConditionClauseKind, + field: Z, + values: &'a [Q] + ) -> Result<(), Box> + where + Q: QueryParameter, + Z: FieldIdentifier, + Vec<&'a dyn QueryParameter>: Extend<&'a Q> + { + let target_column = field.as_str(); + __validators::check_not_empty_in_clause_values(&_self.meta, target_column, values)?; + + _self.sql.push_str(" "); + _self.sql.push_str(conjunction_clause_kind.as_ref()); + _self.sql.push_str(" "); + _self.sql.push_str(target_column); + _self.sql.push_str(" IN "); // TODO: was for reference, this is wrong + _self.sql.push_str(ConditionClauseKind::In.as_ref()); + _self.sql.push_str(" ("); + + let start = _self.params.len(); + let 
placeholders = (0..values.len()) + .map(|i| format!("${}", start + i + 1)) + .collect::>() + .join(", "); + + _self.sql.push_str(&placeholders); + _self.sql.push(')'); + + _self.params.extend(values); + + Ok(()) + } + + /// Quick standalone that acts as a façade for an orchestrator that just organizes a procedural way of testing + /// that the constructed underlying query is syntactically correct + pub(crate) fn check_invariants_over_condition_clauses<'a>(_self: QueryBuilder<'a>) -> Result, Box> { + let _self = super::__validators::check_where_clause_position(_self)?; + Ok(_self) + } + + pub(crate) fn create_condition_clause<'a>(_self: &mut QueryBuilder<'a>, kind: ConditionClauseKind, column_name: &'a str, operator: Comp, value: &'a dyn QueryParameter) { + _self.condition_clauses.push( + ConditionClause { + kind, + column_name, + operator, + value + }); + } +} + + +mod __detail { + use crate::connection::database_type::DatabaseType; + use std::error::Error; + use std::fmt::Write; + + /// Convenient standalone that helps us to interpolate the placeholder of the parameters of a SQL + /// query directly into the passed in buffer, avoiding the need to construct and allocate temporary strings + /// for such purpose + pub(crate) fn write_param_placeholder(db_type: DatabaseType, buffer: &mut String, params: impl Iterator) -> Result<(), Box> { + Ok(match db_type{ + DatabaseType::PostgreSql => write!(buffer, "${}", calculate_param_placeholder_count_value(params)), + DatabaseType::SqlServer => write!(buffer, "@P{}", calculate_param_placeholder_count_value(params)), + DatabaseType::MySQL => write!(buffer, "?"), + _ => panic!("Provisional (placeholder)"), + }?) 
+ } + + fn calculate_param_placeholder_count_value(container: impl Iterator) -> usize{ + container.count() + } +} + +mod __validators { + use std::error::Error; + use std::fmt::Display; + use crate::query::parameters::QueryParameter; + use crate::query::querybuilder::QueryBuilder; + use crate::query::querybuilder::syntax::clause::ConditionClauseKind; + use crate::query::querybuilder::types::__errors; + + pub(crate) fn check_where_clause_position<'a>(_self: QueryBuilder<'a>) -> Result< QueryBuilder<'a>, Box> { + if let Some(condition_clause) = &_self.condition_clauses.first() { + if condition_clause.kind.ne(&ConditionClauseKind::Where) { // TODO: decide if we just re-organize the condition clauses + return __errors::where_clause_position() + } + } + Ok(_self) + } + + pub(crate) fn check_not_empty_in_clause_values<'a, 'b, Q>(table_metadata: impl Display, column: &'a str, values: &'a [Q]) + -> Result<(), Box> + where + Q: QueryParameter { + if values.is_empty() { + return __errors::empty_in_clause(table_metadata, column); + } + Ok(()) + } +} + +mod __errors { + use std::error::Error; + use std::fmt::Display; + use std::io::ErrorKind; + + use crate::query::querybuilder::QueryBuilder; + + + pub(crate) fn where_clause_position<'a>() -> Result, Box> { + return Err(std::io::Error::new( // TODO: CanyonError + ErrorKind::Unsupported, + "Where clauses should be the first condition clause on a SQL sentence").into()) + } + + pub(crate) fn empty_in_clause<'a, 'b>(table_metadata: impl Display, column: &'a str) -> Result<(), Box> { + return Err(std::io::Error::new( // TODO: CanyonError + ErrorKind::Unsupported, + format!("An IN clause has been added with empty values for {table_metadata} on the column: {column}", )).into()) + } +} diff --git a/canyon_core/src/query/querybuilder/types/select.rs b/canyon_core/src/query/querybuilder/types/select.rs new file mode 100644 index 00000000..55e6dd80 --- /dev/null +++ b/canyon_core/src/query/querybuilder/types/select.rs @@ -0,0 +1,203 @@ 
+use crate::connection::database_type::DatabaseType; +use crate::query::bounds::TableMetadata; +use crate::query::bounds::{FieldIdentifier, FieldValueIdentifier}; +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; +use crate::query::query::Query; +use crate::query::querybuilder::types::TableMetadata as TableSchemaData; +use crate::query::querybuilder::{QueryBuilder, QueryBuilderOps, SelectQueryBuilderOps}; +use std::error::Error; +use crate::query::querybuilder::syntax::query_kind::QueryKind; + +pub struct SelectQueryBuilder<'a> { + pub(crate) _inner: QueryBuilder<'a>, + pub(crate) columns: &'a [String], +} + +impl<'a> SelectQueryBuilder<'a> { + /// The constructor for creating [`QueryBuilder`] instances of type: SELECT + pub fn new( + table_schema_data: impl Into, + ) -> Result> + { + SelectQueryBuilder::new_for(table_schema_data, DatabaseType::Deferred) + } + + /// Same as [`SelectQueryBuilder::new`] but specifying the [`DatabaseType`] + pub fn new_for( + table_schema_data: impl Into, + database_type: DatabaseType, + ) -> Result> + { + Ok(Self { + _inner: QueryBuilder::new(table_schema_data, QueryKind::Select, database_type)?, + columns: &[], + }) + } + + pub fn build(self) -> Result, Box> { + let __self = __impl::write_columns_or_select_all(self)?; + let __self = __impl::write_from_clause(__self)?; + __self._inner.build() + } +} + +impl<'a> SelectQueryBuilderOps<'a> for SelectQueryBuilder<'a> { + fn with_columns(mut self, columns: &'a [String]) -> Self { + self.columns = columns; + self + } + + fn left_join( + mut self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, // TODO: t_col, not only col + col2: impl FieldIdentifier, + ) -> Self { + self._inner.sql.push_str(&format!( + " LEFT JOIN {join_table} ON {} = {}", // TODO: this should be avoided + col1.table_and_column_name(), + col2.table_and_column_name() + )); + self + } + + fn inner_join( + mut self, + join_table: impl TableMetadata, + col1: impl 
FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self { + self._inner.sql.push_str(&format!( + " INNER JOIN {join_table} ON {} = {}", + col1.table_and_column_name(), + col2.table_and_column_name() + )); + self + } + + fn right_join( + mut self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self { + self._inner.sql.push_str(&format!( + " RIGHT JOIN {join_table} ON {} = {}", + col1.table_and_column_name(), + col2.table_and_column_name() + )); + self + } + + fn full_join( + mut self, + join_table: impl TableMetadata, + col1: impl FieldIdentifier, + col2: impl FieldIdentifier, + ) -> Self { + self._inner.sql.push_str(&format!( + " FULL JOIN {join_table} ON {} = {}", + col1.table_and_column_name(), + col2.table_and_column_name() + )); + self + } +} + +impl<'a> QueryBuilderOps<'a> for SelectQueryBuilder<'a> { + #[inline] + fn read_sql(&'a self) -> &'a str { + self._inner.sql.as_str() + } + + #[inline(always)] + fn push_sql(mut self, sql: &str) { + self._inner.sql.push_str(sql); + } + + #[inline] + fn r#where(mut self, column_name: &'a str, operator: Comp, value: &'a dyn QueryParameter) -> Self { + self._inner.r#where(column_name, operator, value); + self + } + + #[inline] + fn where_value(mut self, r#where: &'a Z, op: Comp) -> Self { + self._inner.where_value(r#where, op); + self + } + + #[inline] + fn and(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.and(column, op); + self + } + + #[inline] + fn and_values_in<'b, Z, Q>(mut self, r#and: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + Self: Sized + { + self._inner.and_values_in(and, values)?; + Ok(self) + } + + #[inline] + fn or_values_in<'b, Z, Q>(mut self, r#and: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + Self: std::marker::Sized, + { + self._inner.or_values_in(and, 
values)?; + Ok(self) + } + + #[inline] + fn or(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.or(column, op); + self + } + + #[inline] + fn order_by(mut self, order_by: Z, desc: bool) -> Self { + self._inner.order_by(order_by, desc); + self + } +} + +mod __impl { + use crate::query::querybuilder::SelectQueryBuilder; + use std::error::Error; + use std::fmt::Write; + + /// Appends to the underlying SQL buffer all the columns passed in by the callee or simply pushes + /// a wildcard * for the SELECT * FROM + pub(crate) fn write_columns_or_select_all<'a>( + mut _self: SelectQueryBuilder<'a>, + ) -> Result, Box> { + if _self.columns.is_empty() { + _self._inner.push_sql_char('*')?; + } else { + for (i, c) in _self.columns.iter().enumerate() { + if i > 0 { + _self._inner.push_sql(", ")?; + } + _self._inner.sql.push_str(c); + } + } + Ok(_self) + } + + pub(crate) fn write_from_clause<'a>( + mut _self: SelectQueryBuilder<'a>, + ) -> Result, Box> { + write!(_self._inner.sql, "FROM {}", _self._inner.meta)?; + Ok(_self) + } +} diff --git a/canyon_core/src/query/querybuilder/types/update.rs b/canyon_core/src/query/querybuilder/types/update.rs new file mode 100644 index 00000000..5122203e --- /dev/null +++ b/canyon_core/src/query/querybuilder/types/update.rs @@ -0,0 +1,173 @@ +use crate::connection::database_type::DatabaseType; +use crate::query::bounds::{FieldIdentifier, FieldValueIdentifier}; +use crate::query::operators::Comp; +use crate::query::parameters::QueryParameter; +use crate::query::query::Query; +use crate::query::querybuilder::types::TableMetadata; +use crate::query::querybuilder::{QueryBuilder, QueryBuilderOps, UpdateQueryBuilderOps}; +use crate::query::querybuilder::syntax::query_kind::QueryKind; +use std::error::Error; +use crate::canyon::Canyon; + +/// Contains the specific database operations of the *UPDATE* SQL statements. 
+pub struct UpdateQueryBuilder<'a> { + pub(crate) _inner: QueryBuilder<'a>, + pub(crate) columns: &'a [String], +} + +impl<'a> UpdateQueryBuilder<'a> { + /// Generates a new public instance of the [`UpdateQueryBuilder`] + pub fn new( + table_schema_data: impl Into, + ) -> Result> + { + UpdateQueryBuilder::new_for(table_schema_data, DatabaseType::Deferred) + } + + pub fn new_for( + table_schema_data: impl Into, + database_type: DatabaseType, + ) -> Result> + { + Ok(Self { + _inner: QueryBuilder::new(table_schema_data, QueryKind::Update, database_type)?, + columns: &[], + }) + } + + pub fn build(mut self) -> Result, Box> { + __impl::create_set_clause_columns_with_placeholders(&mut self); + self._inner.build() + } +} + +impl<'a> UpdateQueryBuilderOps<'a> for UpdateQueryBuilder<'a> { + fn set(mut self, columns: &'a [String]) -> Result> where Self: std::marker::Sized { + __validators::set_clause_values_not_empty(columns)?; + self.columns = columns; + Ok(self) + } + + fn set_with_values(mut self, columns: &'a [(Z, Q)]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a dyn QueryParameter>: Extend<&'a Q> + { + __validators::set_clause_not_already_present(&self)?; + __validators::set_clause_values_not_empty(columns)?; + + self._inner.params.extend(columns.iter().map(|(_l, value)| value)); + + Ok(self) + } +} + +impl<'a> QueryBuilderOps<'a> for UpdateQueryBuilder<'a> { + #[inline] + fn read_sql(&'a self) -> &'a str { + self._inner.sql.as_str() + } + + #[inline(always)] + fn push_sql(mut self, sql: &str) { + self._inner.sql.push_str(sql); + } + + #[inline] + fn r#where(mut self, column_name: &'a str, operator: Comp, value: &'a dyn QueryParameter) -> Self { + self._inner.r#where(column_name, operator, value); + self + } + + #[inline] + fn where_value(mut self, r#where: &'a Z, op: Comp) -> Self { + self._inner.where_value(r#where, op); + self + } + + #[inline] + fn and(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.and(column, op); + self + } + 
+ #[inline] + fn and_values_in<'b, Z, Q>(mut self, r#and: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + { + self._inner.and_values_in(and, values)?; + Ok(self) + } + + #[inline] + fn or_values_in<'b, Z, Q>(mut self, r#or: Z, values: &'a [Q]) -> Result> + where + Z: FieldIdentifier, + Q: QueryParameter, + Vec<&'a (dyn QueryParameter + 'a)>: Extend<&'a Q>, + { + self._inner.or_values_in(or, values)?; + Ok(self) + } + + #[inline] + fn or(mut self, column: &'a Z, op: Comp) -> Self { + self._inner.or(column, op); + self + } + + #[inline] + fn order_by(mut self, order_by: Z, desc: bool) -> Self { + self._inner.order_by(order_by, desc); + self + } +} + +mod __impl { + use crate::query::querybuilder::UpdateQueryBuilder; + + pub(super) fn create_set_clause_columns_with_placeholders(_self: &mut UpdateQueryBuilder) { + let mut set_clause = String::new(); + set_clause.push_str(" SET "); + + for (idx, column) in _self.columns.iter().enumerate() { + set_clause.push_str(&format!( + "{} = ${}", + column, + _self._inner.params.len() + 1 + )); + + if idx < _self.columns.len() - 1 { + set_clause.push_str(", "); + } + } + } +} + +mod __validators { + use std::error::Error; + use std::io::ErrorKind; + use crate::query::querybuilder::UpdateQueryBuilder; + + pub(super) fn set_clause_not_already_present<'a>(_self: &UpdateQueryBuilder<'a>) -> Result<(), Box> { + if !_self.columns.is_empty() { + return Err(std::io::Error::new( // TODO: CanyonError + ErrorKind::Unsupported, + "SET clause already present").into()) + } + Ok(()) + } + + pub(super) fn set_clause_values_not_empty(values: &[T]) -> Result<(), Box> { + if values.is_empty() { + return Err(std::io::Error::new( // TODO: CanyonError + ErrorKind::Unsupported, + "Empty SET clause").into()) + } + Ok(()) + } +} \ No newline at end of file diff --git a/canyon_core/src/row.rs b/canyon_core/src/row.rs new file mode 100644 index 00000000..bbc3eea4 --- 
/dev/null +++ b/canyon_core/src/row.rs @@ -0,0 +1,185 @@ +#![allow(unused_imports)] + +#[cfg(feature = "mysql")] +use mysql_async::{self}; +#[cfg(feature = "mssql")] +use tiberius::{self}; +#[cfg(feature = "postgres")] +use tokio_postgres::{self}; + +use crate::column::{Column, ColumnType}; +use std::{any::Any, borrow::Cow}; + +/// Generic abstraction to represent any of the Row types +/// from the client crates +pub trait Row { + fn as_any(&self) -> &dyn Any; +} + +#[cfg(feature = "postgres")] +impl Row for tokio_postgres::Row { + fn as_any(&self) -> &dyn Any { + self + } +} + +#[cfg(feature = "mssql")] +impl Row for tiberius::Row { + fn as_any(&self) -> &dyn Any { + self + } +} + +#[cfg(feature = "mysql")] +impl Row for mysql_async::Row { + fn as_any(&self) -> &dyn Any { + self + } +} + +pub trait RowOperations { + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tiberius::FromSql<'a>; + #[cfg(feature = "mysql")] + fn get_mysql<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: mysql_async::prelude::FromValue; + + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tokio_postgres::types::FromSql<'a>; + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tiberius::FromSql<'a>; + + #[cfg(feature = "mysql")] + fn get_mysql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: mysql_async::prelude::FromValue; + + fn columns(&self) -> Vec>; +} + +impl RowOperations for &dyn Row { + #[cfg(feature = "postgres")] + fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tokio_postgres::types::FromSql<'a>, + { + if let Some(row) = self.as_any().downcast_ref::() { + 
return row.get::<&str, Output>(col_name); + }; + panic!() // TODO into result and propagate + } + #[cfg(feature = "mssql")] + fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: tiberius::FromSql<'a>, + { + if let Some(row) = self.as_any().downcast_ref::() { + return row + .get::(col_name) + .expect("Failed to obtain a row in the MSSQL migrations"); + }; + panic!() // TODO into result and propagate + } + + #[cfg(feature = "mysql")] + fn get_mysql<'a, Output>(&'a self, col_name: &'a str) -> Output + where + Output: mysql_async::prelude::FromValue, + { + self.get_mysql_opt(col_name) + .expect("Failed to obtain a column in the MySql") + } + + #[cfg(feature = "postgres")] + fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tokio_postgres::types::FromSql<'a>, + { + if let Some(row) = self.as_any().downcast_ref::() { + return row.get::<&str, Option>(col_name); + }; + panic!() // TODO into result and propagate + } + + #[cfg(feature = "mssql")] + fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: tiberius::FromSql<'a>, + { + if let Some(row) = self.as_any().downcast_ref::() { + return row.get::(col_name); + }; + panic!() // TODO into result and propagate + } + #[cfg(feature = "mysql")] + fn get_mysql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option + where + Output: mysql_async::prelude::FromValue, + { + if let Some(row) = self.as_any().downcast_ref::() { + return row.get::(col_name); + }; + panic!() // TODO into result and propagate + } + + fn columns(&self) -> Vec> { + let mut cols = vec![]; + + #[cfg(feature = "postgres")] + { + if self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a tokio postgres Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: std::borrow::Cow::from(c.name()), + type_: crate::column::ColumnType::Postgres(c.type_().to_owned()), + }) + }) + } + } + #[cfg(feature = "mssql")] + { + if 
self.as_any().is::() { + self.as_any() + .downcast_ref::() + .expect("Not a Tiberius Row for column") + .columns() + .iter() + .for_each(|c| { + cols.push(Column { + name: Cow::from(c.name()), + type_: ColumnType::SqlServer(c.column_type()), + }) + }) + }; + } + #[cfg(feature = "mysql")] + { + if let Some(mysql_row) = self.as_any().downcast_ref::() { + mysql_row.columns_ref().iter().for_each(|c| { + cols.push(Column { + name: c.name_str(), + type_: ColumnType::MySQL(c.column_type()), + }) + }) + } + } + + cols + } +} diff --git a/canyon_core/src/rows.rs b/canyon_core/src/rows.rs new file mode 100644 index 00000000..ed72f9b8 --- /dev/null +++ b/canyon_core/src/rows.rs @@ -0,0 +1,225 @@ +#![allow(unreachable_patterns)] + +//! The rows module of Canyon-SQL. +//! +//! This module defines the `CanyonRows` enum, which wraps database query results for supported +//! databases. It also provides traits and utilities for mapping rows to user-defined types. + +#[cfg(feature = "mysql")] +use mysql_async::{self}; +#[cfg(feature = "mssql")] +use tiberius::{self}; +#[cfg(feature = "postgres")] +use tokio_postgres::{self}; + +use crate::mapper::RowMapper; +use crate::row::Row; + +use cfg_if::cfg_if; + +/// Lightweight wrapper over the collection of results of the different crates +/// supported by Canyon-SQL. +/// +/// Even tho the wrapping seems meaningless, this allows us to provide internal +/// operations that are too difficult or too ugly to implement in the macros that +/// will call the query method of Crud. 
+#[derive(Debug)] +pub enum CanyonRows { + #[cfg(feature = "postgres")] + Postgres(Vec), + #[cfg(feature = "mssql")] + Tiberius(Vec), + #[cfg(feature = "mysql")] + MySQL(Vec), +} + +// impl IntoResults for Result { +// fn into_results(self) -> Result, CanyonError> +// where +// R: RowMapper, +// Vec: FromIterator<::Output>, +// { +// self.map(move |rows| rows.into_results::()) +// } +// } + +impl CanyonRows { + #[cfg(feature = "postgres")] + pub fn get_postgres_rows(&self) -> &Vec { + match self { + Self::Postgres(v) => v, + _ => panic!("This branch will never ever should be reachable"), + } + } + + #[cfg(feature = "mssql")] + pub fn get_tiberius_rows(&self) -> &Vec { + match self { + Self::Tiberius(v) => v, + _ => panic!("This branch will never ever should be reachable"), + } + } + + #[cfg(feature = "mysql")] + pub fn get_mysql_rows(&self) -> &Vec { + match self { + Self::MySQL(v) => v, + _ => panic!("This branch will never ever should be reachable"), + } + } + + // /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of R + // pub fn into_results(self) -> Vec + // where + // R: RowMapper, + // Vec: FromIterator<::Output>, + // { + // match self { + // #[cfg(feature = "postgres")] + // Self::Postgres(v) => v.iter().map(|row| R::deserialize_postgresql(row)?).collect(), + // #[cfg(feature = "mssql")] + // Self::Tiberius(v) => v.iter().map(|row| R::deserialize_sqlserver(row)?).collect(), + // #[cfg(feature = "mysql")] + // Self::MySQL(v) => v.iter().map(|row| R::deserialize_mysql(row)?).collect(), + // } + // } + + /// Returns the entity at the given index for the returned rows + /// + /// This is just a wrapper get operation over the [Vec] get operation + pub fn get_row_at(&self, index: usize) -> Option<&dyn Row> { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.get(index).map(|inner| inner as &dyn Row), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.get(index).map(|inner| inner as &dyn Row), + #[cfg(feature = 
"mysql")] + Self::MySQL(v) => v.get(index).map(|inner| inner as &dyn Row), + } + } + + pub fn first_row>(&self) -> Option { + let row = match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.first().map(|r| T::deserialize_postgresql(r)), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.first().map(|r| T::deserialize_sqlserver(r)), + #[cfg(feature = "mysql")] + Self::MySQL(v) => v.first().map(|r| T::deserialize_mysql(r)), + }; + + row?.ok() + } + + /// Returns the number of elements present on the wrapped collection + pub fn len(&self) -> usize { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.len(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.len(), + #[cfg(feature = "mysql")] + Self::MySQL(v) => v.len(), + } + } + + /// Returns true whenever the wrapped collection of Rows does not contains any elements + pub fn is_empty(&self) -> bool { + match self { + #[cfg(feature = "postgres")] + Self::Postgres(v) => v.is_empty(), + #[cfg(feature = "mssql")] + Self::Tiberius(v) => v.is_empty(), + #[cfg(feature = "mysql")] + Self::MySQL(v) => v.is_empty(), + } + } +} + +cfg_if! 
{ + if #[cfg(all(feature = "postgres", feature = "mysql", feature = "mssql"))] { + pub trait FromSql<'a, T>: tokio_postgres::types::FromSql<'a> + + tiberius::FromSql<'a> + + mysql_async::prelude::FromValue {} + impl<'a, T> FromSql<'a, T> for T where T: + tokio_postgres::types::FromSql<'a> + + tiberius::FromSql<'a> + + mysql_async::prelude::FromValue + {} + + pub trait FromSqlOwnedValue: tokio_postgres::types::FromSqlOwned + + tiberius::FromSqlOwned + + mysql_async::prelude::FromValue {} + impl FromSqlOwnedValue for T where T: + tokio_postgres::types::FromSqlOwned + + tiberius::FromSqlOwned + + mysql_async::prelude::FromValue + {} + } else if #[cfg(all(feature = "postgres", feature = "mysql"))] { + pub trait FromSql<'a, T>: tokio_postgres::types::FromSql<'a> + + mysql_async::prelude::FromValue {} + impl<'a, T> FromSql<'a, T> for T where T: + tokio_postgres::types::FromSql<'a> + + mysql_async::prelude::FromValue + {} + + pub trait FromSqlOwnedValue: tokio_postgres::types::FromSqlOwned + + mysql_async::prelude::FromValue {} + impl FromSqlOwnedValue for T where T: + tokio_postgres::types::FromSqlOwned + + mysql_async::prelude::FromValue + {} + } else if #[cfg(all(feature = "postgres", feature = "mssql"))] { + pub trait FromSql<'a, T>: tokio_postgres::types::FromSql<'a> + + tiberius::FromSql<'a> {} + impl<'a, T> FromSql<'a, T> for T where T: + tokio_postgres::types::FromSql<'a> + + tiberius::FromSql<'a> + {} + + pub trait FromSqlOwnedValue: tokio_postgres::types::FromSqlOwned + + tiberius::FromSqlOwned {} + impl FromSqlOwnedValue for T where T: + tokio_postgres::types::FromSqlOwned + + tiberius::FromSqlOwned + {} + } else if #[cfg(all(feature = "mysql", feature = "mssql"))] { + pub trait FromSql<'a, T>: mysql_async::prelude::FromValue + + tiberius::FromSql<'a> {} + impl<'a, T> FromSql<'a, T> for T where T: + mysql_async::prelude::FromValue + + tiberius::FromSql<'a> + {} + + pub trait FromSqlOwnedValue: mysql_async::prelude::FromValue + + tiberius::FromSqlOwned {} + impl 
FromSqlOwnedValue for T where T: + mysql_async::prelude::FromValue + + tiberius::FromSqlOwned + {} + } else if #[cfg(feature = "postgres")] { + pub trait FromSql<'a, T>: tokio_postgres::types::FromSql<'a> {} + impl<'a, T> FromSql<'a, T> for T where T: + tokio_postgres::types::FromSql<'a> {} + + pub trait FromSqlOwnedValue: tokio_postgres::types::FromSqlOwned {} + impl FromSqlOwnedValue for T where T: + tokio_postgres::types::FromSqlOwned {} + } else if #[cfg(feature = "mysql")] { + pub trait FromSql<'a, T>: mysql_async::prelude::FromValue {} + impl<'a, T> FromSql<'a, T> for T where T: + mysql_async::prelude::FromValue {} + + pub trait FromSqlOwnedValue: mysql_async::prelude::FromValue {} + impl FromSqlOwnedValue for T where T: + mysql_async::prelude::FromValue {} + } else if #[cfg(feature = "mssql")] { + pub trait FromSql<'a, T>: tiberius::FromSql<'a> {} + impl<'a, T> FromSql<'a, T> for T where T: + tiberius::FromSql<'a> {} + + pub trait FromSqlOwnedValue: tiberius::FromSqlOwned {} + impl FromSqlOwnedValue for T where T: + tiberius::FromSqlOwned {} + } +} diff --git a/canyon_core/src/transaction.rs b/canyon_core/src/transaction.rs new file mode 100644 index 00000000..70f2bed0 --- /dev/null +++ b/canyon_core/src/transaction.rs @@ -0,0 +1,108 @@ +use crate::connection::contracts::DbConnection; +use crate::mapper::RowMapper; +use crate::rows::FromSqlOwnedValue; +use crate::{query::parameters::QueryParameter, rows::CanyonRows}; +use std::error::Error; +use std::future::Future; + +/// The `Transaction` trait serves as a proxy for types implementing CRUD operations. +/// +/// This trait provides a set of static methods that mirror the functionality of CRUD operations, +/// allowing implementors to be coerced into `<#ty as Transaction>::...` usage patterns. 
+/// It is primarily used by the generated macros of `CrudOperations` to simplify interaction +/// with database entities by abstracting common operations such as querying rows, executing +/// statements, and retrieving single results. +/// +/// # Purpose +/// The `Transaction` trait is typically used to provide a unified interface for CRUD operations +/// on database entities. It enables developers to work with any type that implements the required +/// CRUD traits, abstracting away the underlying database connection details. +/// +/// # Features +/// - Acts as a proxy for CRUD operations. +/// - Provides static methods for common database entity operations. +/// - Simplifies interaction with database entities. +/// +/// # Examples +/// ```ignore +/// async fn perform_query(entity: E) { +/// let result = ::query("SELECT * FROM users", &[], entity).await; +/// match result { +/// Ok(rows) => println!("Retrieved {} rows", rows.len()), +/// Err(e) => eprintln!("Error: {}", e), +/// } +/// } +/// ``` +/// +/// # Methods +/// - `query`: Executes a query and retrieves multiple rows mapped to a user-defined type. +/// - `query_one`: Executes a query and retrieves a single row mapped to a user-defined type. +/// - `query_one_for`: Executes a query and retrieves a single value of a specific type. +/// - `query_rows`: Executes a query and retrieves the raw rows wrapped in `CanyonRows`. +/// - `execute`: Executes a SQL statement and returns the number of affected rows. 
+pub trait Transaction { + fn query( + stmt: S, + params: &[&dyn QueryParameter], + input: impl DbConnection + Send, + ) -> impl Future, Box>> + where + S: AsRef + Send, + R: RowMapper, + Vec: FromIterator<::Output>, + { + async move { input.query(stmt, params).await } + } + + fn query_one<'a, S, Z, R>( + stmt: S, + params: Z, + input: impl DbConnection + Send + 'a, + ) -> impl Future, Box>> + Send + where + S: AsRef + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter]> + Send, + R: RowMapper, + { + async move { input.query_one::(stmt.as_ref(), params.as_ref()).await } + } + + fn query_one_for<'a, S, Z, F: FromSqlOwnedValue>( + stmt: S, + params: Z, + input: impl DbConnection + Send + 'a, + ) -> impl Future>> + Send + where + S: AsRef + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter]> + Send + 'a, + { + async move { input.query_one_for(stmt.as_ref(), params.as_ref()).await } + } + + /// Performs a query against the targeted database by the selected or + /// the defaulted datasource, wrapping the resultant collection of entities + /// in [`super::rows::CanyonRows`] + fn query_rows<'a, S, Z>( + stmt: S, + params: Z, + input: impl DbConnection + Send + 'a, + ) -> impl Future>> + Send + where + S: AsRef + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter]> + Send + 'a, + { + async move { input.query_rows(stmt.as_ref(), params.as_ref()).await } + } + + fn execute<'a, S, Z>( + stmt: S, + params: Z, + input: impl DbConnection + Send + 'a, + ) -> impl Future>> + Send + where + S: AsRef + Send + 'a, + Z: AsRef<[&'a dyn QueryParameter]> + Send + 'a, + { + async move { input.execute(stmt.as_ref(), params.as_ref()).await } + } +} diff --git a/canyon_crud/Cargo.toml b/canyon_crud/Cargo.toml index dfdd3ddb..a4c37b0a 100644 --- a/canyon_crud/Cargo.toml +++ b/canyon_crud/Cargo.toml @@ -10,19 +10,17 @@ license.workspace = true description.workspace = true [dependencies] +canyon_core = { workspace = true } + tokio-postgres = { workspace = true, optional = true } tiberius = { workspace = 
true, optional = true } mysql_async = { workspace = true, optional = true } mysql_common = { workspace = true, optional = true } chrono = { workspace = true } -async-trait = { workspace = true } - -canyon_connection = { workspace = true } - regex = { workspace = true } [features] -postgres = ["tokio-postgres", "canyon_connection/postgres"] -mssql = ["tiberius", "canyon_connection/mssql"] -mysql = ["mysql_async","mysql_common", "canyon_connection/mysql"] +postgres = ["tokio-postgres", "canyon_core/postgres"] +mssql = ["tiberius", "canyon_core/mssql"] +mysql = ["mysql_async","mysql_common", "canyon_core/mysql"] diff --git a/canyon_crud/src/bounds.rs b/canyon_crud/src/bounds.rs deleted file mode 100644 index 27ffb97f..00000000 --- a/canyon_crud/src/bounds.rs +++ /dev/null @@ -1,875 +0,0 @@ -use crate::{ - crud::{CrudOperations, Transaction}, - mapper::RowMapper, -}; -#[cfg(feature = "mysql")] -use canyon_connection::mysql_async::{self, prelude::ToValue}; -#[cfg(feature = "mssql")] -use canyon_connection::tiberius::{self, ColumnData, IntoSql}; -#[cfg(feature = "postgres")] -use canyon_connection::tokio_postgres::{self, types::ToSql}; - -use chrono::{DateTime, FixedOffset, NaiveDate, NaiveDateTime, NaiveTime, Utc}; - -use std::{any::Any, borrow::Cow}; - -/// Created for retrieve the field's name of a field of a struct, giving -/// the Canyon's autogenerated enum with the variants that maps this -/// fields. -/// -/// ``` -/// pub struct Struct<'a> { -/// pub some_field: &'a str -/// } -/// -/// // Autogenerated enum -/// #[derive(Debug)] -/// #[allow(non_camel_case_types)] -/// pub enum StructField { -/// some_field -/// } -/// ``` -/// So, to retrieve the field's name, something like this w'd be used on some part -/// of the Canyon's Manager crate, to wire the necessary code to pass the field -/// name, retrieved from the enum variant, to a called. 
-/// -/// // Something like: -/// `let struct_field_name_from_variant = StructField::some_field.field_name_as_str();` -pub trait FieldIdentifier -where - T: Transaction + CrudOperations + RowMapper, -{ - fn as_str(&self) -> &'static str; -} - -/// Represents some kind of introspection to make the implementors -/// able to retrieve a value inside some variant of an associated enum type. -/// and convert it to a tuple struct formed by the column name as an String, -/// and the dynamic value of the [`QueryParameter<'_>`] trait object contained -/// inside the variant requested, -/// enabling a conversion of that value into something -/// that can be part of an SQL query. -/// -/// -/// Ex: -/// `SELECT * FROM some_table WHERE id = 2` -/// -/// That '2' it's extracted from some enum that implements [`FieldValueIdentifier`], -/// where usually the variant w'd be something like: -/// -/// ``` -/// pub enum Enum { -/// IntVariant(i32) -/// } -/// ``` -pub trait FieldValueIdentifier<'a, T> -where - T: Transaction + CrudOperations + RowMapper, -{ - fn value(self) -> (&'static str, &'a dyn QueryParameter<'a>); -} - -/// Bounds to some type T in order to make it callable over some fn parameter T -/// -/// Represents the ability of an struct to be considered as candidate to perform -/// actions over it as it holds the 'parent' side of a foreign key relation. 
-/// -/// Usually, it's used on the Canyon macros to retrieve the column that -/// this side of the relation it's representing -pub trait ForeignKeyable { - /// Retrieves the field related to the column passed in - fn get_fk_column(&self, column: &str) -> Option<&dyn QueryParameter<'_>>; -} - -/// Generic abstraction to represent any of the Row types -/// from the client crates -pub trait Row { - fn as_any(&self) -> &dyn Any; -} - -#[cfg(feature = "postgres")] -impl Row for tokio_postgres::Row { - fn as_any(&self) -> &dyn Any { - self - } -} - -#[cfg(feature = "mssql")] -impl Row for tiberius::Row { - fn as_any(&self) -> &dyn Any { - self - } -} - -#[cfg(feature = "mysql")] -impl Row for mysql_async::Row { - fn as_any(&self) -> &dyn Any { - self - } -} - -/// Generic abstraction for hold a Column type that will be one of the Column -/// types present in the dependent crates -// #[derive(Copy, Clone)] -pub struct Column<'a> { - name: Cow<'a, str>, - type_: ColumnType, -} -impl<'a> Column<'a> { - pub fn name(&self) -> &str { - &self.name - } - pub fn column_type(&self) -> &ColumnType { - &self.type_ - } - // pub fn type_(&'a self) -> &'_ dyn Type { - // match (*self).type_ { - // #[cfg(feature = "postgres")] ColumnType::Postgres(v) => v as &'a dyn Type, - // #[cfg(feature = "mssql")] ColumnType::SqlServer(v) => v as &'a dyn Type, - // } - // } -} - -pub trait Type { - fn as_any(&self) -> &dyn Any; -} -#[cfg(feature = "postgres")] -impl Type for tokio_postgres::types::Type { - fn as_any(&self) -> &dyn Any { - self - } -} -#[cfg(feature = "mssql")] -impl Type for tiberius::ColumnType { - fn as_any(&self) -> &dyn Any { - self - } -} -#[cfg(feature = "mysql")] -impl Type for mysql_async::consts::ColumnType { - fn as_any(&self) -> &dyn Any { - self - } -} - -/// Wrapper over the dependencies Column's types -pub enum ColumnType { - #[cfg(feature = "postgres")] - Postgres(tokio_postgres::types::Type), - #[cfg(feature = "mssql")] - SqlServer(tiberius::ColumnType), - 
#[cfg(feature = "mysql")] - MySQL(mysql_async::consts::ColumnType), -} - -pub trait RowOperations { - #[cfg(feature = "postgres")] - fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "mssql")] - fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: tiberius::FromSql<'a>; - #[cfg(feature = "mysql")] - fn get_mysql<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: mysql_async::prelude::FromValue; - - #[cfg(feature = "postgres")] - fn get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: tokio_postgres::types::FromSql<'a>; - #[cfg(feature = "mssql")] - fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: tiberius::FromSql<'a>; - - #[cfg(feature = "mysql")] - fn get_mysql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: mysql_async::prelude::FromValue; - - fn columns(&self) -> Vec; -} - -impl RowOperations for &dyn Row { - #[cfg(feature = "postgres")] - fn get_postgres<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: tokio_postgres::types::FromSql<'a>, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row.get::<&str, Output>(col_name); - }; - panic!() // TODO into result and propagate - } - #[cfg(feature = "mssql")] - fn get_mssql<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: tiberius::FromSql<'a>, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row - .get::(col_name) - .expect("Failed to obtain a row in the MSSQL migrations"); - }; - panic!() // TODO into result and propagate - } - - #[cfg(feature = "mysql")] - fn get_mysql<'a, Output>(&'a self, col_name: &'a str) -> Output - where - Output: mysql_async::prelude::FromValue, - { - self.get_mysql_opt(col_name) - .expect("Failed to obtain a column in the MySql") - } - - #[cfg(feature = "postgres")] - fn 
get_postgres_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: tokio_postgres::types::FromSql<'a>, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row.get::<&str, Option>(col_name); - }; - panic!() // TODO into result and propagate - } - - #[cfg(feature = "mssql")] - fn get_mssql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: tiberius::FromSql<'a>, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row.get::(col_name); - }; - panic!() // TODO into result and propagate - } - #[cfg(feature = "mysql")] - fn get_mysql_opt<'a, Output>(&'a self, col_name: &'a str) -> Option - where - Output: mysql_async::prelude::FromValue, - { - if let Some(row) = self.as_any().downcast_ref::() { - return row.get::(col_name); - }; - panic!() // TODO into result and propagate - } - - fn columns(&self) -> Vec { - let mut cols = vec![]; - - #[cfg(feature = "postgres")] - { - if self.as_any().is::() { - self.as_any() - .downcast_ref::() - .expect("Not a tokio postgres Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: Cow::from(c.name()), - type_: ColumnType::Postgres(c.type_().to_owned()), - }) - }) - } - } - #[cfg(feature = "mssql")] - { - if self.as_any().is::() { - self.as_any() - .downcast_ref::() - .expect("Not a Tiberius Row for column") - .columns() - .iter() - .for_each(|c| { - cols.push(Column { - name: Cow::from(c.name()), - type_: ColumnType::SqlServer(c.column_type()), - }) - }) - }; - } - #[cfg(feature = "mysql")] - { - if let Some(mysql_row) = self.as_any().downcast_ref::() { - mysql_row.columns_ref().iter().for_each(|c| { - cols.push(Column { - name: c.name_str(), - type_: ColumnType::MySQL(c.column_type()), - }) - }) - } - } - - cols - } -} - -/// Defines a trait for represent type bounds against the allowed -/// data types supported by Canyon to be used as query parameters. 
-pub trait QueryParameter<'a>: std::fmt::Debug + Sync + Send { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync); - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_>; - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue; -} - -/// The implementation of the [`canyon_connection::tiberius`] [`IntoSql`] for the -/// query parameters. -/// -/// This implementation is necessary because of the generic amplitude -/// of the arguments of the [`Transaction::query`], that should work with -/// a collection of [`QueryParameter<'a>`], in order to allow a workflow -/// that is not dependent of the specific type of the argument that holds -/// the query parameters of the database connectors -#[cfg(feature = "mssql")] -impl<'a> IntoSql<'a> for &'a dyn QueryParameter<'a> { - fn into_sql(self) -> ColumnData<'a> { - self.as_sqlserver_param() - } -} - -//TODO Pending to review and see if it is necessary to apply something similar to the previous implementation. 
- -impl<'a> QueryParameter<'a> for bool { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::Bit(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn ToValue { - self - } -} -impl<'a> QueryParameter<'a> for i16 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I16(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &i16 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I16(Some(**self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I16(*self) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&i16> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I16(Some(*self.unwrap())) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for i32 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> 
ColumnData<'_> { - ColumnData::I32(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &i32 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I32(Some(**self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I32(*self) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&i32> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I32(Some(*self.unwrap())) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for f32 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F32(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &f32 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F32(Some(**self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> 
QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F32(*self) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&f32> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F32(Some( - *self.expect("Error on an f32 value on QueryParameter<'_>"), - )) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for f64 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F64(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &f64 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F64(Some(**self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F64(*self) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&f64> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - 
#[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::F64(Some( - *self.expect("Error on an f64 value on QueryParameter<'_>"), - )) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for i64 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I64(Some(*self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &i64 { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I64(Some(**self)) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I64(*self) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&i64> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::I64(Some(*self.unwrap())) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for String { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - 
ColumnData::String(Some(std::borrow::Cow::Owned(self.to_owned()))) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &String { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::String(Some(std::borrow::Cow::Borrowed(self))) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - match self { - Some(string) => ColumnData::String(Some(std::borrow::Cow::Owned(string.to_owned()))), - None => ColumnData::String(None), - } - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&String> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - match self { - Some(string) => ColumnData::String(Some(std::borrow::Cow::Borrowed(string))), - None => ColumnData::String(None), - } - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for &'_ str { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - ColumnData::String(Some(std::borrow::Cow::Borrowed(*self))) - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option<&'_ str> { - #[cfg(feature = "postgres")] - 
fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - match *self { - Some(str) => ColumnData::String(Some(std::borrow::Cow::Borrowed(str))), - None => ColumnData::String(None), - } - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for NaiveDate { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for NaiveTime { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for NaiveDateTime { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> 
ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} -impl<'a> QueryParameter<'a> for Option { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - self - } -} - -//TODO pending -impl<'a> QueryParameter<'a> for DateTime { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - todo!() - } -} - -impl<'a> QueryParameter<'a> for Option> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - todo!() - } -} - -impl<'a> QueryParameter<'a> for DateTime { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - todo!() - } -} - -impl<'a> QueryParameter<'a> for Option> { - #[cfg(feature = "postgres")] - fn as_postgres_param(&self) -> &(dyn ToSql + Sync) { - self - } - #[cfg(feature = "mssql")] - fn as_sqlserver_param(&self) -> ColumnData<'_> { - self.into_sql() - } - #[cfg(feature = "mysql")] - fn as_mysql_param(&self) -> &dyn mysql_async::prelude::ToValue { - todo!() - } -} diff --git a/canyon_crud/src/crud.rs b/canyon_crud/src/crud.rs 
index 981c24f1..195f84b4 100644 --- a/canyon_crud/src/crud.rs +++ b/canyon_crud/src/crud.rs @@ -1,70 +1,13 @@ -use async_trait::async_trait; -use std::fmt::Display; - -use canyon_connection::canyon_database_connector::DatabaseConnection; -use canyon_connection::{get_database_connection, CACHED_DATABASE_CONN}; - -use crate::bounds::QueryParameter; -use crate::mapper::RowMapper; -use crate::query_elements::query_builder::{ +use canyon_core::connection::contracts::DbConnection; +use canyon_core::connection::database_type::DatabaseType; +use canyon_core::mapper::RowMapper; +use canyon_core::query::bounds::Inspectionable; +use canyon_core::query::parameters::QueryParameter; +use canyon_core::query::querybuilder::{ DeleteQueryBuilder, SelectQueryBuilder, UpdateQueryBuilder, }; -use crate::rows::CanyonRows; - -#[cfg(feature = "mysql")] -pub const DETECT_PARAMS_IN_QUERY: &str = r"\$([\d])+"; -#[cfg(feature = "mysql")] -pub const DETECT_QUOTE_IN_QUERY: &str = r#"\"|\\"#; - -/// This traits defines and implements a query against a database given -/// an statement `stmt` and the params to pass the to the client. 
-/// -/// Returns [`std::result::Result`] of [`CanyonRows`], which is the core Canyon type to wrap -/// the result of the query provide automatic mappings and deserialization -#[async_trait] -pub trait Transaction { - /// Performs a query against the targeted database by the selected or - /// the defaulted datasource, wrapping the resultant collection of entities - /// in [`super::rows::CanyonRows`] - async fn query<'a, S, Z>( - stmt: S, - params: Z, - datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> - where - S: AsRef + Display + Sync + Send + 'a, - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - { - let mut guarded_cache = CACHED_DATABASE_CONN.lock().await; - let database_conn = get_database_connection(datasource_name, &mut guarded_cache); - - match *database_conn { - #[cfg(feature = "postgres")] - DatabaseConnection::Postgres(_) => { - postgres_query_launcher::launch::( - database_conn, - stmt.to_string(), - params.as_ref(), - ) - .await - } - #[cfg(feature = "mssql")] - DatabaseConnection::SqlServer(_) => { - sqlserver_query_launcher::launch::( - database_conn, - &mut stmt.to_string(), - params, - ) - .await - } - #[cfg(feature = "mysql")] - DatabaseConnection::MySQL(_) => { - mysql_query_launcher::launch::(database_conn, stmt.to_string(), params.as_ref()) - .await - } - } - } -} +use std::error::Error; +use std::future::Future; /// *CrudOperations* it's the core part of Canyon-SQL. /// @@ -72,260 +15,248 @@ pub trait Transaction { /// that the user has available, just by deriving the `CanyonCrud` /// derive macro when a struct contains the annotation. /// -/// Also, this traits needs that the type T over what it's generified +/// Also, these traits needs that the type R over what it's generified /// to implement certain types in order to work correctly. 
/// -/// The most notorious one it's the [`RowMapper`] one, which allows +/// The most notorious one it's the [`RowMapper`] one, which allows /// Canyon to directly maps database results into structs. /// /// See it's definition and docs to see the implementations. /// Also, you can find the written macro-code that performs the auto-mapping /// in the *canyon_sql_root::canyon_macros* crates, on the root of this project. -#[async_trait] -pub trait CrudOperations: Transaction +pub trait CrudOperations: Send where - T: CrudOperations + RowMapper, + R: RowMapper, + Vec: FromIterator<::Output>, { - async fn find_all<'a>() -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; + fn find_all() -> impl Future, Box>> + Send; - async fn find_all_datasource<'a>( - datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; - - async fn find_all_unchecked<'a>() -> Vec; - - async fn find_all_unchecked_datasource<'a>(datasource_name: &'a str) -> Vec; - - fn select_query<'a>() -> SelectQueryBuilder<'a, T>; - - fn select_query_datasource(datasource_name: &str) -> SelectQueryBuilder<'_, T>; + fn find_all_with<'a, I>( + input: I, + ) -> impl Future, Box>> + Send + where + I: DbConnection + Send + 'a; - async fn count() -> Result>; + fn select_query<'a>() -> Result, Box>; - async fn count_datasource<'a>( - datasource_name: &'a str, - ) -> Result>; + fn select_query_with<'a>( + database_type: DatabaseType, + ) -> Result, Box>; - async fn find_by_pk<'a>( - value: &'a dyn QueryParameter<'a>, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; + fn count() -> impl Future>> + Send; - async fn find_by_pk_datasource<'a>( - value: &'a dyn QueryParameter<'a>, - datasource_name: &'a str, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>>; + fn count_with<'a, I>( + input: I, + ) -> impl Future>> + Send + where + I: DbConnection + Send + 'a; - async fn insert<'a>(&mut self) -> Result<(), Box>; + fn find_by_pk<'a, 
'b>( + value: &'a dyn QueryParameter, + ) -> impl Future, Box>> + Send; - async fn insert_datasource<'a>( + fn find_by_pk_with<'a, 'b, I>( + value: &'a dyn QueryParameter, + input: I, + ) -> impl Future, Box>> + Send + where + I: DbConnection + Send + 'a; + + /// Inserts the current instance into the corresponding database table using the default datasource. + /// + /// This asynchronous operation creates a new row in the database based on the data in `self`. + /// Upon successful insertion, it updates the primary key field (`self.`) with the value + /// generated by the database. + /// + /// # Behavior + /// + /// - Requires a mutable reference to `self` (`&mut self`) because the method updates the primary key field. + /// - Utilizes the default datasource as specified in the configuration. + /// - Returns a `Result` indicating success or failure of the operation. + /// + /// # Errors + /// + /// Returns an error if: + /// - The insertion fails due to database constraints or connectivity issues. + /// - The default datasource is not properly configured or unavailable. + /// + /// # Examples + /// + /// ```ignore + /// let mut lec = League { + /// id: Default::default(), + /// ext_id: 1, + /// slug: "LEC".to_string(), + /// name: "League Europe Champions".to_string(), + /// region: "EU West".to_string(), + /// image_url: "https://lec.eu".to_string(), + /// }; + /// + /// println!("Before insert: {:?}", lec); + /// + /// match lec.insert().await { + /// Ok(_) => println!("After insert: {:?}", lec), + /// Err(e) => eprintln!("Insert failed: {:?}", e), + /// } + /// ``` + /// + /// # Notes + /// + /// Ensure that the default datasource is correctly configured in your application settings. 
+ /// The primary key field must be set to some column before calling `insert`, otherwise, the + /// operation will be launched anyway and will insert all the fields, so ensure that your table + /// your [`Canyon`] annotations matches your database definitions + fn insert<'a, 'b>( + &'a mut self, + ) -> impl Future>> + Send; + + /// # Brief + /// + /// This operation is the same as [`self.insert()`](method@self.insert) + /// + /// + /// Inserts the current instance into the specified datasource. + /// + /// Similar to [`insert`](Self::insert), but allows specifying the datasource to use for the operation. + /// + /// # Parameters + /// + /// - `input`: An implementation of [`DbConnection`] representing the target datasource. + /// + /// # Behavior + /// + /// - Requires a mutable reference to `self` (`&mut self`) because the method updates the primary key field. + /// - Uses the provided `DbConnection` instead of the default datasource. + /// - Returns a `Result` indicating success or failure of the operation. + /// + /// # Errors + /// + /// Returns an error if: + /// - The insertion fails due to database constraints or connectivity issues. + /// - The provided datasource is not properly configured or unavailable. + /// + /// # Examples + /// + /// ```ignore + /// let mut lec = League { + /// id: Default::default(), + /// ext_id: 1, + /// slug: "LEC".to_string(), + /// name: "League Europe Champions".to_string(), + /// region: "EU West".to_string(), + /// image_url: "https://lec.eu".to_string(), + /// }; + /// + /// let custom_connection = canyon_sql::core::Canyon()::instance()? + /// .get_default_connection()? 
+ /// .lock() + /// .await; + /// + /// match lec.insert_with(custom_connection).await { + /// Ok(_) => println!("Insert successful"), + /// Err(e) => eprintln!("Insert failed: {:?}", e), + /// } + /// ``` + /// + /// # Notes + /// + /// Use this method when you need to insert data into a specific datasource other than the default, + /// or when you have an actual mock of the [`DbConnection`] implementor and you're interested in + /// unit testing your procedure. + fn insert_with<'a, I>( &mut self, - datasource_name: &'a str, - ) -> Result<(), Box>; - - async fn multi_insert<'a>( - instances: &'a mut [&'a mut T], - ) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>>; - - async fn multi_insert_datasource<'a>( - instances: &'a mut [&'a mut T], - datasource_name: &'a str, - ) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>>; - - async fn update(&self) -> Result<(), Box>; - - async fn update_datasource<'a>( - &self, - datasource_name: &'a str, - ) -> Result<(), Box>; - - fn update_query<'a>() -> UpdateQueryBuilder<'a, T>; - - fn update_query_datasource(datasource_name: &str) -> UpdateQueryBuilder<'_, T>; + input: I, + ) -> impl Future>> + Send + where + I: DbConnection + Send + 'a; - async fn delete(&self) -> Result<(), Box>; + fn insert_entity<'a, 'b, T>( + entity: &'a mut T, + ) -> impl Future>> + where + T: RowMapper + Inspectionable<'a> + Sync + 'a; - async fn delete_datasource<'a>( + fn insert_entity_with<'a, 'b, T, I>( + entity: &'a mut T, + input: I, + ) -> impl Future>> + where + T: RowMapper + Inspectionable<'a> + Sync + 'a, + I: DbConnection + Send + 'a; + + // TODO: the horripilant multi_insert MUST be replaced with a batch insert + // fn multi_insert<'a, T>( + // instances: &'a mut [&'a mut T], + // ) -> impl Future>> + Send; + // + // fn multi_insert_with<'a, T, I>( + // instances: &'a mut [&'a mut T], + // input: I, + // ) -> impl Future>> + Send + // where + // I: DbConnection + Send + 'a; + + /// Updates a database 
record that matches the current instance of a T type, returning a + /// result indicating a possible failure querying the database. + fn update(&self) -> impl Future>> + Send; + + fn update_with<'a, I>( &self, - datasource_name: &'a str, - ) -> Result<(), Box>; - - fn delete_query<'a>() -> DeleteQueryBuilder<'a, T>; - - fn delete_query_datasource(datasource_name: &str) -> DeleteQueryBuilder<'_, T>; -} - -#[cfg(feature = "postgres")] -mod postgres_query_launcher { - use canyon_connection::canyon_database_connector::DatabaseConnection; - - use crate::bounds::QueryParameter; - use crate::rows::CanyonRows; - - pub async fn launch<'a, T>( - db_conn: &DatabaseConnection, - stmt: String, - params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { - let mut m_params = Vec::new(); - for param in params { - m_params.push(param.as_postgres_param()); - } - - let r = db_conn - .postgres_connection() - .client - .query(&stmt, m_params.as_slice()) - .await?; - - Ok(CanyonRows::Postgres(r)) - } -} - -#[cfg(feature = "mssql")] -mod sqlserver_query_launcher { - use crate::rows::CanyonRows; - use crate::{ - bounds::QueryParameter, - canyon_connection::{canyon_database_connector::DatabaseConnection, tiberius::Query}, - }; - - pub async fn launch<'a, T, Z>( - db_conn: &mut DatabaseConnection, - stmt: &mut String, - params: Z, - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> + input: I, + ) -> impl Future>> + Send where - Z: AsRef<[&'a dyn QueryParameter<'a>]> + Sync + Send + 'a, - { - // Re-generate de insert statement to adequate it to the SQL SERVER syntax to retrieve the PK value(s) after insert - if stmt.contains("RETURNING") { - let c = stmt.clone(); - let temp = c.split_once("RETURNING").unwrap(); - let temp2 = temp.0.split_once("VALUES").unwrap(); - - *stmt = format!( - "{} OUTPUT inserted.{} VALUES {}", - temp2.0.trim(), - temp.1.trim(), - temp2.1.trim() - ); - } - - let mut mssql_query = 
Query::new(stmt.to_owned().replace('$', "@P")); - params - .as_ref() - .iter() - .for_each(|param| mssql_query.bind(*param)); - - let _results = mssql_query - .query(db_conn.sqlserver_connection().client) - .await? - .into_results() - .await?; - - Ok(CanyonRows::Tiberius( - _results.into_iter().flatten().collect(), - )) - } -} - -#[cfg(feature = "mysql")] -mod mysql_query_launcher { - use std::sync::Arc; - - use mysql_async::prelude::Query; - use mysql_async::QueryWithParams; - use mysql_async::Value; - - use canyon_connection::canyon_database_connector::DatabaseConnection; - - use crate::bounds::QueryParameter; - use crate::rows::CanyonRows; - use mysql_async::Row; - use mysql_common::constants::ColumnType; - use mysql_common::row; + I: DbConnection + Send + 'a; - use super::reorder_params; - use crate::crud::{DETECT_PARAMS_IN_QUERY, DETECT_QUOTE_IN_QUERY}; - use regex::Regex; - - pub async fn launch<'a, T>( - db_conn: &DatabaseConnection, - stmt: String, - params: &'a [&'_ dyn QueryParameter<'_>], - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { - let mysql_connection = db_conn.mysql_connection().client.get_conn().await?; - - let stmt_with_escape_characters = regex::escape(&stmt); - let query_string = - Regex::new(DETECT_PARAMS_IN_QUERY)?.replace_all(&stmt_with_escape_characters, "?"); - - let mut query_string = Regex::new(DETECT_QUOTE_IN_QUERY)? 
- .replace_all(&query_string, "") - .to_string(); - - let mut is_insert = false; - if let Some(index_start_clausule_returning) = query_string.find(" RETURNING") { - query_string.truncate(index_start_clausule_returning); - is_insert = true; - } - - let params_query: Vec = - reorder_params(&stmt, params, |f| f.as_mysql_param().to_value()); + fn update_entity<'a, 'b, T>( + entity: &'a T, + ) -> impl Future>> + where + T: RowMapper + Inspectionable<'a> + Sync + 'a; - let query_with_params = QueryWithParams { - query: query_string, - params: params_query, - }; + fn update_entity_with<'a, 'b, T, I>( + entity: &'a T, + input: I, + ) -> impl Future>> + where + T: RowMapper + Inspectionable<'a> + Sync + 'a, + I: DbConnection + Send + 'a; - let mut query_result = query_with_params - .run(mysql_connection) - .await - .expect("Error executing query in mysql"); + fn update_query<'a>() -> Result, Box>; - let result_rows = if is_insert { - let last_insert = query_result - .last_insert_id() - .map(Value::UInt) - .expect("Error getting pk id in insert"); + fn update_query_with<'a>( + database_type: DatabaseType, + ) -> Result, Box>; - vec![row::new_row( - vec![last_insert], - Arc::new([mysql_async::Column::new(ColumnType::MYSQL_TYPE_UNKNOWN)]), - )] - } else { - query_result - .collect::() - .await - .expect("Error resolved trait FromRow in mysql") - }; + fn delete(&self) -> impl Future>> + Send; - Ok(CanyonRows::MySQL(result_rows)) - } -} + fn delete_with<'a, 'b, I>( + &self, + input: I, + ) -> impl Future>> + Send + where + I: DbConnection + Send + 'a; -#[cfg(feature = "mysql")] -fn reorder_params( - stmt: &str, - params: &[&'_ dyn QueryParameter<'_>], - fn_parser: impl Fn(&&dyn QueryParameter<'_>) -> T, -) -> Vec { - let mut ordered_params = vec![]; - let rg = regex::Regex::new(DETECT_PARAMS_IN_QUERY) - .expect("Error create regex with detect params pattern expression"); + fn delete_entity<'a, 'b, T>( + entity: &'a T, + ) -> impl Future>> + where + T: RowMapper + 
Inspectionable<'a> + Sync + 'a; - for positional_param in rg.find_iter(stmt) { - let pp: &str = positional_param.as_str(); - let pp_index = pp[1..] // param $1 -> get 1 - .parse::() - .expect("Error parse mapped parameter to usized.") - - 1; + fn delete_entity_with<'a, 'b, T, I>( + entity: &'a T, + input: I, + ) -> impl Future>> + where + T: RowMapper + Inspectionable<'a> + Sync + 'a, + I: DbConnection + Send + 'a; - let element = params - .get(pp_index) - .expect("Error obtaining the element of the mapping against parameters."); - ordered_params.push(fn_parser(element)); - } + fn delete_query<'a, 'b>() -> Result, Box> + where + 'a: 'b; - ordered_params + fn delete_query_with<'a, 'b>( + database_type: DatabaseType, + ) -> Result, Box> + where + 'a: 'b; } diff --git a/canyon_crud/src/lib.rs b/canyon_crud/src/lib.rs index cea474cb..c4ae5e53 100644 --- a/canyon_crud/src/lib.rs +++ b/canyon_crud/src/lib.rs @@ -1,13 +1,5 @@ -pub extern crate async_trait; -extern crate canyon_connection; - -pub mod bounds; pub mod crud; -pub mod mapper; -pub mod query_elements; -pub mod rows; - -pub use query_elements::operators::*; +pub use canyon_core::query::operators::*; -pub use canyon_connection::{canyon_database_connector::DatabaseType, datasources::*}; +pub use canyon_core::connection::{database_type::DatabaseType, datasources::*}; pub use chrono; diff --git a/canyon_crud/src/mapper.rs b/canyon_crud/src/mapper.rs deleted file mode 100644 index 252df1ce..00000000 --- a/canyon_crud/src/mapper.rs +++ /dev/null @@ -1,20 +0,0 @@ -#[cfg(feature = "mysql")] -use canyon_connection::mysql_async; -#[cfg(feature = "mssql")] -use canyon_connection::tiberius; -#[cfg(feature = "postgres")] -use canyon_connection::tokio_postgres; - -use crate::crud::Transaction; - -/// Declares functions that takes care to deserialize data incoming -/// from some supported database in Canyon-SQL into a user's defined -/// type `T` -pub trait RowMapper>: Sized { - #[cfg(feature = "postgres")] - fn 
deserialize_postgresql(row: &tokio_postgres::Row) -> T; - #[cfg(feature = "mssql")] - fn deserialize_sqlserver(row: &tiberius::Row) -> T; - #[cfg(feature = "mysql")] - fn deserialize_mysql(row: &mysql_async::Row) -> T; -} diff --git a/canyon_crud/src/query_elements/mod.rs b/canyon_crud/src/query_elements/mod.rs deleted file mode 100644 index e319d4a4..00000000 --- a/canyon_crud/src/query_elements/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod operators; -pub mod query; -pub mod query_builder; diff --git a/canyon_crud/src/query_elements/query.rs b/canyon_crud/src/query_elements/query.rs deleted file mode 100644 index 3923d3b6..00000000 --- a/canyon_crud/src/query_elements/query.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::{fmt::Debug, marker::PhantomData}; - -use crate::{ - bounds::QueryParameter, - crud::{CrudOperations, Transaction}, - mapper::RowMapper, -}; - -/// Holds a sql sentence details -#[derive(Debug, Clone)] -pub struct Query<'a, T: CrudOperations + Transaction + RowMapper> { - pub sql: String, - pub params: Vec<&'a dyn QueryParameter<'a>>, - marker: PhantomData, -} - -impl<'a, T> Query<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - pub fn new(sql: String) -> Query<'a, T> { - Self { - sql, - params: vec![], - marker: PhantomData, - } - } -} diff --git a/canyon_crud/src/query_elements/query_builder.rs b/canyon_crud/src/query_elements/query_builder.rs deleted file mode 100644 index 4d56401a..00000000 --- a/canyon_crud/src/query_elements/query_builder.rs +++ /dev/null @@ -1,687 +0,0 @@ -use std::fmt::Debug; - -use canyon_connection::{ - canyon_database_connector::DatabaseType, get_database_config, DATASOURCES, -}; - -use crate::{ - bounds::{FieldIdentifier, FieldValueIdentifier, QueryParameter}, - crud::{CrudOperations, Transaction}, - mapper::RowMapper, - query_elements::query::Query, - Operator, -}; - -/// Contains the elements that makes part of the formal declaration -/// of the behaviour of the Canyon-SQL QueryBuilder -pub mod ops { - 
pub use super::*; - - /// The [`QueryBuilder`] trait is the root of a kind of hierarchy - /// on more specific [`super::QueryBuilder`], that are: - /// - /// * [`super::SelectQueryBuilder`] - /// * [`super::UpdateQueryBuilder`] - /// * [`super::DeleteQueryBuilder`] - /// - /// This trait provides the formal declaration of the behaviour that the - /// implementors must provide in their public interfaces, groping - /// the common elements between every element down in that - /// hierarchy. - /// - /// For example, the [`super::QueryBuilder`] type holds the data - /// necessary for track the SQL sentence while it's being generated - /// thought the fluent builder, and provides the behaviour of - /// the common elements defined in this trait. - /// - /// The more concrete types represents a wrapper over a raw - /// [`super::QueryBuilder`], offering all the elements declared - /// in this trait in its public interface, and which implementation - /// only consists of call the same method on the wrapped - /// [`super::QueryBuilder`]. - /// - /// This allows us to declare in their public interface their - /// specific operations, like, for example, join operations - /// on the [`super::SelectQueryBuilder`], and the usage - /// of the `SET` clause on a [`super::UpdateQueryBuilder`], - /// without mixing types or convoluting everything into - /// just one type. - pub trait QueryBuilder<'a, T> - where - T: CrudOperations + Transaction + RowMapper, - { - /// Returns a read-only reference to the underlying SQL sentence, - /// with the same lifetime as self - fn read_sql(&'a self) -> &'a str; - - /// Public interface for append the content of an slice to the end of - /// the underlying SQL sentece. - /// - /// This mutator will allow the user to wire SQL code to the already - /// generated one - /// - /// * `sql` - The [`&str`] to be wired in the SQL - fn push_sql(&mut self, sql: &str); - - /// Generates a `WHERE` SQL clause for constraint the query. 
- /// - /// * `column` - A [`FieldValueIdentifier`] that will provide the target - /// column name and the value for the filter - /// * `op` - Any element that implements [`Operator`] for create the comparison - /// or equality binary operator - fn r#where>( - &mut self, - column: Z, - op: impl Operator, - ) -> &mut Self - where - T: Debug + CrudOperations + Transaction + RowMapper; - - /// Generates an `AND` SQL clause for constraint the query. - /// - /// * `column` - A [`FieldValueIdentifier`] that will provide the target - /// column name and the value for the filter - /// * `op` - Any element that implements [`Operator`] for create the comparison - /// or equality binary operator - fn and>( - &mut self, - column: Z, - op: impl Operator, - ) -> &mut Self; - - /// Generates an `AND` SQL clause for constraint the query that will create - /// the filter in conjunction with an `IN` operator that will ac - /// - /// * `column` - A [`FieldIdentifier`] that will provide the target - /// column name for the filter, based on the variant that represents - /// the field name that maps the targeted column name - /// * `values` - An array of [`QueryParameter`] with the values to filter - /// inside the `IN` operator - fn and_values_in(&mut self, column: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>; - - /// Generates an `OR` SQL clause for constraint the query that will create - /// the filter in conjunction with an `IN` operator that will ac - /// - /// * `column` - A [`FieldIdentifier`] that will provide the target - /// column name for the filter, based on the variant that represents - /// the field name that maps the targeted column name - /// * `values` - An array of [`QueryParameter`] with the values to filter - /// inside the `IN` operator - fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>; - - /// Generates an `OR` SQL clause for constraint the query. 
- /// - /// * `column` - A [`FieldValueIdentifier`] that will provide the target - /// column name and the value for the filter - /// * `op` - Any element that implements [`Operator`] for create the comparison - /// or equality binary operator - fn or>(&mut self, column: Z, op: impl Operator) - -> &mut Self; - - /// Generates a `ORDER BY` SQL clause for constraint the query. - /// - /// * `order_by` - A [`FieldIdentifier`] that will provide the target column name - /// * `desc` - a boolean indicating if the generated `ORDER_BY` must be in ascending or descending order - fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self; - } -} - -/// Type for construct more complex queries than the classical CRUD ones. -#[derive(Debug, Clone)] -pub struct QueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - query: Query<'a, T>, - datasource_name: &'a str, - datasource_type: DatabaseType, -} - -unsafe impl<'a, T> Send for QueryBuilder<'a, T> where - T: CrudOperations + Transaction + RowMapper -{ -} -unsafe impl<'a, T> Sync for QueryBuilder<'a, T> where - T: CrudOperations + Transaction + RowMapper -{ -} - -impl<'a, T> QueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - /// Returns a new instance of the [`QueryBuilder`] - pub fn new(query: Query<'a, T>, datasource_name: &'a str) -> Self { - Self { - query, - datasource_name, - datasource_type: DatabaseType::from( - &get_database_config(datasource_name, &DATASOURCES).auth, - ), - } - } - - /// Launches the generated query against the database targeted - /// by the selected datasource - pub async fn query( - &'a mut self, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> { - self.query.sql.push(';'); - - Ok(T::query( - self.query.sql.clone(), - self.query.params.to_vec(), - self.datasource_name, - ) - .await? 
- .into_results::()) - } - - pub fn r#where>(&mut self, r#where: Z, op: impl Operator) { - let (column_name, value) = r#where.value(); - - let where_ = String::from(" WHERE ") - + column_name - + &op.as_str(self.query.params.len() + 1, &self.datasource_type); - - self.query.sql.push_str(&where_); - self.query.params.push(value); - } - - pub fn and>(&mut self, r#and: Z, op: impl Operator) { - let (column_name, value) = r#and.value(); - - let and_ = String::from(" AND ") - + column_name - + &op.as_str(self.query.params.len() + 1, &self.datasource_type); - - self.query.sql.push_str(&and_); - self.query.params.push(value); - } - - pub fn or>(&mut self, r#and: Z, op: impl Operator) { - let (column_name, value) = r#and.value(); - - let and_ = String::from(" OR ") - + column_name - + &op.as_str(self.query.params.len() + 1, &self.datasource_type); - - self.query.sql.push_str(&and_); - self.query.params.push(value); - } - - pub fn and_values_in(&mut self, r#and: Z, values: &'a [Q]) - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - if values.is_empty() { - return; - } - - self.query - .sql - .push_str(&format!(" AND {} IN (", r#and.as_str())); - - let mut counter = 1; - values.iter().for_each(|qp| { - if values.len() != counter { - self.query - .sql - .push_str(&format!("${}, ", self.query.params.len())); - counter += 1; - } else { - self.query - .sql - .push_str(&format!("${}", self.query.params.len())); - } - self.query.params.push(qp) - }); - - self.query.sql.push(')') - } - - fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - if values.is_empty() { - return; - } - - self.query - .sql - .push_str(&format!(" OR {} IN (", r#or.as_str())); - - let mut counter = 1; - values.iter().for_each(|qp| { - if values.len() != counter { - self.query - .sql - .push_str(&format!("${}, ", self.query.params.len())); - counter += 1; - } else { - self.query - .sql - .push_str(&format!("${}", self.query.params.len())); 
- } - self.query.params.push(qp) - }); - - self.query.sql.push(')') - } - - #[inline] - pub fn order_by>(&mut self, order_by: Z, desc: bool) { - self.query.sql.push_str( - &(format!( - " ORDER BY {}{}", - order_by.as_str(), - if desc { " DESC " } else { "" } - )), - ); - } -} - -#[derive(Debug, Clone)] -pub struct SelectQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - _inner: QueryBuilder<'a, T>, -} - -impl<'a, T> SelectQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - /// Generates a new public instance of the [`SelectQueryBuilder`] - pub fn new(table_schema_data: &str, datasource_name: &'a str) -> Self { - Self { - _inner: QueryBuilder::::new( - Query::new(format!("SELECT * FROM {table_schema_data}")), - datasource_name, - ), - } - } - - /// Launches the generated query to the database pointed by the - /// selected datasource - #[inline] - pub async fn query( - &'a mut self, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> { - self._inner.query().await - } - - /// Adds a *LEFT JOIN* SQL statement to the underlying - /// [`Query`] held by the [`QueryBuilder`], where: - /// - /// * `join_table` - The table target of the join operation - /// * `col1` - The left side of the ON operator for the join - /// * `col2` - The right side of the ON operator for the join - /// - /// > Note: The order on the column parameters is irrelevant - pub fn left_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { - self._inner - .query - .sql - .push_str(&format!(" LEFT JOIN {join_table} ON {col1} = {col2}")); - self - } - - /// Adds a *RIGHT JOIN* SQL statement to the underlying - /// [`Query`] held by the [`QueryBuilder`], where: - /// - /// * `join_table` - The table target of the join operation - /// * `col1` - The left side of the ON operator for the join - /// * `col2` - The right side of the ON operator for the join - /// - /// > Note: The order on the column parameters is 
irrelevant - pub fn inner_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { - self._inner - .query - .sql - .push_str(&format!(" INNER JOIN {join_table} ON {col1} = {col2}")); - self - } - - /// Adds a *RIGHT JOIN* SQL statement to the underlying - /// [`Query`] held by the [`QueryBuilder`], where: - /// - /// * `join_table` - The table target of the join operation - /// * `col1` - The left side of the ON operator for the join - /// * `col2` - The right side of the ON operator for the join - /// - /// > Note: The order on the column parameters is irrelevant - pub fn right_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { - self._inner - .query - .sql - .push_str(&format!(" RIGHT JOIN {join_table} ON {col1} = {col2}")); - self - } - - /// Adds a *FULL JOIN* SQL statement to the underlying - /// [`Query`] held by the [`QueryBuilder`], where: - /// - /// * `join_table` - The table target of the join operation - /// * `col1` - The left side of the ON operator for the join - /// * `col2` - The right side of the ON operator for the join - /// - /// > Note: The order on the column parameters is irrelevant - pub fn full_join(&mut self, join_table: &str, col1: &str, col2: &str) -> &mut Self { - self._inner - .query - .sql - .push_str(&format!(" FULL JOIN {join_table} ON {col1} = {col2}")); - self - } -} - -impl<'a, T> ops::QueryBuilder<'a, T> for SelectQueryBuilder<'a, T> -where - T: Debug + CrudOperations + Transaction + RowMapper + Send, -{ - #[inline] - fn read_sql(&'a self) -> &'a str { - self._inner.query.sql.as_str() - } - - #[inline(always)] - fn push_sql(&mut self, sql: &str) { - self._inner.query.sql.push_str(sql); - } - - #[inline] - fn r#where>( - &mut self, - r#where: Z, - op: impl Operator, - ) -> &mut Self { - self._inner.r#where(r#where, op); - self - } - - #[inline] - fn and>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.and(column, op); - self - } - - #[inline] - fn and_values_in(&mut 
self, r#and: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.and_values_in(and, values); - self - } - - #[inline] - fn or_values_in(&mut self, r#and: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.or_values_in(and, values); - self - } - - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - - #[inline] - fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { - self._inner.order_by(order_by, desc); - self - } -} - -/// Contains the specific database operations of the *UPDATE* SQL statements. -/// -/// * `set` - To construct a new `SET` clause to determine the columns to -/// update with the provided values -#[derive(Debug, Clone)] -pub struct UpdateQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - _inner: QueryBuilder<'a, T>, -} - -impl<'a, T> UpdateQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - /// Generates a new public instance of the [`UpdateQueryBuilder`] - pub fn new(table_schema_data: &str, datasource_name: &'a str) -> Self { - Self { - _inner: QueryBuilder::::new( - Query::new(format!("UPDATE {table_schema_data}")), - datasource_name, - ), - } - } - - /// Launches the generated query to the database pointed by the - /// selected datasource - #[inline] - pub async fn query( - &'a mut self, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> { - self._inner.query().await - } - - /// Creates an SQL `SET` clause to especify the columns that must be updated in the sentence - pub fn set(&mut self, columns: &'a [(Z, Q)]) -> &mut Self - where - Z: FieldIdentifier + Clone, - Q: QueryParameter<'a>, - { - if columns.is_empty() { - return self; - } - if self._inner.query.sql.contains("SET") { - panic!( - "\n{}", - String::from("\t[PANIC!] - Don't use chained calls of the .set(...) method. 
") - + "\n\tPass all the values in a unique call within the 'columns' " - + "array of tuples parameter\n" - ) - } - - let mut set_clause = String::new(); - set_clause.push_str(" SET "); - - for (idx, column) in columns.iter().enumerate() { - set_clause.push_str(&format!( - "{} = ${}", - column.0.as_str(), - self._inner.query.params.len() + 1 - )); - - if idx < columns.len() - 1 { - set_clause.push_str(", "); - } - self._inner.query.params.push(&column.1); - } - - self._inner.query.sql.push_str(&set_clause); - self - } -} - -impl<'a, T> ops::QueryBuilder<'a, T> for UpdateQueryBuilder<'a, T> -where - T: Debug + CrudOperations + Transaction + RowMapper + Send, -{ - #[inline] - fn read_sql(&'a self) -> &'a str { - self._inner.query.sql.as_str() - } - - #[inline(always)] - fn push_sql(&mut self, sql: &str) { - self._inner.query.sql.push_str(sql); - } - - #[inline] - fn r#where>( - &mut self, - r#where: Z, - op: impl Operator, - ) -> &mut Self { - self._inner.r#where(r#where, op); - self - } - - #[inline] - fn and>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.and(column, op); - self - } - - #[inline] - fn and_values_in(&mut self, r#and: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.and_values_in(and, values); - self - } - - #[inline] - fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.or_values_in(or, values); - self - } - - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - - #[inline] - fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { - self._inner.order_by(order_by, desc); - self - } -} - -/// Contains the specific database operations associated with the -/// *DELETE* SQL statements. 
-/// -/// * `set` - To construct a new `SET` clause to determine the columns to -/// update with the provided values -#[derive(Debug, Clone)] -pub struct DeleteQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - _inner: QueryBuilder<'a, T>, -} - -impl<'a, T> DeleteQueryBuilder<'a, T> -where - T: CrudOperations + Transaction + RowMapper, -{ - /// Generates a new public instance of the [`DeleteQueryBuilder`] - pub fn new(table_schema_data: &str, datasource_name: &'a str) -> Self { - Self { - _inner: QueryBuilder::::new( - Query::new(format!("DELETE FROM {table_schema_data}")), - datasource_name, - ), - } - } - - /// Launches the generated query to the database pointed by the - /// selected datasource - #[inline] - pub async fn query( - &'a mut self, - ) -> Result, Box<(dyn std::error::Error + Sync + Send + 'static)>> { - self._inner.query().await - } -} - -impl<'a, T> ops::QueryBuilder<'a, T> for DeleteQueryBuilder<'a, T> -where - T: Debug + CrudOperations + Transaction + RowMapper + Send, -{ - #[inline] - fn read_sql(&'a self) -> &'a str { - self._inner.query.sql.as_str() - } - - #[inline(always)] - fn push_sql(&mut self, sql: &str) { - self._inner.query.sql.push_str(sql); - } - - #[inline] - fn r#where>( - &mut self, - r#where: Z, - op: impl Operator, - ) -> &mut Self { - self._inner.r#where(r#where, op); - self - } - - #[inline] - fn and>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.and(column, op); - self - } - - #[inline] - fn and_values_in(&mut self, r#and: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.or_values_in(and, values); - self - } - - #[inline] - fn or_values_in(&mut self, r#or: Z, values: &'a [Q]) -> &mut Self - where - Z: FieldIdentifier, - Q: QueryParameter<'a>, - { - self._inner.or_values_in(or, values); - self - } - - #[inline] - fn or>(&mut self, column: Z, op: impl Operator) -> &mut Self { - self._inner.or(column, op); - self - } - - 
#[inline] - fn order_by>(&mut self, order_by: Z, desc: bool) -> &mut Self { - self._inner.order_by(order_by, desc); - self - } -} diff --git a/canyon_crud/src/rows.rs b/canyon_crud/src/rows.rs deleted file mode 100644 index 517592a6..00000000 --- a/canyon_crud/src/rows.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::crud::Transaction; -use crate::mapper::RowMapper; -use std::marker::PhantomData; - -/// Lightweight wrapper over the collection of results of the different crates -/// supported by Canyon-SQL. -/// -/// Even tho the wrapping seems meaningless, this allows us to provide internal -/// operations that are too difficult or to ugly to implement in the macros that -/// will call the query method of Crud. -pub enum CanyonRows { - #[cfg(feature = "postgres")] - Postgres(Vec), - #[cfg(feature = "mssql")] - Tiberius(Vec), - #[cfg(feature = "mysql")] - MySQL(Vec), - - UnusableTypeMarker(PhantomData), -} - -impl CanyonRows { - #[cfg(feature = "postgres")] - pub fn get_postgres_rows(&self) -> &Vec { - match self { - Self::Postgres(v) => v, - _ => panic!("This branch will never ever should be reachable"), - } - } - - #[cfg(feature = "mssql")] - pub fn get_tiberius_rows(&self) -> &Vec { - match self { - Self::Tiberius(v) => v, - _ => panic!("This branch will never ever should be reachable"), - } - } - - #[cfg(feature = "mysql")] - pub fn get_mysql_rows(&self) -> &Vec { - match self { - Self::MySQL(v) => v, - _ => panic!("This branch will never ever should be reachable"), - } - } - - /// Consumes `self` and returns the wrapped [`std::vec::Vec`] with the instances of T - pub fn into_results>(self) -> Vec - where - T: Transaction, - { - match self { - #[cfg(feature = "postgres")] - Self::Postgres(v) => v.iter().map(|row| Z::deserialize_postgresql(row)).collect(), - #[cfg(feature = "mssql")] - Self::Tiberius(v) => v.iter().map(|row| Z::deserialize_sqlserver(row)).collect(), - #[cfg(feature = "mysql")] - Self::MySQL(v) => v.iter().map(|row| 
Z::deserialize_mysql(row)).collect(), - _ => panic!("This branch will never ever should be reachable"), - } - } - - /// Returns the number of elements present on the wrapped collection - pub fn len(&self) -> usize { - match self { - #[cfg(feature = "postgres")] - Self::Postgres(v) => v.len(), - #[cfg(feature = "mssql")] - Self::Tiberius(v) => v.len(), - #[cfg(feature = "mysql")] - Self::MySQL(v) => v.len(), - _ => panic!("This branch will never ever should be reachable"), - } - } - - /// Returns true whenever the wrapped collection of Rows does not contains any elements - pub fn is_empty(&self) -> bool { - match self { - #[cfg(feature = "postgres")] - Self::Postgres(v) => v.is_empty(), - #[cfg(feature = "mssql")] - Self::Tiberius(v) => v.is_empty(), - #[cfg(feature = "mysql")] - Self::MySQL(v) => v.is_empty(), - _ => panic!("This branch will never ever should be reachable"), - } - } -} diff --git a/canyon_entities/Cargo.toml b/canyon_entities/Cargo.toml index 374e2e98..376bc4d3 100644 --- a/canyon_entities/Cargo.toml +++ b/canyon_entities/Cargo.toml @@ -14,4 +14,4 @@ regex = { workspace = true } partialdebug = { workspace = true } quote = { workspace = true } proc-macro2 = { workspace = true } -syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade +syn = { version = "1.0.109", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade diff --git a/canyon_entities/src/entity.rs b/canyon_entities/src/entity.rs index 8604d0e8..938a9b9c 100644 --- a/canyon_entities/src/entity.rs +++ b/canyon_entities/src/entity.rs @@ -3,8 +3,8 @@ use proc_macro2::{Ident, TokenStream}; use quote::quote; use std::convert::TryFrom; use syn::{ - parse::{Parse, ParseBuffer}, Attribute, Generics, ItemStruct, Visibility, + parse::{Parse, ParseBuffer}, }; use super::entity_fields::EntityField; @@ -44,13 +44,14 @@ impl CanyonEntity { /// which this enum is related to. 
/// /// Makes a variant `#field_name(#ty)` where `#ty` it's a trait object - /// of type [`canyon_crud::bounds::QueryParameter`] + /// of type `canyon_core::QueryParameter` TODO: correct the comment when refactored pub fn get_fields_as_enum_variants_with_value(&self) -> Vec { self.fields .iter() .map(|f| { let field_name = &f.name; - quote! { #field_name(&'a dyn canyon_sql::crud::bounds::QueryParameter<'a>) } + let field_ty = &f.field_type; + quote! { #field_name(#field_ty) } }) .collect::>() } @@ -103,7 +104,7 @@ impl CanyonEntity { let field_name_as_string = f.name.to_string(); quote! { - #enum_name::#field_name(v) => (#field_name_as_string, v) + #enum_name::#field_name(v) => (#field_name_as_string, v as &dyn canyon_sql::query::QueryParameter) } }) .collect::>() diff --git a/canyon_entities/src/field_annotation.rs b/canyon_entities/src/field_annotation.rs index 8c01615d..abaf80e9 100644 --- a/canyon_entities/src/field_annotation.rs +++ b/canyon_entities/src/field_annotation.rs @@ -1,6 +1,6 @@ use proc_macro2::Ident; use std::{collections::HashMap, convert::TryFrom}; -use syn::{punctuated::Punctuated, Attribute, MetaNameValue, Token}; +use syn::{Attribute, MetaNameValue, Token, punctuated::Punctuated}; /// The available annotations for a field that belongs to any struct /// annotaded with `#[canyon_entity]` @@ -46,7 +46,7 @@ impl EntityFieldAnnotation { "Only bool literals are supported for the `{}` attribute", &attr_value_ident ), - )) + )); } }; data.insert(attr_value_ident, attr_value); @@ -87,12 +87,12 @@ impl EntityFieldAnnotation { // TODO Implement the option (or change it to) to use a Rust Ident instead a Str Lit syn::Lit::Str(v) => v.value(), _ => { - return Err( - syn::Error::new_spanned( - nv.path.clone(), - format!("Only string literals are supported for the `{attr_value_ident}` attribute") - ) - ) + return Err(syn::Error::new_spanned( + nv.path.clone(), + format!( + "Only string literals are supported for the `{attr_value_ident}` attribute" + ), + )); 
} }; data.insert(attr_value_ident, attr_value); @@ -105,7 +105,7 @@ impl EntityFieldAnnotation { return Err(syn::Error::new_spanned( ident, "Missed `table` argument on the Foreign Key annotation".to_string(), - )) + )); } }, match data.get("column") { @@ -115,7 +115,7 @@ impl EntityFieldAnnotation { ident, "Missed `column` argument on the Foreign Key annotation" .to_string(), - )) + )); } }, )) @@ -143,7 +143,7 @@ impl TryFrom<&&Attribute> for EntityFieldAnnotation { return Err(syn::Error::new_spanned( ident.clone(), format!("Unknown attribute `{}`", &ident), - )) + )); } }) } diff --git a/canyon_entities/src/helpers.rs b/canyon_entities/src/helpers.rs new file mode 100644 index 00000000..2f26fc9b --- /dev/null +++ b/canyon_entities/src/helpers.rs @@ -0,0 +1,24 @@ +/// Autogenerates a default table name for an entity given their struct name +/// TODO: This is duplicated from the macro's crate. We should be able to join both crates in +/// one later, but now, for developing purposes, we need to maintain here for a while this here +pub fn default_database_table_name_from_entity_name(ty: &str) -> String { + let mut table_name: String = String::new(); + + let mut index = 0; + for char in ty.chars() { + if index < 1 { + table_name.push(char.to_ascii_lowercase()); + index += 1; + } else { + match char { + n if n.is_ascii_uppercase() => { + table_name.push('_'); + table_name.push(n.to_ascii_lowercase()); + } + _ => table_name.push(char), + } + } + } + + table_name +} diff --git a/canyon_entities/src/lib.rs b/canyon_entities/src/lib.rs index 8b3abd6c..3ba272bb 100644 --- a/canyon_entities/src/lib.rs +++ b/canyon_entities/src/lib.rs @@ -4,6 +4,7 @@ use std::sync::Mutex; pub mod entity; pub mod entity_fields; pub mod field_annotation; +mod helpers; pub mod manager_builder; pub mod register_types; diff --git a/canyon_entities/src/manager_builder.rs b/canyon_entities/src/manager_builder.rs index d717909f..e0d6a90e 100644 --- a/canyon_entities/src/manager_builder.rs +++ 
b/canyon_entities/src/manager_builder.rs @@ -1,9 +1,9 @@ +use super::entity::CanyonEntity; +use crate::helpers; use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; use syn::{Attribute, Generics, Visibility}; -use super::entity::CanyonEntity; - /// Builds the TokenStream that contains the user defined struct pub fn generate_user_struct(canyon_entity: &CanyonEntity) -> TokenStream { let fields = &canyon_entity.get_attrs_as_token_stream(); @@ -21,18 +21,82 @@ pub fn generate_user_struct(canyon_entity: &CanyonEntity) -> TokenStream { } } +pub fn generated_enum_type_for_struct_data(canyon_entity: &CanyonEntity) -> TokenStream { + let struct_name = canyon_entity.struct_name.to_string(); + let enum_name = Ident::new(&(String::from(&struct_name) + "Table"), Span::call_site()); + let db_target_table_name = helpers::default_database_table_name_from_entity_name(&struct_name); + + let generics = &canyon_entity.generics; + let visibility = &canyon_entity.vis; + + quote! { + /// Auto-generated enum to represent compile-time metadata + /// about a Canyon entity type. + /// + /// The enum is named by appending `Table` to the struct name and contains + /// variants for retrieving metadata associated with the entity. Currently, + /// it includes: + /// + /// - `name`: The struct's identifier as a string. + /// - `DbName`: The name of the database table derived from the struct's name, + /// but adapted to the `snake_case` convention, which is the standard adopted + /// by Canyon these early days to transform type Idents into table names + /// + /// This enum implements the `TableMetadata` trait, providing the `as_str` method, + /// which is useful in code that needs to retrieve such metadata dynamically while + /// keeping strong typing and avoiding magic strings. 
+ /// + /// # Example + /// ``` + /// pub struct League { + /// id: i32, + /// name: String, + /// } + /// + /// // This is the auto-generated by Canyon with the `Fields` macro + /// pub enum LeagueTable { + /// Name, + /// DbName + /// } + /// + /// assert_eq!(LeagueTable::Name.to_string(), "League"); + /// assert_eq!(LeagueTable::DbName.to_string(), "league"); + /// ``` + #visibility enum #enum_name #generics { + Name, + DbName + } + + impl #generics std::fmt::Display for #enum_name #generics { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } + } + + impl #generics canyon_sql::query::bounds::TableMetadata for #generics #enum_name #generics { + fn as_str(&self) -> &'static str { + match *self { + #enum_name::Name => #struct_name, + #enum_name::DbName => #db_target_table_name, + } + } + } + } +} + /// Auto-generated enum to represent every field of the related type /// as a variant of an enum that it's named with the concatenation /// of the type identifier + Field /// /// The idea it's to have a representation of the field name as an enum -/// variant, avoiding to let the user passing around Strings and instead, +/// variant, letting the user passing around Strings and instead, /// passing variants of a concrete enumeration type, that when required, /// will be called though macro code to obtain the &str representation /// of the field name. pub fn generate_enum_with_fields(canyon_entity: &CanyonEntity) -> TokenStream { - let ty = &canyon_entity.struct_name; let struct_name = canyon_entity.struct_name.to_string(); + let db_target_table_name = helpers::default_database_table_name_from_entity_name(&struct_name); + let enum_name = Ident::new((struct_name + "Field").as_str(), Span::call_site()); let fields_names = &canyon_entity.get_fields_as_enum_variants(); @@ -42,7 +106,6 @@ pub fn generate_enum_with_fields(canyon_entity: &CanyonEntity) -> TokenStream { let generics = &canyon_entity.generics; quote! 
{ - #[derive(Clone, Debug)] #[allow(non_camel_case_types)] #[allow(unused_variables)] #[allow(dead_code)] @@ -77,7 +140,18 @@ pub fn generate_enum_with_fields(canyon_entity: &CanyonEntity) -> TokenStream { #(#fields_names),* } - impl #generics canyon_sql::crud::bounds::FieldIdentifier<#ty> for #generics #enum_name #generics { + impl #generics std::fmt::Display for #enum_name #generics { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.as_str()) + } + } + + impl #generics canyon_sql::query::bounds::FieldIdentifier for #generics #enum_name #generics { + #[inline(always)] + fn table_and_column_name(&self) -> String { + format!("{}.{}", #db_target_table_name, self.as_str()) + } + fn as_str(&self) -> &'static str { match *self { #(#match_arms_str),* @@ -93,7 +167,6 @@ pub fn generate_enum_with_fields(canyon_entity: &CanyonEntity) -> TokenStream { /// The type of the inner value `(Enum::Variant(SomeType))` is the same /// that the field that the variant represents pub fn generate_enum_with_fields_values(canyon_entity: &CanyonEntity) -> TokenStream { - let ty = &canyon_entity.struct_name; let struct_name = canyon_entity.struct_name.to_string(); let enum_name = Ident::new((struct_name + "FieldValue").as_str(), Span::call_site()); @@ -103,7 +176,6 @@ pub fn generate_enum_with_fields_values(canyon_entity: &CanyonEntity) -> TokenSt let visibility = &canyon_entity.vis; quote! 
{ - #[derive(Debug)] #[allow(non_camel_case_types)] #[allow(unused_variables)] #[allow(dead_code)] @@ -121,16 +193,16 @@ pub fn generate_enum_with_fields_values(canyon_entity: &CanyonEntity) -> TokenSt /// #[allow(non_camel_case_types)] /// pub enum LeagueFieldValue { /// id(i32), - /// name(String) + /// name(String), /// opt(Option) /// } /// ``` - #visibility enum #enum_name<'a> { + #visibility enum #enum_name { #(#fields_names),* } - impl<'a> canyon_sql::crud::bounds::FieldValueIdentifier<'a, #ty> for #enum_name<'a> { - fn value(self) -> (&'static str, &'a dyn QueryParameter<'a>) { + impl canyon_sql::query::bounds::FieldValueIdentifier for #enum_name { + fn value(&self) -> (&'static str, &dyn canyon_sql::query::QueryParameter) { match self { #(#match_arms),* } diff --git a/canyon_macros/Cargo.toml b/canyon_macros/Cargo.toml index 8b8a2852..8acd9f0e 100755 --- a/canyon_macros/Cargo.toml +++ b/canyon_macros/Cargo.toml @@ -13,20 +13,21 @@ description.workspace = true proc-macro = true [dependencies] -syn = { version = "1.0.109", features = ["full"] } # TODO Pending to upgrade and refactor +syn = { version = "1.0.109", features = ["full", "parsing"] } # TODO Pending to upgrade and refactor quote = { workspace = true } proc-macro2 = { workspace = true } futures = { workspace = true } tokio = { workspace = true } +regex = { workspace = true } -canyon_connection = { workspace = true } +canyon_core = { workspace = true } canyon_crud = { workspace = true } canyon_entities = { workspace = true } canyon_migrations = { workspace = true, optional = true } [features] -postgres = ["canyon_connection/postgres", "canyon_crud/postgres", "canyon_migrations/postgres"] -mssql = ["canyon_connection/mssql", "canyon_crud/mssql", "canyon_migrations/mssql"] -mysql = ["canyon_connection/mysql", "canyon_crud/mysql", "canyon_migrations/mysql"] +postgres = ["canyon_core/postgres", "canyon_crud/postgres", "canyon_migrations/postgres"] +mssql = ["canyon_core/mssql", "canyon_crud/mssql", 
"canyon_migrations/mssql"] +mysql = ["canyon_core/mysql", "canyon_crud/mysql", "canyon_migrations/mysql"] migrations = ["canyon_migrations"] diff --git a/canyon_macros/src/canyon_entity_macro.rs b/canyon_macros/src/canyon_entity_macro.rs index 483f8f8e..9210c516 100644 --- a/canyon_macros/src/canyon_entity_macro.rs +++ b/canyon_macros/src/canyon_entity_macro.rs @@ -1,7 +1,80 @@ +use crate::utils::helpers; +use canyon_entities::CANYON_REGISTER_ENTITIES; +use canyon_entities::entity::CanyonEntity; +use canyon_entities::manager_builder::generate_user_struct; +use canyon_entities::register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}; +use proc_macro::TokenStream as CompilerTokenStream; use proc_macro2::{Span, TokenStream}; -use syn::NestedMeta; +use quote::quote; +use syn::{AttributeArgs, NestedMeta}; -pub(crate) fn parse_canyon_entity_proc_macro_attr( +pub fn generate_canyon_entity_tokens( + attrs: AttributeArgs, + input: CompilerTokenStream, +) -> TokenStream { + let (table_name, schema_name, parsing_attribute_error) = + parse_canyon_entity_proc_macro_attr(attrs); + + let entity_res = syn::parse::(input); + + if entity_res.is_err() { + return entity_res + .expect_err("Unexpected error parsing the struct") + .into_compile_error(); + } + + // No errors detected on the parsing, so we can safely unwrap the parse result + let entity = entity_res.unwrap(); + let generated_user_struct = generate_user_struct(&entity); + + // The identifier of the entities + let mut new_entity = CanyonRegisterEntity::default(); + let e = Box::leak(entity.struct_name.to_string().into_boxed_str()); + new_entity.entity_name = e; + new_entity.entity_db_table_name = table_name.unwrap_or(Box::leak( + helpers::default_database_table_name_from_entity_name(e).into_boxed_str(), + )); + new_entity.user_schema_name = schema_name; + + // The entity fields + for field in entity.fields.iter() { + let mut new_entity_field = CanyonRegisterEntityField { + field_name: field.name.to_string(), + 
field_type: field.get_field_type_as_string().replace(' ', ""), + ..Default::default() + }; + + field + .attributes + .iter() + .for_each(|attr| new_entity_field.annotations.push(attr.get_as_string())); + + new_entity.entity_fields.push(new_entity_field); + } + + // Fill the register with the data of the attached struct + CANYON_REGISTER_ENTITIES + .lock() + .expect("Error acquiring Mutex guard on Canyon Entity macro") + .push(new_entity); + + // Assemble everything + let tokens = quote! { + #generated_user_struct + }; + + // Pass the result back to the compiler + if let Some(macro_error) = parsing_attribute_error { + quote! { + #macro_error + #generated_user_struct + } + } else { + tokens + } +} + +fn parse_canyon_entity_proc_macro_attr( attrs: Vec, ) -> ( Option<&'static str>, @@ -16,7 +89,7 @@ pub(crate) fn parse_canyon_entity_proc_macro_attr( // The parse of the available options to configure the Canyon Entity for element in attrs { match element { - syn::NestedMeta::Meta(m) => { + NestedMeta::Meta(m) => { match m { syn::Meta::NameValue(nv) => { let attr_arg_ident = nv @@ -62,7 +135,7 @@ pub(crate) fn parse_canyon_entity_proc_macro_attr( } } } - syn::NestedMeta::Lit(_) => { + NestedMeta::Lit(_) => { parsing_attribute_error = Some(syn::Error::new( Span::call_site(), "No literal values allowed on the `canyon_macros::canyon_entity` proc macro" diff --git a/canyon_macros/src/canyon_macro.rs b/canyon_macros/src/canyon_macro.rs index 95379581..b005e10a 100644 --- a/canyon_macros/src/canyon_macro.rs +++ b/canyon_macros/src/canyon_macro.rs @@ -1,15 +1,18 @@ //! 
Provides helpers to build the `#[canyon_macros::canyon]` procedural like attribute macro #![cfg(feature = "migrations")] -use canyon_connection::CANYON_TOKIO_RUNTIME; +use canyon_core::connection::get_canyon_tokio_runtime; use canyon_migrations::migrations::handler::Migrations; use canyon_migrations::{CM_QUERIES_TO_EXECUTE, QUERIES_TO_EXECUTE}; use proc_macro2::TokenStream; use quote::quote; pub fn main_with_queries() -> TokenStream { - CANYON_TOKIO_RUNTIME.block_on(async { - canyon_connection::init_connections_cache().await; + // TODO: migrations on main instead of main_with_queries + get_canyon_tokio_runtime().block_on(async { + canyon_core::canyon::Canyon::init() + .await + .expect("Error initializing the connections POOL"); Migrations::migrate().await; }); @@ -26,15 +29,29 @@ pub fn main_with_queries() -> TokenStream { /// Creates a TokenScream that is used to load the data generated at compile-time /// by the `CanyonManaged` macros again on the queries register fn wire_queries_to_execute(canyon_manager_tokens: &mut Vec) { - let cm_data = CM_QUERIES_TO_EXECUTE.lock().unwrap(); - let data = QUERIES_TO_EXECUTE.lock().unwrap(); + let data_to_wire = if let Some(mutex) = QUERIES_TO_EXECUTE.get() { + let queries = mutex.lock().expect("QUERIES_TO_EXECUTE poisoned"); + queries + .iter() + .map(|(key, value)| { + quote! { hm.insert(#key, vec![#(#value),*]); } + }) + .collect::>() + } else { + vec![] + }; - let cm_data_to_wire = cm_data.iter().map(|(key, value)| { - quote! { cm_hm.insert(#key, vec![#(#value),*]); } - }); - let data_to_wire = data.iter().map(|(key, value)| { - quote! { hm.insert(#key, vec![#(#value),*]); } - }); + let cm_data_to_wire = if let Some(mutex) = CM_QUERIES_TO_EXECUTE.get() { + let cm_queries = mutex.lock().expect("CM_QUERIES_TO_EXECUTE poisoned"); + cm_queries + .iter() + .map(|(key, value)| { + quote! { cm_hm.insert(#key, vec![#(#value),*]); } + }) + .collect::>() + } else { + vec![] + }; let tokens = quote! 
{ use std::collections::HashMap; @@ -50,5 +67,5 @@ fn wire_queries_to_execute(canyon_manager_tokens: &mut Vec) { MigrationsProcessor::from_query_register(&hm).await; }; - canyon_manager_tokens.push(tokens) + canyon_manager_tokens.push(tokens); } diff --git a/canyon_macros/src/canyon_mapper_macro.rs b/canyon_macros/src/canyon_mapper_macro.rs new file mode 100644 index 00000000..0d321930 --- /dev/null +++ b/canyon_macros/src/canyon_mapper_macro.rs @@ -0,0 +1,375 @@ +#![allow(unused_imports)] + +use proc_macro2::{Ident, Span, TokenStream}; +use quote::quote; +use regex::Regex; +use std::iter::Map; +use std::slice::Iter; +use syn::{DeriveInput, Type, Visibility}; + +#[cfg(feature = "mssql")] +const BY_VALUE_CONVERSION_TARGETS: [&str; 1] = ["String"]; + +pub fn canyon_mapper_impl_tokens(ast: MacroTokens) -> TokenStream { + let mut row_mapper_tokens = TokenStream::new(); + + let ty = ast.ty; + let ty_str = ty.to_string(); + let fields = ast.fields(); + let (impl_generics, ty_generics, where_clause) = &ast.generics.split_for_impl(); + + let mut impl_methods = TokenStream::new(); + + #[cfg(feature = "postgres")] + let pg_implementation = create_postgres_fields_mapping(&ty_str, &fields); + #[cfg(feature = "postgres")] + impl_methods.extend(quote! { + fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> Result> { + Ok(Self { + #(#pg_implementation),* + }) + } + }); + + #[cfg(feature = "mssql")] + let sqlserver_implementation = create_sqlserver_fields_mapping(&ty_str, &fields); + #[cfg(feature = "mssql")] + impl_methods.extend(quote! { + fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> Result> { + Ok(Self { + #(#sqlserver_implementation),* + }) + } + }); + + #[cfg(feature = "mysql")] + let mysql_implementation = create_mysql_fields_mapping(&ty_str, &fields); + #[cfg(feature = "mysql")] + impl_methods.extend(quote! 
{ + fn deserialize_mysql(row: &canyon_sql::db_clients::mysql_async::Row) -> Result> { + Ok(Self { + #(#mysql_implementation),* + }) + } + }); + + row_mapper_tokens.extend(quote! { + use crate::canyon_sql::crud::CrudOperations; + impl #impl_generics canyon_sql::core::RowMapper for #ty #ty_generics #where_clause { + type Output = #ty; + #impl_methods + } + }); + + let inspectionable_impl_tokens = + __details::inspectionable_macro::generate_inspectionable_impl_tokens(&ast); + row_mapper_tokens.extend(quote! { + #inspectionable_impl_tokens + }); + + row_mapper_tokens +} + +#[cfg(feature = "postgres")] +fn create_postgres_fields_mapping<'a>( + ty: &'a str, + fields: &'a [(Visibility, Ident, Type)], +) -> impl Iterator + use<'a> { + fields.iter().map(|(_vis, ident, _ty)| { + let ident_name = ident.to_string(); + let err = create_row_mapper_error_extracting_row(ident, ty, DatabaseType::PostgreSql); + quote! { + #ident: row.try_get::<&str, #_ty>(#ident_name).map_err(|_| #err)? + } + }) +} + +#[cfg(feature = "mysql")] +fn create_mysql_fields_mapping<'a>( + ty: &'a str, + fields: &'a [(Visibility, Ident, Type)], +) -> impl Iterator + use<'a> { + fields.iter().map(|(_vis, ident, _ty)| { + let ident_name = ident.to_string(); + let err = create_row_mapper_error_extracting_row(ident, ty, DatabaseType::MySQL); + quote! { + #ident: row.get_opt(#ident_name).ok_or_else(|| #err)?? + } + }) +} + +#[cfg(feature = "mssql")] +fn create_sqlserver_fields_mapping<'a>( + struct_ty: &'a str, + fields: &'a [(Visibility, Ident, Type)], +) -> impl Iterator + use<'a> { + fields.iter().map(move |(_vis, ident, ty)| { + let ident_name = ident.to_string(); + let err = create_row_mapper_error_extracting_row(ident, struct_ty, DatabaseType::SqlServer); + + let target_field_type_str = get_field_type_as_string(ty); + let field_deserialize_impl = + handle_stupid_tiberius_sql_conversions(&target_field_type_str, &ident_name, err); + + quote! 
{ + #ident: #field_deserialize_impl + } + }) +} + +#[cfg(feature = "mssql")] +fn handle_stupid_tiberius_sql_conversions( + target_type: &str, + ident_name: &str, + err: String, +) -> TokenStream { + let is_opt_type = target_type.contains("Option"); + let handle_opt = if !is_opt_type { + quote! { .ok_or_else(|| #err)? } + } else { + quote! {} + }; + + let deserializing_type = get_deserializing_type(target_type); + let to_owned = if BY_VALUE_CONVERSION_TARGETS + .iter() + .any(|bv| target_type.contains(bv)) + { + if is_opt_type { + quote! { .map(|inner| inner.to_owned()) } + } else { + quote! { .to_owned() } + } + } else { + quote! {} + }; + + quote! { + // TODO: try_get + row.get::<#deserializing_type, &str>(#ident_name) + #handle_opt + #to_owned + } +} + +#[cfg(feature = "mssql")] +fn get_deserializing_type(target_type: &str) -> TokenStream { + let re = Regex::new(r"(?:Option\s*<\s*)?(?P&?\w+)(?:\s*>)?").unwrap(); + re.captures(target_type) + .map(|inner| String::from(&inner["type"])) + .map(|tt| { + if BY_VALUE_CONVERSION_TARGETS.contains(&tt.as_str()) { + quote! { &str } + // potentially others on demand on the future + } else if tt.contains("Date") || tt.contains("Time") { + let dt = Ident::new(tt.as_str(), Span::call_site()); + quote! { canyon_sql::date_time::#dt } + } else { + let tt = Ident::new(tt.as_str(), Span::call_site()); + quote! 
{ #tt } + } + }) + .unwrap_or_else(|| { + panic!( + "Unable to process type: {} on the given struct for SqlServer", + target_type + ) + }) +} + +#[cfg(feature = "mssql")] +fn __get_deserializing_type_str(target_type: &str) -> String { + let tt = get_deserializing_type(target_type); + tt.to_string() + .chars() + .filter(|c| !c.is_whitespace()) + .collect::() +} + +use crate::utils::macro_tokens::MacroTokens; +use canyon_core::connection::database_type::DatabaseType; +#[cfg(feature = "mssql")] +use quote::ToTokens; + +#[cfg(feature = "mssql")] +fn get_field_type_as_string(typ: &Type) -> String { + match typ { + Type::Array(type_) => type_.to_token_stream().to_string(), + Type::BareFn(type_) => type_.to_token_stream().to_string(), + Type::Group(type_) => type_.to_token_stream().to_string(), + Type::ImplTrait(type_) => type_.to_token_stream().to_string(), + Type::Infer(type_) => type_.to_token_stream().to_string(), + Type::Macro(type_) => type_.to_token_stream().to_string(), + Type::Never(type_) => type_.to_token_stream().to_string(), + Type::Paren(type_) => type_.to_token_stream().to_string(), + Type::Path(type_) => type_.to_token_stream().to_string(), + Type::Ptr(type_) => type_.to_token_stream().to_string(), + Type::Reference(type_) => type_.to_token_stream().to_string(), + Type::Slice(type_) => type_.to_token_stream().to_string(), + Type::TraitObject(type_) => type_.to_token_stream().to_string(), + Type::Tuple(type_) => type_.to_token_stream().to_string(), + Type::Verbatim(type_) => type_.to_token_stream().to_string(), + _ => "".to_owned(), + } +} + +fn create_row_mapper_error_extracting_row( + field_ident: &Ident, + ty: &str, + db_ty: DatabaseType, +) -> String { + std::io::Error::other(format!( + "Failed to retrieve the `{}` field for type: {} with {}", + field_ident, ty, db_ty + )) + .to_string() +} + +#[cfg(test)] +#[cfg(feature = "mssql")] +mod mapper_macro_tests { + use crate::canyon_mapper_macro::__get_deserializing_type_str; + + #[test] + fn 
test_regex_extraction_for_the_tiberius_target_types() { + assert_eq!("&str", __get_deserializing_type_str("String")); + assert_eq!("&str", __get_deserializing_type_str("Option")); + assert_eq!("i64", __get_deserializing_type_str("i64")); + + assert_eq!( + "canyon_sql::date_time::DateTime", + __get_deserializing_type_str("DateTime") + ); + assert_eq!( + "canyon_sql::date_time::NaiveDateTime", + __get_deserializing_type_str("NaiveDateTime") + ); + } +} + +mod __details { + use super::*; + pub(crate) mod inspectionable_macro { + use super::*; + use syn::{Field, Fields}; + + pub(crate) fn generate_inspectionable_impl_tokens(ast: &MacroTokens) -> TokenStream { + let ty = ast.ty; + let pk = ast.get_primary_key_field_annotation(); + let pk_ident_ts = pk.map(|pk| pk.ident); + let pk_ty_ts = pk.map(|pk| pk.ty); + + let (impl_generics, ty_generics, where_clause) = &ast.generics.split_for_impl(); + + let fields = ast.get_fields_idents_pk_parsed().collect::>(); + let fields_values = get_fields_values_expr_tokens(&fields); + let fields_names = get_fields_names_expr_tokens(&fields); + + let fields_as_comma_sep_string = ast.get_struct_fields_as_comma_sep_string(); + let queries_placeholders = ast.placeholders_generator(); + + let pk_opt_val = get_pk_ident_as_str(ast); + let pk_actual_value = get_pk_actual_value_expr_tokens(ast); + + let set_pk_val_method = set_pk_val_method(&pk_ident_ts); + let pk_assoc_ty = generate_pk_associated_type_tokens(&pk_ty_ts); + + quote! 
{ + impl #impl_generics canyon_sql::query::bounds::Inspectionable<'_> for #ty #ty_generics #where_clause { + + type PrimaryKeyType = #pk_assoc_ty; + + fn fields_actual_values(&self) -> Vec<&dyn canyon_sql::query::QueryParameter> { + vec![#(#fields_values),*] + } + + fn fields_names(&self) -> &[&'static str] { + &[#(#fields_names),*] + } + + fn fields_as_comma_sep_string(&self) -> &'static str { + #fields_as_comma_sep_string + } + + fn queries_placeholders(&self) -> &'static str { + #queries_placeholders + } + + fn primary_key(&self) -> Option<&'static str> { + #pk_opt_val + } + + fn primary_key_st() -> Option<&'static str> { + #pk_opt_val + } + + fn primary_key_actual_value(&self) -> &'_ (dyn canyon_sql::query::QueryParameter + '_) { + &#pk_actual_value + } + + fn set_primary_key_actual_value(&mut self, value: Self::PrimaryKeyType) -> Result<(), Box> { + #set_pk_val_method + } + } + } + } + } + + fn get_fields_values_expr_tokens<'a>( + fields: &'a Vec<&Ident>, + ) -> Map, fn(&'a &Ident) -> TokenStream> { + fields.iter().map(|ident| { + quote! { &self.#ident } + }) + } + + fn get_fields_names_expr_tokens(fields: &Vec<&Ident>) -> Vec { + fields + .iter() + .map(|ident| ident.to_string()) + .collect::>() + } + + fn get_pk_ident_as_str(ast: &MacroTokens) -> TokenStream { + match ast.get_primary_key_annotation() { + Some(primary_key) => quote! { Some(#primary_key) }, + None => quote! { None }, + } + } + + fn get_pk_actual_value_expr_tokens(ast: &MacroTokens) -> TokenStream { + match ast.get_primary_key_annotation() { + Some(primary_key) => { + let pk_ident = Ident::new(&primary_key, Span::call_site()); + quote! { self.#pk_ident } + } + None => quote! { -1 }, // TODO: yeah, big todo :) + } + } + + fn generate_pk_associated_type_tokens(pk_ident_ts: &Option<&Type>) -> TokenStream { + if let Some(pk_ty) = pk_ident_ts { + quote! { + #pk_ty + } + } else { + quote! 
{ i64 } // TODO: NoPrimaryKey + } + } + + fn set_pk_val_method(pk_ident_ts: &Option<&Ident>) -> TokenStream { + if let Some(pk_ident) = pk_ident_ts { + quote! { + self.#pk_ident = value.into(); + Ok(()) + } + } else { + quote! { + Err(Box::new(std::io::Error::new( + std::io::ErrorKind::InvalidInput, + "No primary key field defined for this entity" + )) as Box) + } + } + } +} diff --git a/canyon_macros/src/foreignkeyable_macro.rs b/canyon_macros/src/foreignkeyable_macro.rs new file mode 100644 index 00000000..6d7aae7d --- /dev/null +++ b/canyon_macros/src/foreignkeyable_macro.rs @@ -0,0 +1,48 @@ +use crate::utils::helpers::filter_fields; +use proc_macro2::TokenStream; +use quote::quote; +use syn::DeriveInput; + +pub fn foreignkeyable_impl_tokens(ast: DeriveInput) -> TokenStream { + let ty = ast.ident; + + // Recovers the identifiers of the structs members + let fields = filter_fields(match ast.data { + syn::Data::Struct(ref s) => &s.fields, + _ => { + return syn::Error::new(ty.span(), "ForeignKeyable only works with Structs") + .to_compile_error(); + } + }); + + let field_idents = fields.iter().map(|(_vis, ident)| { + let i = ident.to_string(); + quote! { + #i => Some(&self.#ident as &dyn canyon_sql::query::QueryParameter) + } + }); + let field_idents_cloned = field_idents.clone(); + + quote! 
{ + /// Implementation of the trait `ForeignKeyable` for the type + /// calling this derive proc macro + impl canyon_sql::query::bounds::ForeignKeyable for #ty { + fn get_fk_column(&self, column: &str) -> Option<&dyn canyon_sql::query::QueryParameter> { + match column { + #(#field_idents),*, + _ => None + } + } + } + /// Implementation of the trait `ForeignKeyable` for a reference of this type + /// calling this derive proc macro + impl canyon_sql::query::bounds::ForeignKeyable<&Self> for &#ty { + fn get_fk_column<'a>(&self, column: &'a str) -> Option<&dyn canyon_sql::query::QueryParameter> { + match column { + #(#field_idents_cloned),*, + _ => None + } + } + } + } +} diff --git a/canyon_macros/src/lib.rs b/canyon_macros/src/lib.rs index bd9cff0f..d1a98ff4 100755 --- a/canyon_macros/src/lib.rs +++ b/canyon_macros/src/lib.rs @@ -1,39 +1,32 @@ extern crate proc_macro; +extern crate regex; -mod canyon_entity_macro; #[cfg(feature = "migrations")] use canyon_macro::main_with_queries; +mod canyon_entity_macro; mod canyon_macro; +mod canyon_mapper_macro; +mod foreignkeyable_macro; mod query_operations; mod utils; -use canyon_entity_macro::parse_canyon_entity_proc_macro_attr; use proc_macro::TokenStream as CompilerTokenStream; -use proc_macro2::{Ident, TokenStream}; use quote::quote; -use syn::{DeriveInput, Fields, Type, Visibility}; - -use query_operations::{ - delete::{generate_delete_query_tokens, generate_delete_tokens}, - insert::{generate_insert_tokens, generate_multiple_insert_tokens}, - select::{ - generate_count_tokens, generate_find_all_query_tokens, generate_find_all_tokens, - generate_find_all_unchecked_tokens, generate_find_by_foreign_key_tokens, - generate_find_by_pk_tokens, generate_find_by_reverse_foreign_key_tokens, - }, - update::{generate_update_query_tokens, generate_update_tokens}, -}; +use syn::{DeriveInput, Error, parse_macro_input}; +use canyon_core::connection::get_canyon_tokio_runtime; use utils::{function_parser::FunctionParser, helpers, 
macro_tokens::MacroTokens}; +use crate::canyon_entity_macro::generate_canyon_entity_tokens; +use crate::canyon_mapper_macro::canyon_mapper_impl_tokens; +use crate::foreignkeyable_macro::foreignkeyable_impl_tokens; +use crate::query_operations::impl_crud_operations_trait_for_struct; +use canyon_entities::manager_builder::generated_enum_type_for_struct_data; use canyon_entities::{ entity::CanyonEntity, - manager_builder::{ - generate_enum_with_fields, generate_enum_with_fields_values, generate_user_struct, - }, - register_types::{CanyonRegisterEntity, CanyonRegisterEntityField}, - CANYON_REGISTER_ENTITIES, + manager_builder::{generate_enum_with_fields, generate_enum_with_fields_values}, }; +use canyon_migrations::migrations::handler::Migrations; /// Macro for handling the entry point to the program. /// @@ -45,14 +38,21 @@ use canyon_entities::{ /// the necessary operations for the migrations #[proc_macro_attribute] pub fn main(_meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerTokenStream { - let func_res = syn::parse::(input); - if func_res.is_err() { - return quote! { fn main() {} }.into(); + let func = parse_macro_input!(input as FunctionParser); + + if func.sig.ident != "main" { + // Ensure the function is literally named "main" + return Error::new( + func.sig.ident.span(), + "The #[canyon::main] macro can only be applied to `fn main()`", + ) + .to_compile_error() + .into(); } - // TODO check if the `canyon` macro it's attached only to main? - let func = func_res.ok().unwrap(); - let sign = func.sig; + let vis = func.sig; + let sign = func.vis; + let attrs = func.attrs; let body = func.block.stmts; #[allow(unused_mut, unused_assignments)] @@ -62,13 +62,14 @@ pub fn main(_meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerT migrations_tokens = main_with_queries(); } - // The final code wired in main() - quote! { - #sign { - canyon_sql::runtime::CANYON_TOKIO_RUNTIME + quote! 
{ // The final code wired in main() + #(#attrs)* + #vis #sign { + canyon_sql::runtime::get_canyon_tokio_runtime() .handle() .block_on( async { - canyon_sql::runtime::init_connections_cache().await; + canyon_sql::core::Canyon::init().await + .expect("Error initializing the connections POOL"); #migrations_tokens #(#body)* } @@ -78,9 +79,9 @@ pub fn main(_meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerT .into() } -#[proc_macro_attribute] /// Wraps the [`test`] proc macro in a convenient way to run tests within /// the tokio's current reactor +#[proc_macro_attribute] pub fn canyon_tokio_test( _meta: CompilerTokenStream, input: CompilerTokenStream, @@ -90,6 +91,7 @@ pub fn canyon_tokio_test( quote! { fn non_valid_test_fn() {} }.into() } else { let func = func_res.ok().unwrap(); + let vis = func.vis; let sign = func.sig; let body = func.block.stmts; let attrs = func.attrs; @@ -97,11 +99,12 @@ pub fn canyon_tokio_test( quote! { #[test] #(#attrs)* - #sign { - canyon_sql::runtime::CANYON_TOKIO_RUNTIME + #vis #sign { + canyon_sql::runtime::get_canyon_tokio_runtime() .handle() .block_on( async { - canyon_sql::runtime::init_connections_cache().await; + canyon_sql::core::Canyon::init().await + .expect("Error initializing the connections POOL"); #(#body)* }); } @@ -110,266 +113,38 @@ pub fn canyon_tokio_test( } } -/// Generates the enums that contains the `TypeFields` and `TypeFieldsValues` -/// that the query-builder requires for construct its queries -#[proc_macro_derive(Fields)] -pub fn querybuilder_fields(input: CompilerTokenStream) -> CompilerTokenStream { - let entity_res = syn::parse::(input); - - if entity_res.is_err() { - return entity_res - .expect_err("Unexpected error parsing the struct") - .into_compile_error() - .into(); - } - - // No errors detected on the parsing, so we can safely unwrap the parse result - let entity = entity_res.expect("Unexpected error parsing the struct"); - let _generated_enum_type_for_fields = 
generate_enum_with_fields(&entity); - let _generated_enum_type_for_fields_values = generate_enum_with_fields_values(&entity); - quote! { - use canyon_sql::crud::bounds::QueryParameter; - #_generated_enum_type_for_fields - #_generated_enum_type_for_fields_values - } - .into() -} - /// Takes data from the struct annotated with the `canyon_entity` macro to fill the Canyon Register /// where lives the data that Canyon needs to work. /// -/// Also, it's the responsible of generate the tokens for all the `Crud` methods available over +/// Also, it's the responsible for generate the tokens for all the `Crud` methods available over /// your type #[proc_macro_attribute] -pub fn canyon_entity( - _meta: CompilerTokenStream, - input: CompilerTokenStream, -) -> CompilerTokenStream { - let attrs = syn::parse_macro_input!(_meta as syn::AttributeArgs); - - let (table_name, schema_name, parsing_attribute_error) = - parse_canyon_entity_proc_macro_attr(attrs); - - let entity_res = syn::parse::(input); - - if entity_res.is_err() { - return entity_res - .expect_err("Unexpected error parsing the struct") - .into_compile_error() - .into(); - } - - // No errors detected on the parsing, so we can safely unwrap the parse result - let entity = entity_res.expect("Unexpected error parsing the struct"); - // Generate the bits of code that we should give back to the compiler - let generated_user_struct = generate_user_struct(&entity); - - // The identifier of the entities - let mut new_entity = CanyonRegisterEntity::default(); - let e = Box::leak(entity.struct_name.to_string().into_boxed_str()); - new_entity.entity_name = e; - new_entity.entity_db_table_name = table_name.unwrap_or(Box::leak( - helpers::default_database_table_name_from_entity_name(e).into_boxed_str(), - )); - new_entity.user_schema_name = schema_name; - - // The entity fields - for field in entity.fields.iter() { - let mut new_entity_field = CanyonRegisterEntityField { - field_name: field.name.to_string(), - field_type: 
field.get_field_type_as_string().replace(' ', ""), - ..Default::default() - }; - - field - .attributes - .iter() - .for_each(|attr| new_entity_field.annotations.push(attr.get_as_string())); - - new_entity.entity_fields.push(new_entity_field); - } - - // Fill the register with the data of the attached struct - CANYON_REGISTER_ENTITIES - .lock() - .expect("Error acquiring Mutex guard on Canyon Entity macro") - .push(new_entity); - - // Assemble everything - let tokens = quote! { - #generated_user_struct - }; - - // Pass the result back to the compiler - if let Some(macro_error) = parsing_attribute_error { - quote! { - #macro_error - #generated_user_struct - } - .into() - } else { - tokens.into() - } +pub fn canyon_entity(meta: CompilerTokenStream, input: CompilerTokenStream) -> CompilerTokenStream { + let attrs = syn::parse_macro_input!(meta as syn::AttributeArgs); + generate_canyon_entity_tokens(attrs, input).into() } /// Allows the implementors to auto-derive the `CrudOperations` trait, which defines the methods /// that will perform the database communication and the implementation of the queries for every /// type, as defined in the `CrudOperations` + `Transaction` traits. 
-#[proc_macro_derive(CanyonCrud)] +#[proc_macro_derive(CanyonCrud, attributes(canyon_crud))] pub fn crud_operations(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Construct a representation of Rust code as a syntax tree - // that we can manipulate + let ast: DeriveInput = syn::parse(input).expect("Error implementing CanyonCrud AST"); - // Calls the helper struct to build the tokens that generates the final CRUD methods - let ast: DeriveInput = - syn::parse(input).expect("Error parsing `Canyon Entity for generate the CRUD methods"); let macro_data = MacroTokens::new(&ast); - - let table_name_res = helpers::table_schema_parser(¯o_data); - - let table_schema_data = if let Err(err) = table_name_res { - return err.into(); + let macro_data = if let Err(err) = macro_data { + return err.to_compile_error().into(); } else { - table_name_res.ok().unwrap() + macro_data.unwrap() }; - // Build the trait implementation - impl_crud_operations_trait_for_struct(¯o_data, table_schema_data) -} - -fn impl_crud_operations_trait_for_struct( - macro_data: &MacroTokens<'_>, - table_schema_data: String, -) -> proc_macro::TokenStream { - let ty = macro_data.ty; - - // Builds the find_all() query - let _find_all_unchecked_tokens = - generate_find_all_unchecked_tokens(macro_data, &table_schema_data); - // Builds the find_all_result() query - let _find_all_tokens = generate_find_all_tokens(macro_data, &table_schema_data); - // Builds the find_all_query() query as a QueryBuilder - let _find_all_query_tokens = generate_find_all_query_tokens(macro_data, &table_schema_data); - - // Builds a COUNT(*) query over some table - let _count_tokens = generate_count_tokens(macro_data, &table_schema_data); - - // Builds the find_by_pk() query - let _find_by_pk_tokens = generate_find_by_pk_tokens(macro_data, &table_schema_data); - - // Builds the insert() query - let _insert_tokens = generate_insert_tokens(macro_data, &table_schema_data); - // Builds the insert_multi() query - let 
_insert_multi_tokens = generate_multiple_insert_tokens(macro_data, &table_schema_data); - - // Builds the update() queries - let _update_tokens = generate_update_tokens(macro_data, &table_schema_data); - // Builds the update() query as a QueryBuilder - let _update_query_tokens = generate_update_query_tokens(macro_data, &table_schema_data); - - // Builds the delete() queries - let _delete_tokens = generate_delete_tokens(macro_data, &table_schema_data); - - // Builds the delete() query as a QueryBuilder - let _delete_query_tokens = generate_delete_query_tokens(macro_data, &table_schema_data); - - // Search by foreign (d) key as Vec, cause Canyon supports multiple fields having FK annotation - let _search_by_fk_tokens: Vec<(TokenStream, TokenStream)> = - generate_find_by_foreign_key_tokens(macro_data); - let fk_method_signatures = _search_by_fk_tokens.iter().map(|(sign, _)| sign); - let fk_method_implementations = _search_by_fk_tokens.iter().map(|(_, m_impl)| m_impl); - - // The tokens for generating the methods that enable Canyon to retrieve the child entities that are of T type - // given a parent entity U: ForeignKeyable, as an associated function for the child type (T) - let _search_by_revese_fk_tokens: Vec<(TokenStream, TokenStream)> = - generate_find_by_reverse_foreign_key_tokens(macro_data, &table_schema_data); - let rev_fk_method_signatures = _search_by_revese_fk_tokens.iter().map(|(sign, _)| sign); - let rev_fk_method_implementations = - _search_by_revese_fk_tokens.iter().map(|(_, m_impl)| m_impl); - - // The autogenerated name for the trait that holds the fk and rev fk searches - let fk_trait_ident = Ident::new( - &format!("{}FkOperations", &ty.to_string()), - proc_macro2::Span::call_site(), - ); - - let crud_operations_tokens = quote! 
{ - // The find_all_result impl - #_find_all_tokens - - // The find_all impl - #_find_all_unchecked_tokens - - // The find_all_query impl - #_find_all_query_tokens - - // The COUNT(*) impl - #_count_tokens - - // The find_by_pk impl - #_find_by_pk_tokens - - // The insert impl - #_insert_tokens - - // The insert of multiple entities impl - #_insert_multi_tokens - - // The update impl - #_update_tokens - - // The update as a querybuilder impl - #_update_query_tokens - - // The delete impl - #_delete_tokens - - // The delete as querybuilder impl - #_delete_query_tokens - }; - - let tokens = if !_search_by_fk_tokens.is_empty() { - quote! { - #[canyon_sql::macros::async_trait] - impl canyon_sql::crud::CrudOperations<#ty> for #ty { - #crud_operations_tokens - } - - impl canyon_sql::crud::Transaction<#ty> for #ty {} + let table_name_res = helpers::table_schema_parser(¯o_data); - /// Hidden trait for generate the foreign key operations available - /// in Canyon without have to define them before hand in CrudOperations - /// because it's just impossible with the actual system (where the methods - /// are generated dynamically based on some properties of the `foreign_key` - /// annotation) - #[canyon_sql::macros::async_trait] - pub trait #fk_trait_ident<#ty> { - #(#fk_method_signatures)* - #(#rev_fk_method_signatures)* - } - #[canyon_sql::macros::async_trait] - impl #fk_trait_ident<#ty> for #ty - where #ty: - std::fmt::Debug + - canyon_sql::crud::CrudOperations<#ty> + - canyon_sql::crud::RowMapper<#ty> - { - #(#fk_method_implementations)* - #(#rev_fk_method_implementations)* - } - } + if let Ok(ts_data) = table_name_res { + impl_crud_operations_trait_for_struct(¯o_data, &ts_data) } else { - quote! 
{ - #[canyon_sql::macros::async_trait] - impl canyon_sql::crud::CrudOperations<#ty> for #ty { - #crud_operations_tokens - } - - impl canyon_sql::crud::Transaction<#ty> for #ty {} - } - }; - - tokens.into() + table_name_res.unwrap_err().into() + } } /// proc-macro for annotate struct fields that holds a foreign key relation. @@ -381,252 +156,46 @@ fn impl_crud_operations_trait_for_struct( pub fn implement_foreignkeyable_for_type( input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { - // Gets the data from the AST let ast: DeriveInput = syn::parse(input).unwrap(); - let ty = ast.ident; - - // Recovers the identifiers of the structs members - let fields = filter_fields(match ast.data { - syn::Data::Struct(ref s) => &s.fields, - _ => { - return syn::Error::new(ty.span(), "ForeignKeyable only works with Structs") - .to_compile_error() - .into() - } - }); - - let field_idents = fields.iter().map(|(_vis, ident)| { - let i = ident.to_string(); - quote! { - #i => Some(&self.#ident as &dyn canyon_sql::crud::bounds::QueryParameter<'_>) - } - }); - let field_idents_cloned = field_idents.clone(); - - quote! 
{ - /// Implementation of the trait `ForeignKeyable` for the type - /// calling this derive proc macro - impl canyon_sql::crud::bounds::ForeignKeyable for #ty { - fn get_fk_column(&self, column: &str) -> Option<&dyn canyon_sql::crud::bounds::QueryParameter<'_>> { - match column { - #(#field_idents),*, - _ => None - } - } - } - /// Implementation of the trait `ForeignKeyable` for a reference of this type - /// calling this derive proc macro - impl canyon_sql::crud::bounds::ForeignKeyable<&Self> for &#ty { - fn get_fk_column<'a>(&self, column: &'a str) -> Option<&dyn canyon_sql::crud::bounds::QueryParameter<'_>> { - match column { - #(#field_idents_cloned),*, - _ => None - } - } - } - }.into() + foreignkeyable_impl_tokens(ast).into() } #[proc_macro_derive(CanyonMapper)] pub fn implement_row_mapper_for_type(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Gets the data from the AST let ast: DeriveInput = syn::parse(input).unwrap(); - - // Recovers the identifiers of the structs members - let fields = fields_with_types(match ast.data { - syn::Data::Struct(ref s) => &s.fields, - _ => { - return syn::Error::new(ast.ident.span(), "CanyonMapper only works with Structs") - .to_compile_error() - .into() - } - }); - - // TODO: refactor the code below after the current bugfixes, to conditinally generate - // the required methods and populate the CanyonMapper trait dependencing on the cfg flags - // enabled with a more elegant solution (a fn for feature, for ex) - #[cfg(feature = "postgres")] - // Here it's where the incoming values of the DatabaseResult are wired into a new - // instance, mapping the fields of the type against the columns - let init_field_values = fields.iter().map(|(_vis, ident, _ty)| { - let ident_name = ident.to_string(); - quote! 
{ - #ident: row.try_get(#ident_name) - .expect(format!("Failed to retrieve the {} field", #ident_name).as_ref()) - } - }); - - #[cfg(feature = "mssql")] - let init_field_values_sqlserver = fields.iter().map(|(_vis, ident, ty)| { - let ident_name = ident.to_string(); - - if get_field_type_as_string(ty) == "String" { - quote! { - #ident: row.get::<&str, &str>(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - .to_string() - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::<&str, &str>(#ident_name) - .map( |x| x.to_owned() ) - } - } else if get_field_type_as_string(ty) == "NaiveDate" { - quote! { - #ident: row.get::(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty) == "NaiveTime" { - quote! { - #ident: row.get::(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty) == "NaiveDateTime" { - quote! 
{ - #ident: row.get::(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else if get_field_type_as_string(ty) == "DateTime" { - quote! { - #ident: row.get::(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - } - } else if get_field_type_as_string(ty).replace(' ', "") == "Option" { - quote! { - #ident: row.get::(#ident_name) - } - } else { - quote! { - #ident: row.get::<#ty, &str>(#ident_name) - .expect(format!("Failed to retrieve the `{}` field", #ident_name).as_ref()) - } - } - }); - - #[cfg(feature = "mysql")] - let init_field_values_mysql = fields.iter().map(|(_vis, ident, _ty)| { - let ident_name = ident.to_string(); - quote! { - #ident: row.get(#ident_name) - .expect(format!("Failed to retrieve the {} field", #ident_name).as_ref()) - } - }); - - // The type of the Struct - let ty = ast.ident; - - let mut impl_methods = quote! {}; // Collect methods conditionally - - #[cfg(feature = "postgres")] - impl_methods.extend(quote! { - fn deserialize_postgresql(row: &canyon_sql::db_clients::tokio_postgres::Row) -> #ty { - Self { - #(#init_field_values),* - } - } - }); - - #[cfg(feature = "mssql")] - impl_methods.extend(quote! { - fn deserialize_sqlserver(row: &canyon_sql::db_clients::tiberius::Row) -> #ty { - Self { - #(#init_field_values_sqlserver),* - } - } - }); - - #[cfg(feature = "mysql")] - impl_methods.extend(quote! { - fn deserialize_mysql(row: &canyon_sql::db_clients::mysql_async::Row) -> #ty { - Self { - #(#init_field_values_mysql),* - } - } - }); - - // Wrap everything in the shared `impl` block - let tokens = quote! 
{ - impl canyon_sql::crud::RowMapper for #ty { - #impl_methods - } + let macro_data = MacroTokens::new(&ast); + let macro_data = if let Err(err) = macro_data { + return err.to_compile_error().into(); + } else { + macro_data.unwrap() }; - - tokens.into() + canyon_mapper_impl_tokens(macro_data).into() } -/// Helper for generate the fields data for the Custom Derives Macros -fn filter_fields(fields: &Fields) -> Vec<(Visibility, Ident)> { - fields - .iter() - .map(|field| (field.vis.clone(), field.ident.as_ref().unwrap().clone())) - .collect::>() -} +#[proc_macro_derive(Fields)] +pub fn querybuilder_fields(input: CompilerTokenStream) -> CompilerTokenStream { + let entity_res = syn::parse::(input); -fn fields_with_types(fields: &Fields) -> Vec<(Visibility, Ident, Type)> { - fields - .iter() - .map(|field| { - ( - field.vis.clone(), - field.ident.as_ref().unwrap().clone(), - field.ty.clone(), - ) - }) - .collect::>() -} + if entity_res.is_err() { + return entity_res + .expect_err("Unexpected error parsing the struct") + .into_compile_error() + .into(); + } + + // No errors detected on the parsing, so we can safely unwrap the parse result + let entity = entity_res.expect("Unexpected error parsing the struct"); + let generated_enum_type_for_struct_data = generated_enum_type_for_struct_data(&entity); + let generated_enum_type_for_fields = generate_enum_with_fields(&entity); + let generated_enum_type_for_fields_values = generate_enum_with_fields_values(&entity); -#[cfg(feature = "mssql")] -use quote::ToTokens; -#[cfg(feature = "mssql")] -fn get_field_type_as_string(typ: &Type) -> String { - match typ { - Type::Array(type_) => type_.to_token_stream().to_string(), - Type::BareFn(type_) => type_.to_token_stream().to_string(), - Type::Group(type_) => type_.to_token_stream().to_string(), - Type::ImplTrait(type_) => type_.to_token_stream().to_string(), - Type::Infer(type_) => type_.to_token_stream().to_string(), - Type::Macro(type_) => type_.to_token_stream().to_string(), - 
Type::Never(type_) => type_.to_token_stream().to_string(), - Type::Paren(type_) => type_.to_token_stream().to_string(), - Type::Path(type_) => type_.to_token_stream().to_string(), - Type::Ptr(type_) => type_.to_token_stream().to_string(), - Type::Reference(type_) => type_.to_token_stream().to_string(), - Type::Slice(type_) => type_.to_token_stream().to_string(), - Type::TraitObject(type_) => type_.to_token_stream().to_string(), - Type::Tuple(type_) => type_.to_token_stream().to_string(), - Type::Verbatim(type_) => type_.to_token_stream().to_string(), - _ => "".to_owned(), + quote! { + use canyon_sql::query::bounds::TableMetadata; + use canyon_sql::query::bounds::FieldIdentifier; + + #generated_enum_type_for_struct_data + #generated_enum_type_for_fields + #generated_enum_type_for_fields_values } + .into() } diff --git a/canyon_macros/src/query_operations/consts.rs b/canyon_macros/src/query_operations/consts.rs new file mode 100644 index 00000000..a91170c2 --- /dev/null +++ b/canyon_macros/src/query_operations/consts.rs @@ -0,0 +1,69 @@ +#![allow(dead_code)] + +use std::cell::RefCell; + +use proc_macro2::{Span, TokenStream}; +use quote::quote; +use syn::{Ident, Type}; + +pub const UNAVAILABLE_CRUD_OP_ON_INSTANCE: &str = "Operation is unavailable. T doesn't contain a #[primary_key]\ + annotation. You must construct the query with the QueryBuilder type\ + (_query method for the CrudOperations implementors"; + +pub(crate) fn generate_no_pk_error() -> TokenStream { + let err_msg = UNAVAILABLE_CRUD_OP_ON_INSTANCE; + quote! { + return Err( + std::io::Error::new( + std::io::ErrorKind::Unsupported, + #err_msg + ).into_inner().unwrap() + ); + } +} + +pub(crate) fn generate_default_db_conn_tokens() -> TokenStream { + quote! { + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + default_db_conn + } +} + +thread_local! 
{ + pub static USER_MOCK_TY: RefCell = RefCell::new(Ident::new("User", Span::call_site())); + pub static USER_MOCK_MAPPER_TY: RefCell = RefCell::new(Ident::new("User", Span::call_site())); + pub static VOID_RET_TY: RefCell = RefCell::new({ + let ret_ty: Type = syn::parse_str("()").expect("Failed to parse unit type"); + quote! { #ret_ty } + }); + pub static PK_MOCK_FIELD_VALUE: RefCell = RefCell::new({ + quote! { 1 } + }); +} + +pub const RAW_RET_TY: &str = "Vec < User >"; +pub const RES_RET_TY: &str = + "Result < Vec < User > , Box < (dyn std :: error :: Error + Send + Sync) >>"; +pub const RES_VOID_RET_TY: &str = + "Result < () , Box < (dyn std :: error :: Error + Send + Sync) >>"; +pub const RES_RET_TY_LT: &str = + "Result < Vec < User > , Box < (dyn std :: error :: Error + Send + Sync + 'a) >>"; +pub const RES_VOID_RET_TY_LT: &str = + "Result < () , Box < (dyn std :: error :: Error + Send + Sync + 'a) >>"; +pub const OPT_RET_TY_LT: &str = + "Result < Option < User > , Box < (dyn std :: error :: Error + Send + Sync + 'a) >>"; +pub const I64_RET_TY: &str = "Result < i64 , Box < (dyn std :: error :: Error + Send + Sync) >>"; +pub const I64_RET_TY_LT: &str = + "Result < i64 , Box < (dyn std :: error :: Error + Send + Sync + 'a) >>"; + +pub const MAPS_TO: &str = "into_results :: < User > ()"; +pub const LT_CONSTRAINT: &str = "< 'a "; +pub const INPUT_PARAM: &str = "input : I"; +pub const VALUE_PARAM: &str = "& 'a dyn canyon_sql :: core :: QueryParameter < 'a >"; + +pub const WITH_WHERE_BOUNDS: &str = "where I : canyon_sql :: core :: DbConnection + Send + 'a "; + +pub const FIND_BY_PK_ERR_NO_PK: &str = "You can't use the 'find_by_pk' associated function on a \ + CanyonEntity that does not have a #[primary_key] annotation. 
\ + If you need to perform an specific search, use the Querybuilder instead."; diff --git a/canyon_macros/src/query_operations/delete.rs b/canyon_macros/src/query_operations/delete.rs index cabfa37f..579cdf54 100644 --- a/canyon_macros/src/query_operations/delete.rs +++ b/canyon_macros/src/query_operations/delete.rs @@ -1,105 +1,139 @@ -use proc_macro2::TokenStream; +use crate::utils::macro_tokens::MacroTokens; +use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; +use canyon_core::query::querybuilder::TableMetadata; -use crate::utils::macro_tokens::MacroTokens; +pub fn generate_delete_tokens( + macro_data: &MacroTokens, + table_schema_data: &TableMetadata, +) -> TokenStream { + let delete_method_ops = generate_delete_method_tokens(macro_data, table_schema_data); + let delete_entity_ops = generate_delete_entity_tokens(table_schema_data); + let delete_querybuilder_tokens = generate_delete_querybuilder_tokens(&table_schema_data.sql()); + + quote! { + #delete_method_ops + #delete_entity_ops + #delete_querybuilder_tokens + } +} /// Generates the TokenStream for the __delete() CRUD operation /// returning a result, indicating a possible failure querying the database -pub fn generate_delete_tokens(macro_data: &MacroTokens, table_schema_data: &String) -> TokenStream { - let ty = macro_data.ty; +pub fn generate_delete_method_tokens( + macro_data: &MacroTokens, + table_schema_data: &TableMetadata, +) -> TokenStream { + let mut delete_ops_tokens = TokenStream::new(); - let fields = macro_data.get_struct_fields(); + let ty = macro_data.ty; + let (_, ty_generics, _) = macro_data.generics.split_for_impl(); let pk = macro_data.get_primary_key_annotation(); - if let Some(primary_key) = pk { - let pk_field = fields - .iter() - .find(|f| *f.to_string() == primary_key) - .expect( - "Something really bad happened finding the Ident for the pk field on the delete", - ); - let pk_field_value = - quote! 
{ &self.#pk_field as &dyn canyon_sql::crud::bounds::QueryParameter<'_> }; + let delete_signature = quote! { + /// Deletes from a database entity the row that matches + /// the current instance of a T type, returning a result + /// indicating a possible failure querying the database. + async fn delete(&self) -> Result<(), Box> + }; + let delete_with_signature = quote! { + /// Deletes from a database entity the row that matches + /// the current instance of a T type, returning a result + /// indicating a possible failure querying the database with the specified datasource. + async fn delete_with<'canyon, 'err, I>(&self, input: I) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'err)>> + where I: canyon_sql::connection::DbConnection + Send + 'canyon + }; - quote! { - /// Deletes from a database entity the row that matches - /// the current instance of a T type, returning a result - /// indicating a possible failure querying the database. - async fn delete(&self) -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>> { - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - format!("DELETE FROM {} WHERE {:?} = $1", #table_schema_data, #primary_key), - &[#pk_field_value], - "" - ).await?; + if let Some(primary_key) = pk { + let pk_field = Ident::new(&primary_key, Span::call_site()); + let pk_field_value = quote! { &self.#pk_field as &dyn canyon_sql::query::QueryParameter }; + let delete_stmt = format!( + "DELETE FROM {} WHERE {:?} = $1", + table_schema_data, primary_key + ); + delete_ops_tokens.extend(quote! { + #delete_signature { + <#ty #ty_generics as canyon_sql::core::Transaction>::execute(#delete_stmt, &[#pk_field_value], "").await?; Ok(()) } - /// Deletes from a database entity the row that matches - /// the current instance of a T type, returning a result - /// indicating a possible failure querying the database with the specified datasource. 
- async fn delete_datasource<'a>(&self, datasource_name: &'a str) - -> Result<(), Box<(dyn std::error::Error + Send + Sync + 'static)>> - { - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - format!("DELETE FROM {} WHERE {:?} = $1", #table_schema_data, #primary_key), - &[#pk_field_value], - datasource_name - ).await?; - + #delete_with_signature { + input.execute(#delete_stmt, &[#pk_field_value]).await?; Ok(()) } - } + }); } else { // Delete operation over an instance isn't available without declaring a primary key. // The delete querybuilder variant must be used for the case when there's no pk declared - quote! { - async fn delete(&self) - -> Result<(), Box> - { - Err(std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'delete' method on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." - ).into_inner().unwrap()) - } + let no_pk_error = quote! { + Err(std::io::Error::new( + std::io::ErrorKind::Unsupported, + "You can't use the 'delete' method on a \ + CanyonEntity that does not have a #[primary_key] annotation. \ + If you need to perform an specific search, use the Querybuilder instead." + ).into_inner().unwrap()) + }; - async fn delete_datasource<'a>(&self, datasource_name: &'a str) - -> Result<(), Box> - { - Err(std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'delete_datasource' method on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." - ).into_inner().unwrap()) - } - } + delete_ops_tokens.extend(quote! { + #delete_signature { #no_pk_error } + #delete_with_signature { #no_pk_error } + }); + } + + delete_ops_tokens +} + +pub fn generate_delete_entity_tokens(table_schema_data: &TableMetadata,) -> TokenStream { + let delete_entity_signature = quote! 
{ + async fn delete_entity<'canyon, 'err, Entity>(entity: &'canyon Entity) + -> Result<(), Box> + where Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon> + + Sync + + 'canyon + }; + + let delete_entity_with_signature = quote! { + async fn delete_entity_with<'canyon, 'err, Entity, Input>(entity: &'canyon Entity, input: Input) + -> Result<(), Box> + where + Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon> + + Sync + + 'canyon, + Input: canyon_sql::connection::DbConnection + Send + 'canyon + }; + + let delete_entity_body = __details::generate_delete_entity_body(&table_schema_data.sql()); + let delete_entity_with_body = __details::generate_delete_entity_with_body(&table_schema_data.sql()); + + quote! { + #delete_entity_signature { #delete_entity_body } + #delete_entity_with_signature { #delete_entity_with_body } } } /// Generates the TokenStream for the __delete() CRUD operation as a /// [`query_elements::query_builder::QueryBuilder<'a, #ty>`] -pub fn generate_delete_query_tokens( - macro_data: &MacroTokens, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - +fn generate_delete_querybuilder_tokens(table_schema_data: &str) -> TokenStream { quote! { - /// Generates a [`canyon_sql::query::DeleteQueryBuilder`] + /// Generates a [`canyon_sql::query::querybuilder::DeleteQueryBuilder`] /// that allows you to customize the query by adding parameters and constrains dynamically. 
/// /// It performs an `DELETE FROM table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` - fn delete_query<'a>() -> canyon_sql::query::DeleteQueryBuilder<'a, #ty> { - canyon_sql::query::DeleteQueryBuilder::new(#table_schema_data, "") + fn delete_query<'canyon, 'err>() -> Result< + canyon_sql::query::querybuilder::DeleteQueryBuilder<'canyon>, + Box<(dyn std::error::Error + Send + Sync + 'err)> + > where + 'canyon: 'err { + canyon_sql::query::querybuilder::DeleteQueryBuilder::new(#table_schema_data) } - /// Generates a [`canyon_sql::query::DeleteQueryBuilder`] + /// Generates a [`canyon_sql::query::querybuilder::DeleteQueryBuilder`] /// that allows you to customize the query by adding parameters and constrains dynamically. /// /// It performs an `DELETE FROM table_name`, where `table_name` it's the name of your @@ -108,10 +142,206 @@ pub fn generate_delete_query_tokens( /// `canyon_macro(table_name = "table_name", schema = "schema")` /// /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. 
- fn delete_query_datasource<'a>(datasource_name: &'a str) -> canyon_sql::query::DeleteQueryBuilder<'a, #ty> { - canyon_sql::query::DeleteQueryBuilder::new(#table_schema_data, datasource_name) + /// described in the configuration file, selected with the input parameter + fn delete_query_with<'canyon, 'err>(database_type: canyon_sql::connection::DatabaseType) + -> Result< + canyon_sql::query::querybuilder::DeleteQueryBuilder<'canyon>, + Box<(dyn std::error::Error + Send + Sync + 'err)> + > where + 'canyon: 'err { + canyon_sql::query::querybuilder::DeleteQueryBuilder::new(#table_schema_data, database_type) + } + } +} + +mod __details { + use super::*; + use crate::query_operations::consts; + + pub(crate) fn generate_delete_entity_body(table_schema_data: &str) -> TokenStream { + let delete_entity_core_logic = generate_delete_entity_pk_body_logic(table_schema_data); + let no_pk_err = consts::generate_no_pk_error(); + + quote! { + if let Some(primary_key) = entity.primary_key() { + #delete_entity_core_logic + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + let _ = default_db_conn.execute(&delete_stmt, &[pk_actual_value]).await?; + Ok(()) + } else { + #no_pk_err + } + } + } + + pub(crate) fn generate_delete_entity_with_body( + table_schema_data: &str + ) -> TokenStream { + let delete_entity_core_logic = generate_delete_entity_pk_body_logic(table_schema_data); + let no_pk_err = consts::generate_no_pk_error(); + + quote! { + if let Some(primary_key) = entity.primary_key() { + #delete_entity_core_logic + let _ = input.execute(&delete_stmt, &[pk_actual_value]).await?; + Ok(()) + } else { + #no_pk_err + } + } + } + + fn generate_delete_entity_pk_body_logic(table_schema_data: &str) -> TokenStream { + quote! 
{ + // let pk_actual_value = &entity.primary_key_actual_value() as &dyn canyon_sql::query::QueryParameter; + let pk_actual_value = entity.primary_key_actual_value(); + let delete_stmt = format!( + "DELETE FROM {} WHERE {:?} = $1", + #table_schema_data, primary_key + ); } } } + +// +// // NOTE: The delete operations shouldn't be using TransactionMethod::QueryRows +// // This should be refactored on the future +// mod __details { +// +// use super::*; +// use crate::query_operations::doc_comments; +// use crate::query_operations::macro_template::{MacroOperationBuilder, TransactionMethod}; +// +// pub fn create_delete_macro( +// ty: &Ident, +// stmt: &str, +// pk_field_value: &TokenStream, +// ret_ty: &TokenStream, +// ) -> TokenStream { +// MacroOperationBuilder::new() +// .fn_name("delete") +// .with_self_as_ref() +// .user_type(ty) +// .return_type_ts(ret_ty) +// .raw_return() +// .add_doc_comment(doc_comments::DELETE) +// .query_string(stmt) +// .forwarded_parameters(quote! {&[#pk_field_value]}) +// .propagate_transaction_result() +// .with_transaction_method(TransactionMethod::Execute) +// .raw_return() +// .with_no_result_value() +// +// } +// +// pub fn create_delete_with_macro( +// ty: &Ident, +// stmt: &str, +// pk_field_value: &TokenStream, +// ret_ty: &TokenStream, +// ) -> MacroOperationBuilder { +// MacroOperationBuilder::new() +// .fn_name("delete_with") +// .with_self_as_ref() +// .with_input_param() +// .user_type(ty) +// .return_type_ts(ret_ty) +// .raw_return() +// .add_doc_comment(doc_comments::DELETE) +// .add_doc_comment(doc_comments::DS_ADVERTISING) +// .query_string(stmt) +// .forwarded_parameters(quote! 
{&[#pk_field_value]}) +// .propagate_transaction_result() +// .with_transaction_method(TransactionMethod::Execute) +// .raw_return() +// .with_no_result_value() +// } +// +// pub fn create_delete_err_macro(ty: &Ident, ret_ty: &TokenStream) -> MacroOperationBuilder { +// MacroOperationBuilder::new() +// .fn_name("delete") +// .with_self_as_ref() +// .user_type(ty) +// .return_type_ts(ret_ty) +// .raw_return() +// .add_doc_comment(doc_comments::UNAVAILABLE_CRUD_OP_ON_INSTANCE) +// .with_direct_error_return(doc_comments::UNAVAILABLE_CRUD_OP_ON_INSTANCE) +// } +// +// pub fn create_delete_err_with_macro(ty: &Ident, ret_ty: &TokenStream) -> MacroOperationBuilder { +// MacroOperationBuilder::new() +// .fn_name("delete_with") +// .with_self_as_ref() +// .with_input_param() +// .user_type(ty) +// .return_type_ts(ret_ty) +// .raw_return() +// .add_doc_comment(doc_comments::UNAVAILABLE_CRUD_OP_ON_INSTANCE) +// .with_direct_error_return(doc_comments::UNAVAILABLE_CRUD_OP_ON_INSTANCE) +// } +// } +// +// #[cfg(test)] +// mod delete_tests { +// use super::__details::*; +// use crate::query_operations::consts::*; +// +// const DELETE_MOCK_STMT: &str = "DELETE FROM public.user WHERE user.id = 1"; +// +// #[test] +// fn test_macro_builder_delete() { +// let delete_builder = create_delete_macro( +// &USER_MOCK_TY.with(|user_mock_ty| user_mock_ty.borrow().clone()), +// DELETE_MOCK_STMT, +// &PK_MOCK_FIELD_VALUE.with(|pk_field_mock_value| pk_field_mock_value.borrow().clone()), +// &VOID_RET_TY.with(|void_ret_ty| void_ret_ty.borrow().clone()), +// ); +// let delete = delete_builder.generate_tokens().to_string(); +// +// assert!(delete.contains("async fn delete")); +// assert!(delete.contains(RES_VOID_RET_TY)); +// } +// +// #[test] +// fn test_macro_builder_delete_with() { +// let delete_builder = create_delete_with_macro( +// &USER_MOCK_TY.with(|user_mock_ty| user_mock_ty.borrow().clone()), +// DELETE_MOCK_STMT, +// &PK_MOCK_FIELD_VALUE.with(|pk_field_mock_value| 
pk_field_mock_value.borrow().clone()), +// &VOID_RET_TY.with(|void_ret_ty| void_ret_ty.borrow().clone()), +// ); +// let delete_with = delete_builder.generate_tokens().to_string(); +// +// assert!(delete_with.contains("async fn delete_with")); +// assert!(delete_with.contains(RES_VOID_RET_TY_LT)); +// assert!(delete_with.contains(LT_CONSTRAINT)); +// assert!(delete_with.contains(INPUT_PARAM)); +// } +// +// #[test] +// fn test_macro_builder_delete_err() { +// let delete_err_builder = create_delete_err_macro( +// &USER_MOCK_TY.with(|user_mock_ty| user_mock_ty.borrow().clone()), +// &VOID_RET_TY.with(|void_ret_ty| void_ret_ty.borrow().clone()), +// ); +// let delete_err = delete_err_builder.generate_tokens().to_string(); +// +// assert!(delete_err.contains("async fn delete")); +// assert!(delete_err.contains(RES_VOID_RET_TY)); +// } +// +// #[test] +// fn test_macro_builder_delete_err_with() { +// let delete_err_with_builder = create_delete_err_with_macro( +// &USER_MOCK_TY.with(|user_mock_ty| user_mock_ty.borrow().clone()), +// &VOID_RET_TY.with(|void_ret_ty| void_ret_ty.borrow().clone()), +// ); +// let delete_err_with = delete_err_with_builder.generate_tokens().to_string(); +// +// assert!(delete_err_with.contains("async fn delete_with")); +// assert!(delete_err_with.contains(RES_VOID_RET_TY_LT)); +// assert!(delete_err_with.contains(LT_CONSTRAINT)); +// assert!(delete_err_with.contains(INPUT_PARAM)); +// } +// } diff --git a/canyon_macros/src/query_operations/doc_comments.rs b/canyon_macros/src/query_operations/doc_comments.rs new file mode 100644 index 00000000..401e5b5e --- /dev/null +++ b/canyon_macros/src/query_operations/doc_comments.rs @@ -0,0 +1,36 @@ +#![allow(dead_code)] + +pub const SELECT_ALL_BASE_DOC_COMMENT: &str = "Performs a `SELECT * FROM table_name`, where `table_name` it's \ + the name of your entity but converted to the corresponding \ + database convention. P.ej. 
PostgreSQL prefers table names declared \ + with snake_case identifiers."; + +pub const SELECT_QUERYBUILDER_DOC_COMMENT: &str = "Generates a [`canyon_sql::query::querybuilder::SelectQueryBuilder`] \ + that allows you to customize the query by adding parameters and constrains dynamically. \ + \ + It performs a `SELECT * FROM table_name`, where `table_name` it's the name of your \ + entity but converted to the corresponding database convention, \ + unless concrete values are set on the available parameters of the \ + `canyon_macro => table_name = \"table_name\", schema = \"schema\")`"; + +pub const FIND_BY_PK: &str = "Finds an element on the queried table that matches the \ + value of the field annotated with the `primary_key` attribute, \ + filtering by the column that it's declared as the primary \ + key on the database. \ + \ + *NOTE:* This operation it's only available if the [`CanyonEntity`] contains \ + some field declared as primary key. \ + \ + *returns:* a [`Result, Error>`], wrapping a possible failure \ + querying the database, or, if no errors happens, a success containing \ + and Option with the data found wrapped in the Some(T) variant, \ + or None if the value isn't found on the table."; + +pub const DS_ADVERTISING: &str = "The query it's made against the database with the configured datasource \ + described in the configuration file, and selected with the [`&str`] \ + passed as parameter."; + +pub const DELETE: &str = "Deletes from a database entity the row that matches + the current instance of a T type based on the actual value of the primary + key field, returning a result + indicating a possible failure querying the database."; diff --git a/canyon_macros/src/query_operations/foreign_key.rs b/canyon_macros/src/query_operations/foreign_key.rs new file mode 100644 index 00000000..56790d5f --- /dev/null +++ b/canyon_macros/src/query_operations/foreign_key.rs @@ -0,0 +1,217 @@ +use crate::utils::helpers::database_table_name_to_struct_ident; +use 
crate::utils::macro_tokens::MacroTokens; +use canyon_entities::field_annotation::EntityFieldAnnotation; +use proc_macro2::{Ident, TokenStream}; +use quote::quote; +use canyon_core::query::querybuilder::TableMetadata; + +pub fn generate_find_by_fk_ops( + macro_data: &MacroTokens<'_>, + table_schema_data: &TableMetadata +) -> TokenStream { + let ty = ¯o_data.ty; + + // Search by foreign (d) key as Vec, cause Canyon supports multiple fields having FK annotation + let _search_by_fk_tokens: Vec<(TokenStream, TokenStream)> = + generate_find_by_foreign_key_tokens(macro_data); + let fk_method_signatures = _search_by_fk_tokens.iter().map(|(sign, _)| sign); + let fk_method_implementations = _search_by_fk_tokens.iter().map(|(_, m_impl)| m_impl); + + // The tokens for generating the methods that enable Canyon to retrieve the child entities that are of T type + // given a parent entity U: ForeignKeyable, as an associated function for the child type (T) + let search_by_reverse_fk_tokens: Vec<(TokenStream, TokenStream)> = + generate_find_by_reverse_foreign_key_tokens(macro_data, &table_schema_data.sql()); + let rev_fk_method_signatures = search_by_reverse_fk_tokens.iter().map(|(sign, _)| sign); + let rev_fk_method_implementations = + search_by_reverse_fk_tokens.iter().map(|(_, m_impl)| m_impl); + + // The autogenerated name for the trait that holds the fk and rev fk searches + let fk_trait_ident = Ident::new( + &format!("{}FkOperations", &ty.to_string()), + proc_macro2::Span::call_site(), + ); + + if search_by_reverse_fk_tokens.is_empty() { + return quote! {}; // early guard + } + + quote! 
{ + /// Hidden trait for generate the foreign key operations available + /// in Canyon without have to define them beforehand in CrudOperations + /// because it's just impossible with the actual system (where the methods + /// are generated dynamically based on some properties of the `foreign_key` + /// annotation) + pub trait #fk_trait_ident<#ty> { + #(#fk_method_signatures)* + #(#rev_fk_method_signatures)* + } + impl #fk_trait_ident<#ty> for #ty + where #ty: + std::fmt::Debug + + canyon_sql::core::RowMapper + { + #(#fk_method_implementations)* + #(#rev_fk_method_implementations)* + } + } +} + +/// Generates the TokenStream for build the search by foreign key feature, also as a method instance +/// of a T type of as an associated function of same T type, but wrapped as a Result, representing +/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable +/// derive macro on the parent side of the relation +fn generate_find_by_foreign_key_tokens( + macro_data: &MacroTokens<'_>, +) -> Vec<(TokenStream, TokenStream)> { + let mut fk_quotes: Vec<(TokenStream, TokenStream)> = Vec::new(); + + for (field_ident, fk_annot) in macro_data.get_fk_annotations().iter() { + if let EntityFieldAnnotation::ForeignKey(table, column) = fk_annot { + let method_name = "search_".to_owned() + table; + + // TODO this is not a good implementation. We must try to capture the + // related entity in some way, and compare it with something else + let fk_ty = database_table_name_to_struct_ident(table); + + // Generate and identifier for the method based on the convention of "search_related_types" + // where types is a placeholder for the plural name of the type referenced + let method_name_ident = Ident::new(&method_name, proc_macro2::Span::call_site()); + let method_name_ident_with = Ident::new( + &format!("{}_with", &method_name), + proc_macro2::Span::call_site(), + ); + let quoted_method_signature: TokenStream = quote! 
{ + async fn #method_name_ident<'a>(&self) -> + Result, Box> + }; + let quoted_with_method_signature: TokenStream = quote! { + async fn #method_name_ident_with<'a, I>(&self, input: I) -> + Result, Box> + where I: canyon_sql::connection::DbConnection + Send + 'a + }; + + let stmt = format!( + "SELECT * FROM {} WHERE {} = $1", + table, + format!("\"{column}\"").as_str(), + ); + fk_quotes.push(( + quote! { #quoted_method_signature; }, + quote! { + /// Searches the parent entity (if exists) for this type + #quoted_method_signature { + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + default_db_conn.query_one::<#fk_ty>( + #stmt, + &[&self.#field_ident as &dyn canyon_sql::query::QueryParameter] + ).await + } + }, + )); + + fk_quotes.push(( + quote! { #quoted_with_method_signature; }, + quote! { + /// Searches the parent entity (if exists) for this type with the specified datasource + #quoted_with_method_signature { + input.query_one::<#fk_ty>( + #stmt, + &[&self.#field_ident as &dyn canyon_sql::query::QueryParameter] + ).await + } + }, + )); + } + } + + fk_quotes +} + +/// Generates the TokenStream for build the __search_by_foreign_key() CRUD +/// associated function, but wrapped as a Result, representing +/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable +/// derive macro on the parent side of the relation +fn generate_find_by_reverse_foreign_key_tokens( + macro_data: &MacroTokens<'_>, + table_schema_data: &str +) -> Vec<(TokenStream, TokenStream)> { + let mut rev_fk_quotes: Vec<(TokenStream, TokenStream)> = Vec::new(); + let ty = macro_data.ty; + let mapper_ty = macro_data + .retrieve_mapping_target_type() + .as_ref() + .unwrap_or(ty); + + for (field_ident, fk_annot) in macro_data.get_fk_annotations().iter() { + if let EntityFieldAnnotation::ForeignKey(table, column) = fk_annot { + let method_name = format!("search_{table}_childrens"); + + // Generate and identifier for the 
method based on the convention of "search_by__" (note the double underscore) + // plus the 'table_name' property of the ForeignKey annotation + let method_name_ident = Ident::new(&method_name, proc_macro2::Span::call_site()); + let method_name_ident_with = Ident::new( + &format!("{}_with", &method_name), + proc_macro2::Span::call_site(), + ); + let quoted_method_signature: TokenStream = quote! { + async fn #method_name_ident<'a, F>(value: &F) + -> Result, Box> + where + F: canyon_sql::query::bounds::ForeignKeyable + Send + Sync + }; + let quoted_with_method_signature: TokenStream = quote! { + async fn #method_name_ident_with<'a, F, I> (value: &F, input: I) + -> Result, Box> + where + F: canyon_sql::query::bounds::ForeignKeyable + Send + Sync, + I: canyon_sql::connection::DbConnection + Send + 'a + }; + + let f_ident = field_ident.to_string(); + let lookup_value = quote! { + value.get_fk_column(#column) + .ok_or_else(|| format!( + "Column: {:?} not found in type: {:?}", #column, #table + ))?; + }; + + let stmt = quote! {&format!( + "SELECT * FROM {} WHERE {} = $1", + #table_schema_data, + format!("\"{}\"", #f_ident).as_str() + )}; + + rev_fk_quotes.push(( + quote! { #quoted_method_signature; }, + quote! { + /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, + /// performs a search to find the children that belong to that concrete parent. + #quoted_method_signature + { + let lookup_value = #lookup_value; + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + default_db_conn.query::<&str, #mapper_ty>(#stmt, &[lookup_value]).await + } + }, + )); + + rev_fk_quotes.push(( + quote! { #quoted_with_method_signature; }, + quote! { + /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, + /// performns a search to find the children that belong to that concrete parent + /// with the specified datasource. 
+ #quoted_with_method_signature + { + let lookup_value = #lookup_value; + input.query::<&str, #mapper_ty>(#stmt, &[lookup_value]).await + } + }, + )); + } + } + + rev_fk_quotes +} diff --git a/canyon_macros/src/query_operations/insert.rs b/canyon_macros/src/query_operations/insert.rs index c6e5e205..12e70e55 100644 --- a/canyon_macros/src/query_operations/insert.rs +++ b/canyon_macros/src/query_operations/insert.rs @@ -1,202 +1,243 @@ +use crate::utils::macro_tokens::MacroTokens; use proc_macro2::TokenStream; use quote::quote; +use canyon_core::query::querybuilder::TableMetadata; -use crate::utils::macro_tokens::MacroTokens; +pub fn generate_insert_tokens( + macro_data: &MacroTokens, + table_schema_data: &TableMetadata, +) -> TokenStream { + let insert_method_ops = generate_insert_method_tokens(macro_data, &table_schema_data.sql()); + let insert_entity_ops = generate_insert_entity_function_tokens(&table_schema_data.sql()); + // let multi_insert_tokens = generate_multiple_insert_tokens(macro_data, table_schema_data); -/// Generates the TokenStream for the _insert_result() CRUD operation -pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &String) -> TokenStream { - let ty = macro_data.ty; + quote! { + #insert_method_ops + #insert_entity_ops + // #multi_insert_tokens + } +} - // Retrieves the fields of the Struct as a collection of Strings, already parsed - // the condition of remove the primary key if it's present and it's autoincremental - let insert_columns = macro_data.get_column_names_pk_parsed().join(", "); +// Generates the TokenStream for the _insert operation +pub fn generate_insert_method_tokens( + macro_data: &MacroTokens, + table_schema_data: &str +) -> TokenStream { + let insert_signature = quote! { + async fn insert<'a>(&mut self) + -> Result<(), Box> + }; + let insert_with_signature = quote! 
{ + async fn insert_with<'a, I>(&mut self, input: I) + -> Result<(), Box> + where + I: canyon_sql::connection::DbConnection + Send + 'a + }; - // Returns a String with the generic $x placeholder for the query parameters. - let placeholders = macro_data.placeholders_generator(); + let insert_body; + let insert_with_body; + let insert_values; - // Retrieves the fields of the Struct - let fields = macro_data.get_struct_fields(); + let is_mapper_ty_present = macro_data.retrieve_mapping_target_type().is_some(); + if is_mapper_ty_present { + let raised_err = __details::generate_unsupported_operation_err(); + insert_body = raised_err.clone(); // TODO: Can't we do it better? + insert_with_body = raised_err; + insert_values = quote! {}; + } else { + let stmt = __details::generate_insert_sql_statement(macro_data, table_schema_data); + insert_values = __details::generate_insert_fn_values_slice_expr(macro_data); + insert_body = __details::generate_insert_fn_body_tokens(macro_data, &stmt, false); + insert_with_body = __details::generate_insert_fn_body_tokens(macro_data, &stmt, true); + }; - let insert_values = fields.iter().map(|ident| { - quote! { &self.#ident } - }); - let insert_values_cloned = insert_values.clone(); + quote! { + #insert_signature { + #insert_values + #insert_body + } - let primary_key = macro_data.get_primary_key_annotation(); + #insert_with_signature { + #insert_values + #insert_with_body + } + } +} - let remove_pk_value_from_fn_entry = if let Some(pk_index) = macro_data.get_pk_index() { - quote! { values.remove(#pk_index) } - } else { - quote! {} +pub fn generate_insert_entity_function_tokens(table_schema_data: &str,) -> TokenStream { + let insert_entity_signature = quote! 
{ + async fn insert_entity<'canyon_lt, 'err_lt, Entity>(entity: &'canyon_lt mut Entity) + -> Result<(), Box> + where Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon_lt> + + Sync + + 'canyon_lt }; - let pk_ident_type = macro_data - ._fields_with_types() - .into_iter() - .find(|(i, _t)| Some(i.to_string()) == primary_key); - let insert_transaction = if let Some(pk_data) = &pk_ident_type { - let pk_ident = &pk_data.0; - let pk_type = &pk_data.1; + let insert_entity_with_signature = quote! { + async fn insert_entity_with<'canyon_lt, 'err_lt, Entity, Input>(entity: &'canyon_lt mut Entity, input: Input) + -> Result<(), Box> + where + Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon_lt> + + Sync + + 'canyon_lt, + Input: canyon_sql::connection::DbConnection + Send + 'canyon_lt + }; - quote! { - #remove_pk_value_from_fn_entry; + let no_fields_to_insert_err = __details::no_fields_to_insert_err(); - let stmt = format!( - "INSERT INTO {} ({}) VALUES ({}) RETURNING {}", - #table_schema_data, - #insert_columns, - #placeholders, - #primary_key - ); + let stmt_ctr = quote! { + let insert_columns = entity.fields_as_comma_sep_string(); - let rows = <#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, - values, - datasource_name - ).await?; + if insert_columns.is_empty() { + return #no_fields_to_insert_err; + } + let values = entity.fields_actual_values(); + let placeholders = entity.queries_placeholders(); - match rows { - #[cfg(feature = "postgres")] - canyon_sql::crud::CanyonRows::Postgres(mut v) => { - self.#pk_ident = v - .get(0) - .ok_or("Failed getting the returned IDs for an insert")? - .get::<&str, #pk_type>(#primary_key); - Ok(()) - }, - #[cfg(feature = "mssql")] - canyon_sql::crud::CanyonRows::Tiberius(mut v) => { - self.#pk_ident = v - .get(0) - .ok_or("Failed getting the returned IDs for a multi insert")? 
- .get::<#pk_type, &str>(#primary_key) - .ok_or("SQL Server primary key type failed to be set as value")?; - Ok(()) - }, - #[cfg(feature = "mysql")] - canyon_sql::crud::CanyonRows::MySQL(mut v) => { - self.#pk_ident = v - .get(0) - .ok_or("Failed getting the returned IDs for a multi insert")? - .get::<#pk_type,usize>(0) - .ok_or("MYSQL primary key type failed to be set as value")?; - Ok(()) - }, - _ => panic!("Reached the panic match arm of insert for the DatabaseConnection type") // TODO remove when the generics will be refactored + let mut stmt = format!( // TODO: use the InsertQueryBuilder when created ;) + "INSERT INTO {} ({}) VALUES ({})", + #table_schema_data, insert_columns, placeholders + ); + }; + let add_returning_clause = quote! { + stmt.push_str(" RETURNING "); + stmt.push_str(pk); + }; + + quote! { + #insert_entity_signature { + let default_db_conn = canyon_sql::core::Canyon::instance()?.get_default_connection()?; + #stmt_ctr; + + if let Some(pk) = entity.primary_key() { + #add_returning_clause + let pk = default_db_conn.query_one_for::<::PrimaryKeyType>(&stmt, &values).await?; + entity.set_primary_key_actual_value(pk)?; + } else { + let _ = default_db_conn.execute(&stmt, &values).await?; } + Ok(()) } - } else { - quote! { - let stmt = format!( - "INSERT INTO {} ({}) VALUES ({})", - #table_schema_data, - #insert_columns, - #placeholders, - #primary_key - ); - - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, - values, - datasource_name - ).await?; + #insert_entity_with_signature { + #stmt_ctr; + if let Some(pk) = entity.primary_key() { + #add_returning_clause + let pk = input.query_one_for::<::PrimaryKeyType>(&stmt, &values).await?; + entity.set_primary_key_actual_value(pk)?; + } else { + let _ = input.execute(&stmt, &values).await?; + } Ok(()) } - }; + } +} - quote! 
{ - /// Inserts into a database entity the current data in `self`, generating a new - /// entry (row), returning the `PRIMARY KEY` = `self.` with the specified - /// datasource by it's `datasouce name`, defined in the configuration file. - /// - /// This `insert` operation needs a `&mut` reference. That's because typically, - /// an insert operation represents *new* data stored in the database, so, when - /// inserted, the database will generate a unique new value for the - /// `pk` field, having a unique identifier for every record, and it will - /// automatically assign that returned pk to `self.`. So, after the `insert` - /// operation, you instance will have the correct value that is the *PRIMARY KEY* - /// of the database row that represents. - /// - /// This operation returns a result type, indicating a possible failure querying the database. - /// - /// ## *Examples* - ///``` - /// let mut lec: League = League { - /// id: Default::default(), - /// ext_id: 1, - /// slug: "LEC".to_string(), - /// name: "League Europe Champions".to_string(), - /// region: "EU West".to_string(), - /// image_url: "https://lec.eu".to_string(), - /// }; - /// - /// println!("LEC before: {:?}", &lec); - /// - /// let ins_result = lec.insert_result().await; - /// - /// Now, we can handle the result returned, because it can contains a - /// critical error that may leads your program to panic - /// if let Ok(_) = ins_result { - /// println!("LEC after: {:?}", &lec); - /// } else { - /// eprintln!("{:?}", ins_result.err()) - /// } - /// ``` - /// - async fn insert<'a>(&mut self) - -> Result<(), Box> - { - let datasource_name = ""; - let mut values: Vec<&dyn canyon_sql::crud::bounds::QueryParameter<'_>> = vec![#(#insert_values),*]; - #insert_transaction +mod __details { + use super::*; + + pub(crate) fn generate_insert_fn_body_tokens( + macro_data: &MacroTokens, + stmt: &str, + is_with_method: bool, + ) -> TokenStream { + let pk_ident_and_type = 
macro_data.get_primary_key_ident_and_type(); + + let db_conn = if is_with_method { + quote! { input } + } else { + quote! { default_db_conn } + }; + + let mut insert_body_tokens = TokenStream::new(); + if !is_with_method { + insert_body_tokens.extend(quote! { + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + }); } - /// Inserts into a database entity the current data in `self`, generating a new - /// entry (row), returning the `PRIMARY KEY` = `self.` with the specified - /// datasource by it's `datasouce name`, defined in the configuration file. - /// - /// This `insert` operation needs a `&mut` reference. That's because typically, - /// an insert operation represents *new* data stored in the database, so, when - /// inserted, the database will generate a unique new value for the - /// `pk` field, having a unique identifier for every record, and it will - /// automatically assign that returned pk to `self.`. So, after the `insert` - /// operation, you instance will have the correct value that is the *PRIMARY KEY* - /// of the database row that represents. - /// - /// This operation returns a result type, indicating a possible failure querying the database. 
- /// - /// ## *Examples* - ///``` - /// let mut lec: League = League { - /// id: Default::default(), - /// ext_id: 1, - /// slug: "LEC".to_string(), - /// name: "League Europe Champions".to_string(), - /// region: "EU West".to_string(), - /// image_url: "https://lec.eu".to_string(), - /// }; - /// - /// println!("LEC before: {:?}", &lec); - /// - /// let ins_result = lec.insert_result().await; - /// - /// Now, we can handle the result returned, because it can contains a - /// critical error that may leads your program to panic - /// if let Ok(_) = ins_result { - /// println!("LEC after: {:?}", &lec); - /// } else { - /// eprintln!("{:?}", ins_result.err()) - /// } - /// ``` - /// - async fn insert_datasource<'a>(&mut self, datasource_name: &'a str) - -> Result<(), Box> - { - let mut values: Vec<&dyn canyon_sql::crud::bounds::QueryParameter<'_>> = vec![#(#insert_values_cloned),*]; - #insert_transaction + if let Some(pk_data) = pk_ident_and_type { + let pk_ident = pk_data.0; + let pk_type = pk_data.1; + + insert_body_tokens.extend(quote! { + self.#pk_ident = #db_conn.query_one_for::<#pk_type>(#stmt, values).await?; + Ok(()) + }); + } else { + insert_body_tokens.extend(quote! { + let _ = #db_conn.execute(#stmt, values).await?; + Ok(()) + }); + } + + insert_body_tokens + } + + pub(crate) fn generate_insert_fn_values_slice_expr(macro_data: &MacroTokens) -> TokenStream { + // Retrieves the fields of the Struct + let fields = macro_data.get_columns_pk_parsed(); + + let insert_values = fields.map(|field| { + let field = field.ident.as_ref().unwrap(); + quote! { &self.#field } + }); + + quote! 
{ + let values: &[&dyn canyon_sql::query::QueryParameter] = &[#(#insert_values),*]; + } + } + + pub(crate) fn generate_insert_sql_statement( + macro_data: &MacroTokens, + table_schema_data: &str + ) -> String { + // Retrieves the fields of the Struct as a collection of Strings, already parsed + // the condition of remove the primary key if it's present, and it's auto incremental + let insert_columns = macro_data.get_struct_fields_as_comma_sep_string(); + + // Returns a String with the generic $x placeholder for the query parameters. + // Already takes in consideration if there's pk annotation + let placeholders = macro_data.placeholders_generator(); + + let mut stmt = format!( + "INSERT INTO {} ({}) VALUES ({})", + table_schema_data, insert_columns, placeholders + ); + + if let Some(primary_key) = macro_data.get_primary_key_annotation() { + stmt.push_str(format!(" RETURNING {}", primary_key).as_str()); + } + + stmt + } + + pub(crate) fn generate_unsupported_operation_err() -> TokenStream { + quote! { + Err( + std::io::Error::new( + std::io::ErrorKind::Unsupported, + "Can't use the 'Insert' family transactions as a method (that receives self as first parameter) \ + if your T type in CrudOperations is NOT the same type that implements RowMapper. \ + Consider to use instead the provided insert_entity or insert_entity_with functions." + ).into_inner().unwrap() + ) } + } + pub(crate) fn no_fields_to_insert_err() -> TokenStream { + quote! { + Err( + std::io::Error::new( + std::io::ErrorKind::Unsupported, + "The type has either zero fields or exactly one that is annotated with #[primary_key].\ + That's makes it ineligibly to be used in the insert_entity family of operations." 
+ ).into_inner().unwrap() + ) + } } } @@ -204,29 +245,30 @@ pub fn generate_insert_tokens(macro_data: &MacroTokens, table_schema_data: &Stri /// as a [`QueryBuilder`] object, and instead of being a method over some [`T`] type, /// as an associated function for [`T`] /// -/// This, also lets the user to have the option to be able to insert multiple +/// This, also lets the user have the option to be able to insert multiple /// [`T`] objects in only one query -pub fn generate_multiple_insert_tokens( +fn _generate_multiple_insert_tokens( macro_data: &MacroTokens, - table_schema_data: &String, + table_schema_data: &str ) -> TokenStream { let ty = macro_data.ty; + let (_, ty_generics, _) = macro_data.generics.split_for_impl(); // Retrieves the fields of the Struct as continuous String - let column_names = macro_data.get_struct_fields_as_strings(); + let column_names = macro_data.get_struct_fields_as_comma_sep_string(); // Retrieves the fields of the Struct let fields = macro_data.get_struct_fields(); - let macro_fields = fields.iter().map(|field| quote! { &instance.#field }); + let macro_fields: Vec = fields.map(|field| quote! { &instance.#field }).collect(); let macro_fields_cloned = macro_fields.clone(); let pk = macro_data.get_primary_key_annotation().unwrap_or_default(); let pk_ident_type = macro_data - ._fields_with_types() + .fields_with_types() .into_iter() - .find(|(i, _t)| *i == pk); + .find(|(i, _t)| *i == &pk); let multi_insert_transaction = if let Some(pk_data) = &pk_ident_type { let pk_ident = &pk_data.0; @@ -242,9 +284,18 @@ pub fn generate_multiple_insert_tokens( let mut split = mapped_fields.split(", ") .collect::>(); + mapped_fields = #column_names + .split(", ") + .map( |column_name| format!("\"{}\"", column_name)) + .collect::>() + .join(", "); + + let mut split = mapped_fields.split(", ") + .collect::>(); + let pk_value_index = split.iter() .position(|pk| *pk == format!("\"{}\"", #pk).as_str()) - .expect("Error. 
No primary key found when should be there"); + .unwrap(); // ensured that is there split.retain(|pk| *pk != format!("\"{}\"", #pk).as_str()); mapped_fields = split.join(", ").to_string(); @@ -294,15 +345,15 @@ pub fn generate_multiple_insert_tokens( } } - let multi_insert_result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( + let multi_insert_result = <#ty #ty_generics as canyon_sql::core::Transaction>::query_rows( stmt, v_arr, - datasource_name + input ).await?; match multi_insert_result { #[cfg(feature="postgres")] - canyon_sql::crud::CanyonRows::Postgres(mut v) => { + canyon_sql::core::CanyonRows::Postgres(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -313,7 +364,7 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, #[cfg(feature="mssql")] - canyon_sql::crud::CanyonRows::Tiberius(mut v) => { + canyon_sql::core::CanyonRows::Tiberius(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -325,7 +376,7 @@ pub fn generate_multiple_insert_tokens( Ok(()) }, #[cfg(feature="mysql")] - canyon_sql::crud::CanyonRows::MySQL(mut v) => { + canyon_sql::core::CanyonRows::MySQL(mut v) => { for (idx, instance) in instances.iter_mut().enumerate() { instance.#pk_ident = v .get(idx) @@ -393,10 +444,10 @@ pub fn generate_multiple_insert_tokens( } } - <#ty as canyon_sql::crud::Transaction<#ty>>::query( + <#ty #ty_generics as canyon_sql::core::Transaction>::query_rows( stmt, v_arr, - datasource_name + input ).await?; Ok(()) @@ -425,43 +476,42 @@ pub fn generate_multiple_insert_tokens( /// }; /// let mut new_league3 = League { /// id: Default::default(), - /// ext_id: 9687392489032, + /// ext_id: 9687392489032, /// slug: "League3".to_owned(), /// name: "3League".to_owned(), - /// region: "EU".to_owned(), + /// region: "EU".to_owned(), /// image_url: "https://www.lag.com".to_owned() - /// }; + ///}; /// /// League::insert_multiple( /// &mut [&mut new_league, &mut 
new_league2, &mut new_league3] /// ).await - /// .ok(); + ///.ok(); /// ``` - async fn multi_insert<'a>(instances: &'a mut [&'a mut #ty]) -> ( - Result<(), Box> - ) { - use canyon_sql::crud::bounds::QueryParameter; - let datasource_name = ""; + async fn multi_insert<'a, T>(instances: &'a mut [&'a mut T]) -> ( + Result<(), Box> + ) { + let input = ""; - let mut final_values: Vec>> = Vec::new(); - for instance in instances.iter() { - let intermediate: &[&dyn QueryParameter<'_>] = &[#(#macro_fields),*]; + let mut final_values: Vec> = Vec::new(); + for instance in instances.iter() { + let intermediate: &[&dyn canyon_sql::query::QueryParameter] = &[#(#macro_fields),*]; - let mut longer_lived: Vec<&dyn QueryParameter<'_>> = Vec::new(); - for value in intermediate.into_iter() { - longer_lived.push(*value) - } + let mut longer_lived: Vec<&dyn canyon_sql::query::QueryParameter> = Vec::new(); + for value in intermediate.into_iter() { + longer_lived.push(*value) + } - final_values.push(longer_lived) - } + final_values.push(longer_lived) + } - let mut mapped_fields: String = String::new(); + let mut mapped_fields: String = String::new(); - #multi_insert_transaction - } + #multi_insert_transaction + } /// Inserts multiple instances of some type `T` into its related table with the specified - /// datasource by it's `datasouce name`, defined in the configuration file. + /// datasource by its `datasource name`, defined in the configuration file. 
/// /// ``` /// let mut new_league = League { @@ -494,16 +544,16 @@ pub fn generate_multiple_insert_tokens( /// ).await /// .ok(); /// ``` - async fn multi_insert_datasource<'a>(instances: &'a mut [&'a mut #ty], datasource_name: &'a str) -> ( - Result<(), Box> - ) { - use canyon_sql::crud::bounds::QueryParameter; - - let mut final_values: Vec>> = Vec::new(); + async fn multi_insert_with<'a, T, I>(instances: &'a mut [&'a mut T], input: I) -> + Result<(), Box> + where + I: canyon_sql::connection::DbConnection + Send + 'a + { + let mut final_values: Vec> = Vec::new(); for instance in instances.iter() { - let intermediate: &[&dyn QueryParameter<'_>] = &[#(#macro_fields_cloned),*]; + let intermediate: &[&dyn canyon_sql::query::QueryParameter] = &[#(#macro_fields_cloned),*]; - let mut longer_lived: Vec<&dyn QueryParameter<'_>> = Vec::new(); + let mut longer_lived: Vec<&dyn canyon_sql::query::QueryParameter> = Vec::new(); for value in intermediate.into_iter() { longer_lived.push(*value) } diff --git a/canyon_macros/src/query_operations/mod.rs b/canyon_macros/src/query_operations/mod.rs index dbba723f..412a0696 100644 --- a/canyon_macros/src/query_operations/mod.rs +++ b/canyon_macros/src/query_operations/mod.rs @@ -1,4 +1,64 @@ +use crate::query_operations::delete::generate_delete_tokens; +use crate::query_operations::foreign_key::generate_find_by_fk_ops; +use crate::query_operations::insert::generate_insert_tokens; +use crate::query_operations::read::generate_read_operations_tokens; +use crate::query_operations::update::generate_update_tokens; +use crate::utils::helpers::compute_crud_ops_mapping_target_type_with_generics; +use crate::utils::macro_tokens::MacroTokens; +use proc_macro2::TokenStream; +use quote::quote; +use canyon_core::canyon::Canyon; +use canyon_core::query::querybuilder::TableMetadata; + pub mod delete; +pub mod foreign_key; pub mod insert; -pub mod select; +pub mod read; pub mod update; + +mod consts; +mod doc_comments; + +pub fn 
impl_crud_operations_trait_for_struct( + macro_data: &MacroTokens<'_>, + table_schema_data: &TableMetadata, +) -> proc_macro::TokenStream { + let mut crud_ops_tokens = TokenStream::new(); + + let ty = macro_data.ty; + let (impl_generics, ty_generics, where_clause) = macro_data.generics.split_for_impl(); + let mapper_ty = compute_crud_ops_mapping_target_type_with_generics( + ty, + &ty_generics, + macro_data.retrieve_mapping_target_type().as_ref(), + ); + + let read_operations_tokens = generate_read_operations_tokens(macro_data, table_schema_data); + let insert_tokens = generate_insert_tokens(macro_data, table_schema_data); + let update_tokens = generate_update_tokens(macro_data, table_schema_data); + let delete_tokens = generate_delete_tokens(macro_data, table_schema_data); + + let crud_operations_tokens = quote! { + #read_operations_tokens + #insert_tokens + #update_tokens + #delete_tokens + }; + + crud_ops_tokens.extend(quote! { + use canyon_sql::connection::DbConnection; + use canyon_sql::core::RowMapper; + + impl #impl_generics canyon_sql::crud::CrudOperations<#mapper_ty> for #ty #ty_generics #where_clause { + #crud_operations_tokens + } + + impl #impl_generics canyon_sql::core::Transaction for #ty #ty_generics #where_clause {} + }); + + // NOTE: this extends should be documented WHY is needed to be after the base impl of CrudOperations + let foreign_key_ops_tokens = generate_find_by_fk_ops(macro_data, table_schema_data); + crud_ops_tokens.extend(quote! 
{ #foreign_key_ops_tokens }); + + crud_ops_tokens.into() +} diff --git a/canyon_macros/src/query_operations/read.rs b/canyon_macros/src/query_operations/read.rs new file mode 100644 index 00000000..40e14677 --- /dev/null +++ b/canyon_macros/src/query_operations/read.rs @@ -0,0 +1,381 @@ +use crate::utils::macro_tokens::MacroTokens; +use canyon_core::query::querybuilder::{SelectQueryBuilder, SelectQueryBuilderOps}; +use canyon_core::query::querybuilder::types::TableMetadata; +use proc_macro2::{Ident, TokenStream}; +use quote::{ToTokens, quote}; + +/// Facade function that acts as the unique API for export to the real macro implementation +/// of all the generated macros for the READ operations +pub fn generate_read_operations_tokens( + macro_data: &MacroTokens<'_>, + table_schema_data: &TableMetadata, +) -> TokenStream { + let ty = macro_data.ty; + let mapper_ty = macro_data + .retrieve_mapping_target_type() + .as_ref() + .unwrap_or(ty); + + let cols = macro_data.get_column_names_pk_parsed().collect::>(); + let find_all_query = SelectQueryBuilder::new(table_schema_data.clone()) + .expect("Unexpected error creating a SelectQueryBuilder for the find_all operations") + .with_columns(&cols); + + match find_all_query.build() { + Ok(query) => { + let sql = query.as_ref(); + + let find_all_tokens = generate_find_all_operations_tokens(mapper_ty, sql); + let count_tokens = generate_count_operations_tokens(sql); + let find_by_pk_tokens = generate_find_by_pk_operations_tokens(macro_data, sql); + let read_querybuilder_ops = generate_select_querybuilder_tokens(sql); + + quote! 
{ + #find_all_tokens + #read_querybuilder_ops + #count_tokens + #find_by_pk_tokens + } + }, + Err(e) => { + syn::Error::new(mapper_ty.span(), format!("Failed to build query: {e}")) + .to_compile_error() + } + } +} + +fn generate_find_all_operations_tokens( + mapper_ty: &Ident, + find_all_query: &str +) -> TokenStream { + let find_all = __details::find_all_generators::create_find_all_macro(mapper_ty, &find_all_query); + let find_all_with = + __details::find_all_generators::create_find_all_with_macro(mapper_ty, &find_all_query); + + quote! { + #find_all + #find_all_with + } +} + +fn generate_select_querybuilder_tokens(table_schema_data: &str) -> TokenStream { + quote! { + /// Generates a [`canyon_sql::query::querybuilder::SelectQueryBuilder`] + /// that allows you to customize the query by adding parameters and constrains dynamically. + /// + /// It generates a Query `SELECT * FROM table_name`, where `table_name` it's the name of your + /// entity but converted to the corresponding database convention, + /// unless concrete values are set on the available parameters of the + /// `canyon_macro(table_name = "table_name", schema = "schema")` + fn select_query<'a>() + -> Result< + canyon_sql::query::querybuilder::SelectQueryBuilder<'a>, + Box + > + { + canyon_sql::query::querybuilder::SelectQueryBuilder::new(#table_schema_data) + } + + /// Generates a [`canyon_sql::query::querybuilder::SelectQueryBuilder`] + /// that allows you to customize the query by adding parameters and constrains dynamically. 
+ /// + /// It generates a Query `SELECT * FROM table_name`, where `table_name` it's the name of your + /// entity but converted to the corresponding database convention, + /// unless concrete values are set on the available parameters of the + /// `canyon_macro(table_name = "table_name", schema = "schema")` + fn select_query_with<'a>(database_type: canyon_sql::connection::DatabaseType) + -> Result< + canyon_sql::query::querybuilder::SelectQueryBuilder<'a>, + Box + > { + canyon_sql::query::querybuilder::SelectQueryBuilder::new_for(#table_schema_data, database_type) + } + } +} + +fn generate_count_operations_tokens(table_schema_data: &str) -> TokenStream { + let count_stmt = format!("SELECT COUNT(*) FROM {table_schema_data}"); + let count = __details::count_generators::create_count_macro(&count_stmt); + let count_with = __details::count_generators::create_count_with_macro(&count_stmt); + + quote! { + #count + #count_with + } +} + +fn generate_find_by_pk_operations_tokens( + macro_data: &MacroTokens<'_>, + table_schema_data: &str +) -> TokenStream { + let ty = macro_data.ty; + let mapper_ty = macro_data.retrieve_mapping_target_type().as_ref(); + let pk = macro_data.get_primary_key_annotation(); + + let base_body = if let Some(compile_time_known_pk) = pk { + Some(quote! { + let stmt = format!( + "SELECT * FROM {} WHERE {} = $1", + #table_schema_data, #compile_time_known_pk + ); + }) + } else { + let tt = mapper_ty.unwrap_or(ty).to_token_stream(); + Some(quote! 
{ + use canyon_sql::query::bounds::Inspectionable; + let pk = <#tt as Inspectionable>::primary_key_st() + .ok_or_else(|| "No primary key found for this instance")?; + let stmt = format!( + "SELECT * FROM {} WHERE {} = $1", + #table_schema_data, + pk + ); + }) + }; + + let mapper_ty = mapper_ty.unwrap_or(ty); + let find_by_pk = + __details::find_by_pk_generators::create_find_by_pk_macro(mapper_ty, &base_body); + let find_by_pk_with = + __details::find_by_pk_generators::create_find_by_pk_with(mapper_ty, &base_body); + + quote! { + #find_by_pk + #find_by_pk_with + } +} + +mod __details { + use quote::quote; + use syn::Ident; + + pub mod find_all_generators { + use super::*; + use proc_macro2::TokenStream; + + pub fn create_find_all_macro(mapper_ty: &Ident, stmt: &str) -> TokenStream { + quote! { + async fn find_all() + -> Result, Box<(dyn std::error::Error + Send + Sync)>> + { + let default_db_conn = canyon_sql::core::Canyon::instance()?.get_default_connection()?; + default_db_conn.query(#stmt, &[]).await + } + } + } + + pub fn create_find_all_with_macro(mapper_ty: &Ident, stmt: &str) -> TokenStream { + quote! { + async fn find_all_with<'a, I>(input: I) + -> Result, Box<(dyn std::error::Error + Send + Sync)>> + where + I: canyon_sql::connection::DbConnection + Send + 'a + { + input.query::<&str, #mapper_ty>(#stmt, &[]).await + } + } + } + } + + pub mod count_generators { + use super::*; + use proc_macro2::TokenStream; + + pub fn create_count_macro(stmt: &str) -> TokenStream { + let mssql_arm = get_mssql_arm_tokens_if_enabled(stmt, false); + + quote! { + async fn count() -> Result> { + let default_db_conn = canyon_sql::core::Canyon::instance()?.get_default_connection()?; + let db_type = default_db_conn.get_database_type()?; + match db_type { + #mssql_arm + _ => { + default_db_conn.query_one_for::(#stmt, &[]).await + } + } + } + } + } + + pub fn create_count_with_macro(stmt: &str) -> TokenStream { + let mssql_arm = get_mssql_arm_tokens_if_enabled(stmt, true); + + quote! 
{ + async fn count_with<'a, I>(input: I) + -> Result> + where + I: canyon_sql::connection::DbConnection + Send + 'a + { + let db_type = input.get_database_type()?; + match db_type { + #mssql_arm + _ => { + // PostgreSQL and MySQL COUNT(*) return i64 + input.query_one_for::(#stmt, &[]).await + } + } + } + } + } + + fn get_mssql_arm_tokens_if_enabled(stmt: &str, is_with_input: bool) -> TokenStream { + let db_conn = if is_with_input { + quote! {input} + } else { + quote! {default_db_conn} + }; + if cfg!(feature = "mssql") { + quote! { + canyon_sql::connection::DatabaseType::SqlServer => { + let count_i32: i32 = #db_conn.query_one_for::(#stmt, &[]).await?; + Ok(count_i32 as i64) + } + } + } else { + quote! {} // nothing emitted + } + } + } + + pub mod find_by_pk_generators { + use super::*; + use crate::query_operations::consts; + use proc_macro2::TokenStream; + + pub fn create_find_by_pk_macro( + mapper_ty: &Ident, + base_body: &Option, + ) -> TokenStream { + let body = if let Some(body) = base_body { + let default_db_conn_call = consts::generate_default_db_conn_tokens(); + quote! { + #body; + #default_db_conn_call + .query_one::<#mapper_ty>(&stmt, &[value]) + .await + } + } else { + let unsupported_op_err = consts::generate_no_pk_error(); + quote! { #unsupported_op_err } + }; + + quote! { + async fn find_by_pk<'canyon_lt, 'err_lt>(value: &'canyon_lt dyn canyon_sql::query::QueryParameter) + -> Result, Box<(dyn std::error::Error + Send + Sync + 'err_lt)>> + { + #body + } + } + } + + pub fn create_find_by_pk_with( + mapper_ty: &Ident, + base_body: &Option, + ) -> TokenStream { + let body = if let Some(body) = base_body { + quote! { + #body; + input.query_one::<#mapper_ty>(&stmt, &[value]).await + } + } else { + let unsupported_op_err = consts::generate_no_pk_error(); + quote! { #unsupported_op_err } + }; + + quote! 
{ + async fn find_by_pk_with<'canyon_lt, 'err_lt, I>(value: &'canyon_lt dyn canyon_sql::query::QueryParameter, input: I) + -> Result, Box<(dyn std::error::Error + Send + Sync + 'err_lt)>> + where + I: canyon_sql::connection::DbConnection + Send + 'canyon_lt + { + #body + } + } + } + } +} + +#[cfg(test)] +mod macro_builder_read_ops_tests { + use super::__details::{count_generators::*, find_all_generators::*}; + + use crate::query_operations::consts; + use crate::query_operations::read::__details::find_by_pk_generators::{ + create_find_by_pk_macro, create_find_by_pk_with, + }; + use proc_macro2::Ident; + + const SELECT_ALL_STMT: &str = "SELECT * FROM public.user"; // TODO: introduce the const_format crate + const COUNT_STMT: &str = "SELECT COUNT(*) FROM public.user"; + const FIND_BY_PK_STMT: &str = "SELECT * FROM public.user WHERE id = $1"; + + #[test] + fn test_create_find_all_macro() { + let mapper_ty = syn::parse_str::("User").unwrap(); + let tokens = create_find_all_macro(&mapper_ty, SELECT_ALL_STMT); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn find_all")); + assert!(generated.contains(consts::RES_RET_TY)); + assert!(generated.contains(SELECT_ALL_STMT)); + } + + #[test] + fn test_create_find_all_with_macro() { + let mapper_ty = syn::parse_str::("User").unwrap(); + let tokens = create_find_all_with_macro(&mapper_ty, SELECT_ALL_STMT); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn find_all_with")); + assert!(generated.contains(consts::RES_RET_TY)); + assert!(generated.contains(consts::LT_CONSTRAINT)); + assert!(generated.contains(consts::INPUT_PARAM)); + assert!(generated.contains(SELECT_ALL_STMT)); + } + + #[test] + fn test_create_count_macro() { + let tokens = create_count_macro(COUNT_STMT); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn count")); + assert!(generated.contains(consts::I64_RET_TY)); + assert!(generated.contains(COUNT_STMT)); + } + + #[test] + fn 
test_create_count_with_macro() { + let tokens = create_count_with_macro(COUNT_STMT); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn count_with")); + assert!(generated.contains(consts::I64_RET_TY_LT)); + assert!(generated.contains(COUNT_STMT)); + assert!(generated.contains(consts::LT_CONSTRAINT)); + assert!(generated.contains(consts::INPUT_PARAM)); + } + + #[test] + fn test_create_find_by_pk_macro() { + let mapper_ty = syn::parse_str::("User").unwrap(); + let tokens = create_find_by_pk_macro(&mapper_ty, &None); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn find_by_pk")); + assert!(generated.contains(consts::OPT_RET_TY_LT)); + assert!(generated.contains(FIND_BY_PK_STMT)); + } + + #[test] + fn test_create_find_by_pk_with_macro() { + let mapper_ty = syn::parse_str::("User").unwrap(); + let tokens = create_find_by_pk_with(&mapper_ty, &None); + let generated = tokens.to_string(); + + assert!(generated.contains("async fn find_by_pk_with")); + assert!(generated.contains(consts::OPT_RET_TY_LT)); + assert!(generated.contains(consts::LT_CONSTRAINT)); + assert!(generated.contains(FIND_BY_PK_STMT)); + } +} diff --git a/canyon_macros/src/query_operations/select.rs b/canyon_macros/src/query_operations/select.rs deleted file mode 100644 index 82a1a5b5..00000000 --- a/canyon_macros/src/query_operations/select.rs +++ /dev/null @@ -1,488 +0,0 @@ -use canyon_entities::field_annotation::EntityFieldAnnotation; - -use proc_macro2::TokenStream; -use quote::quote; - -use crate::utils::helpers::*; -use crate::utils::macro_tokens::MacroTokens; - -/// Generates the TokenStream for build the __find_all() CRUD -/// associated function -pub fn generate_find_all_unchecked_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - let stmt = format!("SELECT * FROM {table_schema_data}"); - - quote! 
{ - /// Performs a `SELECT * FROM table_name`, where `table_name` it's - /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL prefers table names declared - /// with snake_case identifiers. - async fn find_all_unchecked<'a>() -> Vec<#ty> { - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - "" - ).await - .unwrap() - .into_results::<#ty>() - } - - /// Performs a `SELECT * FROM table_name`, where `table_name` it's - /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL prefers table names declared - /// with snake_case identifiers. - /// - /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. - async fn find_all_unchecked_datasource<'a>(datasource_name: &'a str) -> Vec<#ty> { - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - datasource_name - ).await - .unwrap() - .into_results::<#ty>() - } - } -} - -/// Generates the TokenStream for build the __find_all_result() CRUD -/// associated function -pub fn generate_find_all_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - let stmt = format!("SELECT * FROM {table_schema_data}"); - - quote! { - /// Performs a `SELECT * FROM table_name`, where `table_name` it's - /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL prefers table names declared - /// with snake_case identifiers. - async fn find_all<'a>() -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - { - Ok( - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - "" - ).await? 
- .into_results::<#ty>() - ) - } - - /// Performs a `SELECT * FROM table_name`, where `table_name` it's - /// the name of your entity but converted to the corresponding - /// database convention. P.ej. PostgreSQL prefers table names declared - /// with snake_case identifiers. - /// - /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. - /// - /// Also, returns a [`Vec, Error>`], wrapping a possible failure - /// querying the database, or, if no errors happens, a Vec containing - /// the data found. - async fn find_all_datasource<'a>(datasource_name: &'a str) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - { - Ok( - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - datasource_name - ).await? - .into_results::<#ty>() - ) - } - } -} - -/// Same as above, but with a [`canyon_sql::query::QueryBuilder`] -pub fn generate_find_all_query_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - - quote! { - /// Generates a [`canyon_sql::query::SelectQueryBuilder`] - /// that allows you to customize the query by adding parameters and constrains dynamically. - /// - /// It performs a `SELECT * FROM table_name`, where `table_name` it's the name of your - /// entity but converted to the corresponding database convention, - /// unless concrete values are set on the available parameters of the - /// `canyon_macro(table_name = "table_name", schema = "schema")` - fn select_query<'a>() -> canyon_sql::query::SelectQueryBuilder<'a, #ty> { - canyon_sql::query::SelectQueryBuilder::new(#table_schema_data, "") - } - - /// Generates a [`canyon_sql::query::SelectQueryBuilder`] - /// that allows you to customize the query by adding parameters and constrains dynamically. 
- /// - /// It performs a `SELECT * FROM table_name`, where `table_name` it's the name of your - /// entity but converted to the corresponding database convention, - /// unless concrete values are set on the available parameters of the - /// `canyon_macro(table_name = "table_name", schema = "schema")` - /// - /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. - fn select_query_datasource<'a>(datasource_name: &'a str) -> canyon_sql::query::SelectQueryBuilder<'a, #ty> { - canyon_sql::query::SelectQueryBuilder::new(#table_schema_data, datasource_name) - } - } -} - -/// Performs a COUNT(*) query over some table, returning a [`Result`] wrapping -/// a possible success or error coming from the database -pub fn generate_count_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - let ty_str = &ty.to_string(); - let stmt = format!("SELECT COUNT(*) FROM {table_schema_data}"); - - let result_handling = quote! { - #[cfg(feature="postgres")] - canyon_sql::crud::CanyonRows::Postgres(mut v) => Ok( - v.remove(0).get::<&str, i64>("count") - ), - #[cfg(feature="mssql")] - canyon_sql::crud::CanyonRows::Tiberius(mut v) => - v.remove(0) - .get::(0) - .map(|c| c as i64) - .ok_or(format!("Failure in the COUNT query for MSSQL for: {}", #ty_str).into()) - .into(), - #[cfg(feature="mysql")] - canyon_sql::crud::CanyonRows::MySQL(mut v) => v.remove(0) - .get::(0) - .ok_or(format!("Failure in the COUNT query for MYSQL for: {}", #ty_str).into()), - _ => panic!() // TODO remove when the generics will be refactored - }; - - quote! 
{ - /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, - /// wrapping a possible success or error coming from the database - async fn count() -> Result> { - let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - "" - ).await?; - - match count { - #result_handling - } - } - - /// Performs a COUNT(*) query over some table, returning a [`Result`] rather than panicking, - /// wrapping a possible success or error coming from the database with the specified datasource - async fn count_datasource<'a>(datasource_name: &'a str) -> Result> { - let count = <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - &[], - datasource_name - ).await?; - - match count { - #result_handling - } - } - } -} - -/// Generates the TokenStream for build the __find_by_pk() CRUD operation -pub fn generate_find_by_pk_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - let pk = macro_data.get_primary_key_annotation().unwrap_or_default(); - let stmt = format!("SELECT * FROM {table_schema_data} WHERE {pk} = $1"); - - // Disabled if there's no `primary_key` annotation - if pk.is_empty() { - return quote! { - async fn find_by_pk<'a>(value: &'a dyn canyon_sql::crud::bounds::QueryParameter<'a>) - -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - { - Err( - std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'find_by_pk' associated function on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." 
- ).into_inner().unwrap() - ) - } - - async fn find_by_pk_datasource<'a>( - value: &'a dyn canyon_sql::crud::bounds::QueryParameter<'a>, - datasource_name: &'a str - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { - Err( - std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'find_by_pk_datasource' associated function on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." - ).into_inner().unwrap() - ) - } - }; - } - - let result_handling = quote! { - match result { - n if n.len() == 0 => Ok(None), - _ => Ok( - Some(result.into_results::<#ty>().remove(0)) - ) - } - }; - - quote! { - /// Finds an element on the queried table that matches the - /// value of the field annotated with the `primary_key` attribute, - /// filtering by the column that it's declared as the primary - /// key on the database. - /// - /// This operation it's only available if the [`CanyonEntity`] contains - /// some field declared as primary key. - /// - /// Also, returns a [`Result, Error>`], wrapping a possible failure - /// querying the database, or, if no errors happens, a success containing - /// and Option with the data found wrapped in the Some(T) variant, - /// or None if the value isn't found on the table. - async fn find_by_pk<'a>(value: &'a dyn canyon_sql::crud::bounds::QueryParameter<'a>) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - { - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - vec![value], - "" - ).await?; - - #result_handling - } - - /// Finds an element on the queried table that matches the - /// value of the field annotated with the `primary_key` attribute, - /// filtering by the column that it's declared as the primary - /// key on the database. 
- /// - /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. - /// - /// This operation it's only available if the [`CanyonEntity`] contains - /// some field declared as primary key. - /// - /// Also, returns a [`Result, Error>`], wrapping a possible failure - /// querying the database, or, if no errors happens, a success containing - /// and Option with the data found wrapped in the Some(T) variant, - /// or None if the value isn't found on the table. - async fn find_by_pk_datasource<'a>( - value: &'a dyn canyon_sql::crud::bounds::QueryParameter<'a>, - datasource_name: &'a str - ) -> Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> { - - let result = <#ty as canyon_sql::crud::Transaction<#ty>>::query( - #stmt, - vec![value], - datasource_name - ).await?; - - #result_handling - } - } -} - -/// Generates the TokenStream for build the search by foreign key feature, also as a method instance -/// of a T type of as an associated function of same T type, but wrapped as a Result, representing -/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable -/// derive macro on the parent side of the relation -pub fn generate_find_by_foreign_key_tokens( - macro_data: &MacroTokens<'_>, -) -> Vec<(TokenStream, TokenStream)> { - let mut fk_quotes: Vec<(TokenStream, TokenStream)> = Vec::new(); - - for (field_ident, fk_annot) in macro_data.get_fk_annotations().iter() { - if let EntityFieldAnnotation::ForeignKey(table, column) = fk_annot { - let method_name = "search_".to_owned() + table; - - // TODO this is not a good implementation. 
We must try to capture the - // related entity in some way, and compare it with something else - let fk_ty = database_table_name_to_struct_ident(table); - - // Generate and identifier for the method based on the convention of "search_related_types" - // where types is a placeholder for the plural name of the type referenced - let method_name_ident = - proc_macro2::Ident::new(&method_name, proc_macro2::Span::call_site()); - let method_name_ident_ds = proc_macro2::Ident::new( - &format!("{}_datasource", &method_name), - proc_macro2::Span::call_site(), - ); - let quoted_method_signature: TokenStream = quote! { - async fn #method_name_ident(&self) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - }; - let quoted_datasource_method_signature: TokenStream = quote! { - async fn #method_name_ident_ds<'a>(&self, datasource_name: &'a str) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - }; - - let stmt = format!( - "SELECT * FROM {} WHERE {} = $1", - table, - format!("\"{column}\"").as_str(), - ); - let result_handler = quote! { - match result { - n if n.len() == 0 => Ok(None), - _ => Ok(Some( - result.into_results::<#fk_ty>().remove(0) - )) - } - }; - - fk_quotes.push(( - quote! { #quoted_method_signature; }, - quote! { - /// Searches the parent entity (if exists) for this type - #quoted_method_signature { - let result = <#fk_ty as canyon_sql::crud::Transaction<#fk_ty>>::query( - #stmt, - &[&self.#field_ident as &dyn canyon_sql::crud::bounds::QueryParameter<'_>], - "" - ).await?; - - #result_handler - } - } - )); - - fk_quotes.push(( - quote! { #quoted_datasource_method_signature; }, - quote! 
{ - /// Searches the parent entity (if exists) for this type with the specified datasource - #quoted_datasource_method_signature { - let result = <#fk_ty as canyon_sql::crud::Transaction<#fk_ty>>::query( - #stmt, - &[&self.#field_ident as &dyn canyon_sql::crud::bounds::QueryParameter<'_>], - datasource_name - ).await?; - - #result_handler - } - } - )); - } - } - - fk_quotes -} - -/// Generates the TokenStream for build the __search_by_foreign_key() CRUD -/// associated function, but wrapped as a Result, representing -/// a possible failure querying the database, a bad or missing FK annotation or a missed ForeignKeyable -/// derive macro on the parent side of the relation -pub fn generate_find_by_reverse_foreign_key_tokens( - macro_data: &MacroTokens<'_>, - table_schema_data: &String, -) -> Vec<(TokenStream, TokenStream)> { - let mut rev_fk_quotes: Vec<(TokenStream, TokenStream)> = Vec::new(); - let ty = macro_data.ty; - - for (field_ident, fk_annot) in macro_data.get_fk_annotations().iter() { - if let EntityFieldAnnotation::ForeignKey(table, column) = fk_annot { - let method_name = format!("search_{table}_childrens"); - - // Generate and identifier for the method based on the convention of "search_by__" (note the double underscore) - // plus the 'table_name' property of the ForeignKey annotation - let method_name_ident = - proc_macro2::Ident::new(&method_name, proc_macro2::Span::call_site()); - let method_name_ident_ds = proc_macro2::Ident::new( - &format!("{}_datasource", &method_name), - proc_macro2::Span::call_site(), - ); - let quoted_method_signature: TokenStream = quote! { - async fn #method_name_ident<'a, F: canyon_sql::crud::bounds::ForeignKeyable + Sync + Send>(value: &F) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - }; - let quoted_datasource_method_signature: TokenStream = quote! 
{ - async fn #method_name_ident_ds<'a, F: canyon_sql::crud::bounds::ForeignKeyable + Sync + Send> - (value: &F, datasource_name: &'a str) -> - Result, Box<(dyn std::error::Error + Send + Sync + 'static)>> - }; - - let f_ident = field_ident.to_string(); - - rev_fk_quotes.push(( - quote! { #quoted_method_signature; }, - quote! { - /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, - /// performns a search to find the children that belong to that concrete parent. - #quoted_method_signature - { - let lookage_value = value.get_fk_column(#column) - .expect(format!( - "Column: {:?} not found in type: {:?}", #column, #table - ).as_str()); - - let stmt = format!( - "SELECT * FROM {} WHERE {} = $1", - #table_schema_data, - format!("\"{}\"", #f_ident).as_str() - ); - - Ok(<#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, - &[lookage_value], - "" - ).await?.into_results::<#ty>()) - } - }, - )); - - rev_fk_quotes.push(( - quote! { #quoted_datasource_method_signature; }, - quote! { - /// Given a parent entity T annotated with the derive proc macro `ForeignKeyable`, - /// performns a search to find the children that belong to that concrete parent - /// with the specified datasource. 
- #quoted_datasource_method_signature - { - let lookage_value = value.get_fk_column(#column) - .expect(format!( - "Column: {:?} not found in type: {:?}", #column, #table - ).as_str()); - - let stmt = format!( - "SELECT * FROM {} WHERE {} = $1", - #table_schema_data, - format!("\"{}\"", #f_ident).as_str() - ); - - Ok(<#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, - &[lookage_value], - datasource_name - ).await?.into_results::<#ty>()) - } - }, - )); - } - } - - rev_fk_quotes -} diff --git a/canyon_macros/src/query_operations/update.rs b/canyon_macros/src/query_operations/update.rs index 5837325a..8c0459e4 100644 --- a/canyon_macros/src/query_operations/update.rs +++ b/canyon_macros/src/query_operations/update.rs @@ -1,130 +1,147 @@ +use crate::query_operations::consts; +use crate::utils::macro_tokens::MacroTokens; +use crate::utils::primary_key_attribute::PrimaryKeyIndex; use proc_macro2::TokenStream; use quote::quote; +use canyon_core::query::operators::Comp; +use canyon_core::query::querybuilder::{QueryBuilderOps, TableMetadata, UpdateQueryBuilder, UpdateQueryBuilderOps}; -use crate::utils::macro_tokens::MacroTokens; - -/// Generates the TokenStream for the __update() CRUD operation -pub fn generate_update_tokens(macro_data: &MacroTokens, table_schema_data: &String) -> TokenStream { - let ty = macro_data.ty; - - let update_columns = macro_data.get_column_names_pk_parsed(); - - // Retrieves the fields of the Struct - let fields = macro_data.get_struct_fields(); +pub fn generate_update_tokens( + macro_data: &MacroTokens, + table_schema_data: &TableMetadata, +) -> TokenStream { + let update_method_ops = generate_update_method_tokens(macro_data, table_schema_data); + let update_entity_ops = generate_update_entity_tokens(&table_schema_data.sql()); + let update_querybuilder_tokens = generate_update_querybuilder_tokens(&table_schema_data.sql()); - let mut vec_columns_values: Vec = Vec::new(); - for (i, column_name) in update_columns.iter().enumerate() { - let 
column_equal_value = format!("{} = ${}", column_name.to_owned(), i + 2); - vec_columns_values.push(column_equal_value) + quote! { + #update_method_ops + #update_entity_ops + #update_querybuilder_tokens } +} - let str_columns_values = vec_columns_values.join(", "); - - let update_values = fields.iter().map(|ident| { - quote! { &self.#ident } - }); - let update_values_cloned = update_values.clone(); +fn generate_update_method_tokens( + macro_data: &MacroTokens, + table_schema_data: &TableMetadata, +) -> TokenStream { + let mut update_ops_tokens = TokenStream::new(); + let ty = macro_data.ty; - if let Some(primary_key) = macro_data.get_primary_key_annotation() { - let pk_index = macro_data - .get_pk_index() - .expect("Update method failed to retrieve the index of the primary key"); + let update_signature = quote! { + async fn update(&self) -> Result> + }; + let update_with_signature = quote! { + async fn update_with<'a, I>(&self, input: I) + -> Result> + where I: canyon_sql::connection::DbConnection + Send + 'a + }; - quote! { - /// Updates a database record that matches - /// the current instance of a T type, returning a result - /// indicating a possible failure querying the database. 
- async fn update(&self) -> Result<(), Box> { - let stmt = format!( - "UPDATE {} SET {} WHERE {} = ${:?}", - #table_schema_data, #str_columns_values, #primary_key, #pk_index + 1 - ); - let update_values: &[&dyn canyon_sql::crud::bounds::QueryParameter<'_>] = &[#(#update_values),*]; - - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, update_values, "" - ).await?; + if let Some(primary_key) = macro_data.get_primary_key_field_annotation() { + let (_, ty_generics, _) = macro_data.generics.split_for_impl(); + let update_columns = macro_data.get_column_names_pk_parsed(); + let fields = macro_data.get_struct_fields(); - Ok(()) - } + let pk_name = &primary_key.name; + let pk_index = >::into(primary_key.index) + 1usize; + let update_stmt = UpdateQueryBuilder::new(table_schema_data.clone()) + .expect("Failed to create a UpdateQueryBuilder") + .set(&update_columns.collect::>()) + .expect("Failed to generate a SET clause") + .r#where(pk_name, Comp::Eq, &(pk_index as i32)) + .build() + .expect("Failed to construct a Query from a UpdateQueryBuilder"); - /// Updates a database record that matches - /// the current instance of a T type, returning a result - /// indicating a possible failure querying the database with the - /// specified datasource - async fn update_datasource<'a>(&self, datasource_name: &'a str) - -> Result<(), Box> - { - let stmt = format!( - "UPDATE {} SET {} WHERE {} = ${:?}", - #table_schema_data, #str_columns_values, #primary_key, #pk_index + 1 - ); - let update_values: &[&dyn canyon_sql::crud::bounds::QueryParameter<'_>] = &[#(#update_values_cloned),*]; + let update_values = fields.map(|ident| { + quote! { &self.#ident } + }); - <#ty as canyon_sql::crud::Transaction<#ty>>::query( - stmt, update_values, datasource_name - ).await?; + let update_values = quote! { + &[#(#update_values),*] + }; - Ok(()) + update_ops_tokens.extend(quote! 
{ + #update_signature { + let update_values: &[&dyn canyon_sql::query::QueryParameter] = #update_values; + <#ty #ty_generics as canyon_sql::core::Transaction>::execute(#&update_stmt, update_values, "").await } - } + #update_with_signature { + let update_values: &[&dyn canyon_sql::query::QueryParameter] = #update_values; + input.execute(#&update_stmt, update_values).await + } + }); } else { // If there's no primary key, update method over self won't be available. // Use instead the update associated function of the querybuilder + let err_msg = consts::UNAVAILABLE_CRUD_OP_ON_INSTANCE; + let no_pk_err = quote! { + Err( + std::io::Error::new( + std::io::ErrorKind::Unsupported, + #err_msg + ).into_inner().unwrap() + ) + }; // TODO: waiting for creating our custom error types - // TODO Returning an error should be a provisional way of doing this - quote! { - async fn update(&self) - -> Result<(), Box> - { - Err( - std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'update' method on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." - ).into_inner().unwrap() - ) - } + update_ops_tokens.extend(quote! { + #update_signature { #no_pk_err } + #update_with_signature{ #no_pk_err } + }); + } - async fn update_datasource<'a>(&self, datasource_name: &'a str) - -> Result<(), Box> - { - Err( - std::io::Error::new( - std::io::ErrorKind::Unsupported, - "You can't use the 'update_datasource' method on a \ - CanyonEntity that does not have a #[primary_key] annotation. \ - If you need to perform an specific search, use the Querybuilder instead." - ).into_inner().unwrap() - ) - } - } + update_ops_tokens +} + +fn generate_update_entity_tokens(table_schema_data: &str) -> TokenStream { + let update_entity_signature = quote! 
{ + async fn update_entity<'canyon_lt, 'err_lt, Entity>(entity: &'canyon_lt Entity) + -> Result<(), Box> + where Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon_lt> + + Sync + + 'canyon_lt + }; + + let update_entity_with_signature = quote! { + async fn update_entity_with<'canyon_lt, 'err_lt, Entity, Input>(entity: &'canyon_lt Entity, input: Input) + -> Result<(), Box> + where + Entity: canyon_sql::core::RowMapper + + canyon_sql::query::bounds::Inspectionable<'canyon_lt> + + Sync + + 'canyon_lt, + Input: canyon_sql::connection::DbConnection + Send + 'canyon_lt + }; + + let update_entity_body = __details::generate_update_entity_body(table_schema_data); + let update_entity_with_body = __details::generate_update_entity_with_body(table_schema_data); + + quote! { + #update_entity_signature { #update_entity_body } + #update_entity_with_signature { #update_entity_with_body } } } /// Generates the TokenStream for the __update() CRUD operation /// being the query generated with the [`QueryBuilder`] -pub fn generate_update_query_tokens( - macro_data: &MacroTokens, - table_schema_data: &String, -) -> TokenStream { - let ty = macro_data.ty; - +fn generate_update_querybuilder_tokens(table_schema_data: &str) -> TokenStream { quote! { - /// Generates a [`canyon_sql::query::UpdateQueryBuilder`] + /// Generates a [`canyon_sql::query::querybuilder::UpdateQueryBuilder`] /// that allows you to customize the query by adding parameters and constrains dynamically. 
/// /// It performs an `UPDATE table_name`, where `table_name` it's the name of your /// entity but converted to the corresponding database convention, /// unless concrete values are set on the available parameters of the /// `canyon_macro(table_name = "table_name", schema = "schema")` - fn update_query<'a>() -> canyon_sql::query::UpdateQueryBuilder<'a, #ty> { - canyon_sql::query::UpdateQueryBuilder::new(#table_schema_data, "") + fn update_query<'a>() -> Result< + canyon_sql::query::querybuilder::UpdateQueryBuilder<'a>, + Box + > { + canyon_sql::query::querybuilder::UpdateQueryBuilder::new(#table_schema_data) } - /// Generates a [`canyon_sql::query::UpdateQueryBuilder`] + /// Generates a [`canyon_sql::query::querybuilder::UpdateQueryBuilder`] /// that allows you to customize the query by adding parameters and constrains dynamically. /// /// It performs an `UPDATE table_name`, where `table_name` it's the name of your @@ -133,10 +150,125 @@ pub fn generate_update_query_tokens( /// `canyon_macro(table_name = "table_name", schema = "schema")` /// /// The query it's made against the database with the configured datasource - /// described in the configuration file, and selected with the [`&str`] - /// passed as parameter. 
- fn update_query_datasource<'a>(datasource_name: &'a str) -> canyon_sql::query::UpdateQueryBuilder<'a, #ty> { - canyon_sql::query::UpdateQueryBuilder::new(#table_schema_data, datasource_name) + /// described in the configuration file, and selected with the input parameter + fn update_query_with<'a>(database_type: canyon_sql::connection::DatabaseType) -> Result< + canyon_sql::query::querybuilder::UpdateQueryBuilder<'a>, + Box + > { + canyon_sql::query::querybuilder::UpdateQueryBuilder::new(#table_schema_data, database_type) } } } + +mod __details { + use super::*; + + pub(crate) fn generate_update_entity_body(table_schema_data: &str) -> TokenStream { + let update_entity_core_logic = generate_update_entity_pk_body_logic(table_schema_data); + let no_pk_err = generate_no_pk_error(); + + quote! { + if let Some(primary_key) = entity.primary_key() { + #update_entity_core_logic + + let default_db_conn = canyon_sql::core::Canyon::instance()? + .get_default_connection()?; + let _ = default_db_conn.execute(&stmt, &update_values).await?; + Ok(()) + } else { + #no_pk_err + } + } + } + + pub(crate) fn generate_update_entity_with_body( + table_schema_data: &str + ) -> TokenStream { + let update_entity_core_logic = generate_update_entity_pk_body_logic(table_schema_data); + let no_pk_err = generate_no_pk_error(); + + quote! { + if let Some(primary_key) = entity.primary_key() { + #update_entity_core_logic + + let _ = input.execute(&stmt, &update_values).await?; + Ok(()) + } else { + #no_pk_err + } + } + } + + fn generate_update_entity_pk_body_logic(table_schema_data: &str) -> TokenStream { + quote! 
{ + let pk_actual_value = entity.primary_key_actual_value(); + let update_columns = entity.fields_names(); + let update_values_pk_parsed = entity.fields_actual_values(); + + let mut vec_columns_values: Vec = Vec::new(); + for (i, column_name) in update_columns.to_vec().iter().enumerate() { + let column_equal_value = format!("{} = ${}", column_name, i + 2); + vec_columns_values.push(column_equal_value) + } + let col_vals_placeholders = vec_columns_values.join(", "); + + // Efficiently build argument list: pk first, then values + let mut update_values: Vec<&dyn canyon_sql::query::QueryParameter> = + Vec::with_capacity(1 + update_values_pk_parsed.len()); + update_values.push(pk_actual_value); + update_values.extend(update_values_pk_parsed); + + let stmt = format!( + "UPDATE {} SET {} WHERE {:?} = $1", + #table_schema_data, col_vals_placeholders, primary_key + ); + } + } + + pub(crate) fn generate_no_pk_error() -> TokenStream { + let err_msg = consts::UNAVAILABLE_CRUD_OP_ON_INSTANCE; + quote! 
{ + return Err( + std::io::Error::new( + std::io::ErrorKind::Unsupported, + #err_msg + ).into_inner().unwrap() + ); + } + } +} + +// +// #[cfg(test)] +// mod update_tokens_tests { +// use proc_macro2::Ident; +// use crate::query_operations::consts; +// +// // use crate::query_operations::consts::{ +// // INPUT_PARAM, LT_CONSTRAINT, RES_VOID_RET_TY, RES_VOID_RET_TY_LT, USER_MOCK_TY, +// // }; +// #[test] +// fn test_create_update_macro() { +// let ty = syn::parse_str::("User").unwrap(); +// let mapper_ty = syn::parse_str::("User").unwrap(); +// let tokens = crate::query_operations::read::__details::find_all_generators::create_find_all_macro(&ty, &mapper_ty, crate::query_operations::read::macro_builder_read_ops_tests::SELECT_ALL_STMT); +// let generated = tokens.to_string(); +// +// assert!(generated.contains("async fn find_all")); +// assert!(generated.contains(consts::RES_RET_TY)); +// assert!(generated.contains(crate::query_operations::read::macro_builder_read_ops_tests::SELECT_ALL_STMT)); +// } +// +// #[test] +// fn test_create_find_all_with_macro() { +// let mapper_ty = syn::parse_str::("User").unwrap(); +// let tokens = crate::query_operations::read::__details::find_all_generators::create_find_all_with_macro(&mapper_ty, crate::query_operations::read::macro_builder_read_ops_tests::SELECT_ALL_STMT); +// let generated = tokens.to_string(); +// +// assert!(generated.contains("async fn find_all_with")); +// assert!(generated.contains(consts::RES_RET_TY)); +// assert!(generated.contains(consts::LT_CONSTRAINT)); +// assert!(generated.contains(consts::INPUT_PARAM)); +// assert!(generated.contains(crate::query_operations::read::macro_builder_read_ops_tests::SELECT_ALL_STMT)); +// } +// } diff --git a/canyon_macros/src/utils/canyon_crud_attribute.rs b/canyon_macros/src/utils/canyon_crud_attribute.rs new file mode 100644 index 00000000..265affe4 --- /dev/null +++ b/canyon_macros/src/utils/canyon_crud_attribute.rs @@ -0,0 +1,33 @@ +use proc_macro2::Ident; +use syn::Token; 
+use syn::parse::{Parse, ParseStream}; + +/// Type that helps to parse the: `#[canyon_crud(maps_to = Ident)]` proc macro attribute +/// +/// The ident value of the `maps_to` argument brings a type that is the target type for which +/// `CrudOperations` will write the queries as the implementor of [`RowMapper`] +pub(crate) struct CanyonCrudAttribute { + pub maps_to: Option, +} + +impl Parse for CanyonCrudAttribute { + fn parse(input: ParseStream<'_>) -> syn::Result { + let arg_name: Ident = input.parse()?; + if arg_name != "maps_to" { + return Err(syn::Error::new_spanned( + arg_name, + "unsupported 'canyon_crud' attribute, expected `maps_to`", + )); + } + + // Parse (and discard the span of) the `=` token + let _: Token![=] = input.parse()?; + + // Parse the argument value + let name = input.parse()?; + + Ok(Self { + maps_to: Some(name), + }) + } +} diff --git a/canyon_macros/src/utils/function_parser.rs b/canyon_macros/src/utils/function_parser.rs index 841e534d..7f0a294b 100644 --- a/canyon_macros/src/utils/function_parser.rs +++ b/canyon_macros/src/utils/function_parser.rs @@ -1,11 +1,10 @@ use syn::{ - parse::{Parse, ParseBuffer}, Attribute, Block, ItemFn, Signature, Visibility, + parse::{Parse, ParseBuffer}, }; /// Implementation of syn::Parse for the `#[canyon]` proc-macro #[derive(Clone)] -#[allow(dead_code)] pub struct FunctionParser { pub attrs: Vec, pub vis: Visibility, @@ -15,21 +14,13 @@ pub struct FunctionParser { impl Parse for FunctionParser { fn parse(input: &ParseBuffer) -> syn::Result { - let func = input.parse::(); - - if func.is_err() { - return Err(syn::Error::new( - input.cursor().span(), - "Error on `fn main()`", - )); - } + let func = input.parse::()?; - let func_ok = func.ok().unwrap(); Ok(Self { - attrs: func_ok.attrs, - vis: func_ok.vis, - sig: func_ok.sig, - block: func_ok.block, + attrs: func.attrs, + vis: func.vis, + sig: func.sig, + block: func.block, }) } } diff --git a/canyon_macros/src/utils/helpers.rs 
b/canyon_macros/src/utils/helpers.rs index 2db52be5..f994b1ee 100644 --- a/canyon_macros/src/utils/helpers.rs +++ b/canyon_macros/src/utils/helpers.rs @@ -1,140 +1,167 @@ +use super::macro_tokens::MacroTokens; +use canyon_core::query::querybuilder::types::TableMetadata; use proc_macro2::{Ident, Span, TokenStream}; -use syn::{punctuated::Punctuated, MetaNameValue, Token}; +use quote::quote; +use std::fmt::Write; +use syn::{ + Attribute, Field, Fields, MetaNameValue, Token, Type, TypeGenerics, Visibility, + punctuated::Punctuated, +}; -use super::macro_tokens::MacroTokens; +/// Given the derived type of CrudOperations, and the possible mapping type if the `#[canyon_crud(maps_to = Type)]` attribute exists, +/// returns a [`TokenStream`] with the final `RowMapper` implementor. +pub fn compute_crud_ops_mapping_target_type_with_generics( + row_mapper_ty: &Ident, + row_mapper_ty_generics: &TypeGenerics, + crud_ops_ty: Option<&Ident>, +) -> TokenStream { + if let Some(crud_ops_ty) = crud_ops_ty { + quote! { #crud_ops_ty } + } else { + quote!
{ #row_mapper_ty #row_mapper_ty_generics } + } +} + +pub fn filter_fields(fields: &Fields) -> Vec<(Visibility, Ident)> { + fields + .iter() + .map(|field| (field.vis.clone(), field.ident.as_ref().unwrap().clone())) + .collect::>() +} + +pub fn __fields_with_types(fields: &Fields) -> Vec<(Visibility, Ident, Type)> { + fields + .iter() + .map(|field| { + ( + field.vis.clone(), + field.ident.as_ref().unwrap().clone(), + field.ty.clone(), + ) + }) + .collect::>() +} + +pub fn placeholders_generator(num_values: usize) -> String { + let mut placeholders = String::new(); + for (i, n) in (1..num_values).enumerate() { + if i > 0 { + placeholders.push_str(", "); + } + write!(placeholders, "${}", n).unwrap(); + } + + placeholders +} + +pub fn field_has_target_attribute(field: &Field, target_attribute: &str) -> bool { + field.attrs.iter().any(|attr| { + attr.path + .segments + .first() + .map(|segment| segment.ident == target_attribute) + .unwrap_or(false) + }) +} /// If the `canyon_entity` macro has valid attributes attached, and those attrs are the /// user's desired `table_name` and/or the `schema_name`, this method returns its /// correct form to be wired as the table name that the CRUD methods requires for generate /// the queries -pub fn table_schema_parser(macro_data: &MacroTokens<'_>) -> Result { +pub fn table_schema_parser<'a>(macro_data: &MacroTokens<'_>) -> Result { let mut table_name: Option = None; let mut schema: Option = None; for attr in macro_data.attrs { - if attr - .path - .segments - .iter() - .any(|seg| seg.ident == "canyon_macros" || seg.ident == "canyon_entity") - { - let name_values_result: Result, syn::Error> = - attr.parse_args_with(Punctuated::parse_terminated); - - if let Ok(meta_name_values) = name_values_result { - for nv in meta_name_values { - let ident = nv.path.get_ident(); - if let Some(i) = ident { - let identifier = i; - match &nv.lit { - syn::Lit::Str(s) => { - if identifier == "table_name" { - table_name = Some(s.value()) - } else if 
identifier == "schema" { - schema = Some(s.value()) - } else { - return Err( - syn::Error::new_spanned( - Ident::new(&identifier.to_string(), i.span()), - "Only string literals are valid values for the attribute arguments" - ).into_compile_error() - ); - } - } - _ => return Err(syn::Error::new_spanned( - Ident::new(&identifier.to_string(), i.span()), - "Only string literals are valid values for the attribute arguments", - ) - .into_compile_error()), - } - } else { - return Err(syn::Error::new( - Span::call_site(), - "Only string literals are valid values for the attribute arguments", - ) - .into_compile_error()); - } - } - } - - let mut final_table_name = String::new(); - if schema.is_some() { - final_table_name.push_str(format!("{}.", schema.unwrap()).as_str()) - } + let mut segments = attr.path.segments.iter(); + if segments.any(|seg| seg.ident == "canyon_macros" || seg.ident == "canyon_entity") { + parse_canyon_entity_attr(attr, &mut schema, &mut table_name)?; + } + // TODO: if segments because we could parse here the canyon_crud proc_macro_attr + } - if let Some(t_name) = table_name { - final_table_name.push_str(t_name.as_str()) - } else { - let defaulted = - &default_database_table_name_from_entity_name(¯o_data.ty.to_string()); - final_table_name.push_str(defaulted) - } + let mut table_meta = TableMetadata::default(); + if let Some(schema_) = schema { + table_meta.schema(schema_); + } - return Ok(final_table_name); - } + if let Some(t_name) = table_name { + table_meta.table_name(t_name); + } else { + let target_type = if let Some(mapper_ty) = macro_data.retrieve_mapping_target_type() { + mapper_ty.to_string() + } else { + macro_data.ty.to_string() + }; + let defaulted = default_database_table_name_from_entity_name(&target_type); + table_meta.table_name(defaulted); } - Ok(macro_data.ty.to_string()) + Ok(table_meta) } -/// Parses a syn::Identifier to get a snake case database name from the type identifier -pub fn _database_table_name_from_struct(ty: &Ident) -> 
String { - let struct_name: String = ty.to_string(); - let mut table_name: String = String::new(); +fn parse_canyon_entity_attr( + attr: &Attribute, + schema: &mut Option, + table_name: &mut Option, +) -> Result<(), TokenStream> { + if attr + .path + .segments + .iter() + .any(|seg| seg.ident == "canyon_macros" || seg.ident == "canyon_entity") + { + let name_values_result: Result, syn::Error> = + attr.parse_args_with(Punctuated::parse_terminated); - let mut index = 0; - for char in struct_name.chars() { - if index < 1 { - table_name.push(char.to_ascii_lowercase()); - index += 1; - } else { - match char { - n if n.is_ascii_uppercase() => { - table_name.push('_'); - table_name.push(n.to_ascii_lowercase()); + if let Ok(meta_name_values) = name_values_result { + for nv in meta_name_values { + let ident = nv.path.get_ident(); + if let Some(i) = ident { + let identifier = i; + match &nv.lit { + syn::Lit::Str(s) => { + if identifier == "table_name" { + *table_name = Some(s.value()); + } else if identifier == "schema" { + *schema = Some(s.value()); + } else { + return Err( + syn::Error::new_spanned( + Ident::new(&identifier.to_string(), i.span()), + "Only string literals are valid values for the attribute arguments" + ).into_compile_error() + ); + } + } + _ => { + return Err(syn::Error::new_spanned( + Ident::new(&identifier.to_string(), i.span()), + "Only string literals are valid values for the attribute arguments", + ) + .into_compile_error()); + } + } + } else { + return Err(syn::Error::new( + Span::call_site(), + "Only string literals are valid values for the attribute arguments", + ) + .into_compile_error()); } - _ => table_name.push(char), } } } - table_name -} - -/// Parses a syn::Identifier to create a defaulted snake case database table name -#[test] -#[cfg(not(target_env = "msvc"))] -fn test_entity_database_name_defaulter() { - assert_eq!( - default_database_table_name_from_entity_name("League"), - "league".to_owned() - ); - assert_eq!( - 
default_database_table_name_from_entity_name("MajorLeague"), - "major_league".to_owned() - ); - assert_eq!( - default_database_table_name_from_entity_name("MajorLeagueTournament"), - "major_league_tournament".to_owned() - ); - - assert_ne!( - default_database_table_name_from_entity_name("MajorLeague"), - "majorleague".to_owned() - ); - assert_ne!( - default_database_table_name_from_entity_name("MajorLeague"), - "MajorLeague".to_owned() - ); + Ok(()) } /// Autogenerates a default table name for an entity given their struct name pub fn default_database_table_name_from_entity_name(ty: &str) -> String { - let struct_name: String = ty.to_string(); let mut table_name: String = String::new(); let mut index = 0; - for char in struct_name.chars() { + for char in ty.chars() { if index < 1 { table_name.push(char.to_ascii_lowercase()); index += 1; @@ -180,5 +207,57 @@ pub fn database_table_name_to_struct_ident(name: &str) -> Ident { } } - Ident::new(&struct_name, proc_macro2::Span::call_site()) + Ident::new(&struct_name, Span::call_site()) +} + +/// Parses a syn::Identifier to create a defaulted snake case database table name +#[test] +#[cfg(not(target_env = "msvc"))] +fn test_entity_database_name_defaulter() { + assert_eq!( + default_database_table_name_from_entity_name("League"), + "league".to_owned() + ); + assert_eq!( + default_database_table_name_from_entity_name("MajorLeague"), + "major_league".to_owned() + ); + assert_eq!( + default_database_table_name_from_entity_name("MajorLeagueTournament"), + "major_league_tournament".to_owned() + ); + + assert_ne!( + default_database_table_name_from_entity_name("MajorLeague"), + "majorleague".to_owned() + ); + assert_ne!( + default_database_table_name_from_entity_name("MajorLeague"), + "MajorLeague".to_owned() + ); +} +#[cfg(test)] +mod tests { + use super::*; + use syn::{ItemStruct, parse_str}; + + #[test] + fn detects_target_attribute_correctly() { + let input = r#" + struct Test { + #[my_attr] + field1: String, + field2: i32, + 
} + "#; + + // Parse the struct + let item: ItemStruct = parse_str(input).expect("Failed to parse struct"); + let fields: Vec<_> = item.fields.iter().collect(); + + // Check the field with #[my_attr] + assert!(field_has_target_attribute(fields[0], "my_attr")); + // Check the field without the attribute + assert!(!field_has_target_attribute(fields[1], "my_attr")); + } } diff --git a/canyon_macros/src/utils/macro_tokens.rs b/canyon_macros/src/utils/macro_tokens.rs index 415d9ccc..d79c0d3b 100644 --- a/canyon_macros/src/utils/macro_tokens.rs +++ b/canyon_macros/src/utils/macro_tokens.rs @@ -1,8 +1,10 @@ -use std::convert::TryFrom; - +use crate::utils::canyon_crud_attribute::CanyonCrudAttribute; +use crate::utils::helpers; +use crate::utils::primary_key_attribute::PrimaryKeyAttribute; use canyon_entities::field_annotation::EntityFieldAnnotation; use proc_macro2::Ident; -use syn::{Attribute, DeriveInput, Fields, Generics, Type, Visibility}; +use std::convert::TryFrom; +use syn::{Attribute, DeriveInput, Field, Fields, Generics, Type, Visibility}; /// Provides a convenient way of store the data for the TokenStream /// received on a macro @@ -13,32 +15,57 @@ pub struct MacroTokens<'a> { pub generics: &'a Generics, pub attrs: &'a Vec, pub fields: &'a Fields, + // -------- the new fields that must help to avoid recalculations every time that the user compiles + pub(crate) canyon_crud_attribute: Option, // Type level + pub(crate) primary_key_attribute: Option>, // Field level, quick access without iterations } impl<'a> MacroTokens<'a> { - pub fn new(ast: &'a DeriveInput) -> Self { - Self { - vis: &ast.vis, - ty: &ast.ident, - generics: &ast.generics, - attrs: &ast.attrs, - fields: match &ast.data { - syn::Data::Struct(ref s) => &s.fields, - _ => panic!("This derive macro can only be automatically derived for structs"), - }, + pub fn new(ast: &'a DeriveInput) -> Result { + // TODO: impl syn::parse instead + if let syn::Data::Struct(ref s) = ast.data { + let attrs = &ast.attrs; 
+ + let primary_key_attribute = __details::find_primary_key_field_annotation(&s.fields) + .map(PrimaryKeyAttribute::from); + + let mut canyon_crud_attribute = None; + for attr in attrs { + if attr.path.is_ident("canyon_crud") { + canyon_crud_attribute = Some(attr.parse_args::()?); + } + } + + Ok(Self { + vis: &ast.vis, + ty: &ast.ident, + generics: &ast.generics, + attrs: &ast.attrs, + fields: &s.fields, + canyon_crud_attribute, + primary_key_attribute, + }) + } else { + __details::raise_canyon_crud_only_for_structs_err() } } - /// Gives a Vec of tuples that contains the visibility, the name and - /// the type of every field on a Struct - pub fn _fields_with_visibility_and_types(&self) -> Vec<(Visibility, Ident, Type)> { + pub fn retrieve_mapping_target_type(&self) -> &Option { + if let Some(canyon_crud_attribute) = &self.canyon_crud_attribute { + &canyon_crud_attribute.maps_to + } else { + &None + } + } + + pub fn fields(&self) -> Vec<(Visibility, Ident, Type)> { self.fields .iter() .map(|field| { ( field.vis.clone(), - field.ident.as_ref().unwrap().clone(), - field.ty.clone(), + field.ident.clone().unwrap(), + field.clone().ty, ) }) .collect::>() @@ -46,27 +73,48 @@ impl<'a> MacroTokens<'a> { /// Gives a Vec of tuples that contains the name and /// the type of every field on a Struct - pub fn _fields_with_types(&self) -> Vec<(Ident, Type)> { + pub fn fields_with_types(&self) -> Vec<(&Ident, &Type)> { self.fields .iter() - .map(|field| (field.ident.as_ref().unwrap().clone(), field.ty.clone())) + .map(|field| (field.ident.as_ref().unwrap(), &field.ty)) .collect::>() } /// Gives a Vec of Ident with the fields of a Struct - pub fn get_struct_fields(&self) -> Vec { + pub fn get_struct_fields(&self) -> impl Iterator { self.fields .iter() .map(|field| field.ident.as_ref().unwrap().clone()) - .collect::>() } - /// Gives a Vec populated with the name of the fields of the struct - pub fn _get_struct_fields_as_collection_strings(&self) -> Vec { - self.get_struct_fields() - 
.iter() - .map(|ident| ident.to_owned().to_string()) - .collect::>() + /// Returns a Vec populated with the fields of the struct + /// + /// If the type contains a `#[primary_key]` annotation, returns the + /// name of the columns without the fields that maps against the column designated as + /// primary key (if it's present and its autoincremental attribute is set to true) + /// (autoincremental = true) or it's without the autoincremental attribute, which leads + /// to the same behaviour. + /// + /// Returns every field if there's no PK, or if it's present but autoincremental = false + pub fn get_columns_pk_parsed(&self) -> impl Iterator { + self.fields.iter().filter(|field| { + if !field.attrs.is_empty() { + field.attrs.iter().any(|attr| { + let a = attr.path.segments[0].clone().ident; + let b = attr.tokens.to_string(); + !(a == "primary_key" || b.contains("false")) + }) + } else { + true + } + }) + } + + /// Returns a collection with all the [`syn::Ident`] for all the type members, skipping (if present) + /// the field which is annotated with #[primary_key] + pub fn get_fields_idents_pk_parsed(&self) -> impl Iterator { + self.get_columns_pk_parsed() + .map(|field| field.ident.as_ref().unwrap()) } /// Returns a Vec populated with the name of the fields of the struct @@ -79,48 +127,29 @@ impl<'a> MacroTokens<'a> { /// to the same behaviour.
/// /// Returns every field if there's no PK, or if it's present but autoincremental = false - pub fn get_column_names_pk_parsed(&self) -> Vec { - self.fields - .iter() - .filter(|field| { - if !field.attrs.is_empty() { - field.attrs.iter().any(|attr| { - let a = attr.path.segments[0].clone().ident; - let b = attr.tokens.to_string(); - !(a == "primary_key" || b.contains("false")) - }) - } else { - true - } - }) + pub fn get_column_names_pk_parsed(&self) -> impl Iterator { + self.get_columns_pk_parsed() .map(|c| format!("\"{}\"", c.ident.as_ref().unwrap())) - .collect::>() } /// Retrieves the fields of the Struct as continuous String, comma separated - pub fn get_struct_fields_as_strings(&self) -> String { - let column_names: String = self - .get_struct_fields() - .iter() - .map(|ident| ident.to_owned().to_string()) + pub fn get_struct_fields_as_comma_sep_string(&self) -> String { + self.get_column_names_pk_parsed() .collect::>() - .iter() - .map(|column| column.to_owned() + ", ") - .collect::(); - - let mut column_names_as_chars = column_names.chars(); - column_names_as_chars.next_back(); - column_names_as_chars.next_back(); - - column_names_as_chars.as_str().to_owned() + .join(", ") } /// Retrieves the value of the index of an annotated field with #[primary_key] - pub fn get_pk_index(&self) -> Option { + pub fn _get_pk_index(&self) -> Option { let mut pk_index = None; for (idx, field) in self.fields.iter().enumerate() { for attr in &field.attrs { - if attr.path.segments[0].clone().ident == "primary_key" { + if attr + .path + .segments + .first() + .map(|segment| segment.ident == "primary_key")? 
+ { pk_index = Some(idx); } } @@ -128,20 +157,26 @@ impl<'a> MacroTokens<'a> { pk_index } + pub fn get_primary_key_field_annotation(&self) -> Option<&PrimaryKeyAttribute<'a>> { + self.primary_key_attribute.as_ref() + } + /// Utility for find the primary key attribute (if exists) and the /// column name (field) which belongs pub fn get_primary_key_annotation(&self) -> Option { - let f = self.fields.iter().find(|field| { - field - .attrs - .iter() - .map(|attr| attr.path.segments[0].clone().ident) - .map(|ident| ident.to_string()) - .find(|a| a == "primary_key") - == Some("primary_key".to_string()) - }); + self.get_primary_key_field_annotation() + .map(|attr| attr.ident.clone().to_string()) + } - f.map(|v| v.ident.clone().unwrap().to_string()) + pub fn get_primary_key_ident_and_type(&self) -> Option<(&Ident, &Type)> { + let primary_key = self.get_primary_key_annotation(); + if let Some(primary_key) = primary_key { + self.fields_with_types() + .into_iter() + .find(|(i, _t)| i.to_string() == primary_key) + } else { + None + } } /// Utility for find the `foreign_key` attributes (if exists) @@ -167,42 +202,53 @@ impl<'a> MacroTokens<'a> { /// Boolean that returns true if the type contains a `#[primary_key]` /// annotation. False otherwise. pub fn type_has_primary_key(&self) -> bool { - self.fields.iter().any(|field| { - field - .attrs - .iter() - .map(|attr| attr.path.segments[0].clone().ident) - .map(|ident| ident.to_string()) - .find(|a| a == "primary_key") - == Some("primary_key".to_string()) - }) + self.fields + .iter() + .any(|field| helpers::field_has_target_attribute(field, "primary_key")) } - /// Returns an String ready to be inserted on the VALUES Sql clause + /// Returns a String ready to be inserted on the VALUES Sql clause /// representing generic query parameters ($x). 
/// /// Already returns the correct number of placeholders, skipping one /// entry in the type contains a `#[primary_key]` pub fn placeholders_generator(&self) -> String { - let mut placeholders = String::new(); - if self.type_has_primary_key() { - for num in 1..self.fields.len() { - if num < self.fields.len() - 1 { - placeholders.push_str(&("$".to_owned() + &(num).to_string() + ", ")); - } else { - placeholders.push_str(&("$".to_owned() + &(num).to_string())); - } - } + let range_upper_bound = if self.type_has_primary_key() { + self.fields.len() } else { - for num in 1..self.fields.len() + 1 { - if num < self.fields.len() { - placeholders.push_str(&("$".to_owned() + &(num).to_string() + ", ")); - } else { - placeholders.push_str(&("$".to_owned() + &(num).to_string())); - } + self.fields.len() + 1 + }; + + helpers::placeholders_generator(range_upper_bound) + } +} + +mod __details { + use crate::utils::helpers; + use crate::utils::macro_tokens::MacroTokens; + use crate::utils::primary_key_attribute::PrimaryKeyIndex; + use proc_macro2::Span; + use syn::{Field, Fields}; + + pub(super) fn find_primary_key_field_annotation( + fields: &Fields, + ) -> Option<(PrimaryKeyIndex, &Field)> { + fields.iter().enumerate().find_map(|index_and_field| { + let idx = index_and_field.0; + let field = index_and_field.1; + if helpers::field_has_target_attribute(field, "primary_key") { + Some((PrimaryKeyIndex(idx), field)) + } else { + None } - } + }) + } - placeholders + pub(crate) fn raise_canyon_crud_only_for_structs_err<'a>() -> Result, syn::Error> + { + Err(syn::Error::new( + Span::call_site(), + "CanyonCrud may only be implemented for structs", + )) } } diff --git a/canyon_macros/src/utils/mod.rs b/canyon_macros/src/utils/mod.rs index be2269df..f8529a05 100644 --- a/canyon_macros/src/utils/mod.rs +++ b/canyon_macros/src/utils/mod.rs @@ -1,3 +1,5 @@ +mod canyon_crud_attribute; pub mod function_parser; pub mod helpers; pub mod macro_tokens; +pub(crate) mod primary_key_attribute; diff 
--git a/canyon_macros/src/utils/primary_key_attribute.rs b/canyon_macros/src/utils/primary_key_attribute.rs new file mode 100644 index 00000000..36ea4b7c --- /dev/null +++ b/canyon_macros/src/utils/primary_key_attribute.rs @@ -0,0 +1,47 @@ +use proc_macro2::Ident; +use quote::ToTokens; +use std::fmt::{Display, Formatter}; +use syn::{Field, Type}; + +/// Strong type for the index numerical value of the actual position on where a `#[primary_key]` +/// annotation is declared on a struct field +#[derive(Copy, Clone)] +pub(crate) struct PrimaryKeyIndex(pub(crate) usize); +impl From for usize { + fn from(pk: PrimaryKeyIndex) -> usize { + pk.0 + } +} + +pub(crate) struct PrimaryKeyAttribute<'a> { + pub ident: &'a Ident, + pub ty: &'a Type, + pub name: String, + pub index: PrimaryKeyIndex, +} + +impl<'a> Display for &'a PrimaryKeyAttribute<'a> { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let _ = f.write_fmt(format_args!( + "ident:{},ty:{},name:{}", + self.ident, + self.ty.to_token_stream(), + self.name + )); + Ok(()) + } +} + +/// Ad-hoc creation for the process of parsing a primary key attribute along with its index position +/// on the struct +impl<'a> From<(PrimaryKeyIndex, &'a Field)> for PrimaryKeyAttribute<'a> { + fn from(value: (PrimaryKeyIndex, &'a Field)) -> Self { + let field = value.1; + Self { + ident: field.ident.as_ref().unwrap(), + ty: &field.ty, + name: field.ident.as_ref().unwrap().to_string(), + index: value.0, + } + } +} diff --git a/canyon_migrations/Cargo.toml b/canyon_migrations/Cargo.toml index ec9a31db..1e6b1c0d 100644 --- a/canyon_migrations/Cargo.toml +++ b/canyon_migrations/Cargo.toml @@ -10,8 +10,8 @@ license.workspace = true description.workspace = true [dependencies] +canyon_core = { workspace = true } canyon_crud = { workspace = true } -canyon_connection = { workspace = true } canyon_entities = { workspace = true } tokio = { workspace = true } @@ -20,18 +20,13 @@ tiberius = { workspace = true, optional = true } mysql_async 
= { workspace = true, optional = true } mysql_common = { workspace = true, optional = true } - -async-trait = { workspace = true } - regex = { workspace = true } partialdebug = { workspace = true } walkdir = { workspace = true } -proc-macro2 = { workspace = true } -quote = { workspace = true } -syn = { version = "1.0.86", features = ["full", "parsing"] } # TODO Pending to refactor and upgrade [features] -postgres = ["tokio-postgres", "canyon_connection/postgres", "canyon_crud/postgres"] -mssql = ["tiberius", "canyon_connection/mssql", "canyon_crud/mssql"] -mysql = ["mysql_async","mysql_common", "canyon_connection/mysql", "canyon_crud/mysql"] +migrations = [] +postgres = ["tokio-postgres", "canyon_core/postgres", "canyon_crud/postgres"] +mssql = ["tiberius", "canyon_core/mssql", "canyon_crud/mssql"] +mysql = ["mysql_async", "mysql_common", "canyon_core/mysql", "canyon_crud/mysql"] diff --git a/canyon_migrations/src/constants.rs b/canyon_migrations/src/constants.rs index 9f025762..7674efe5 100644 --- a/canyon_migrations/src/constants.rs +++ b/canyon_migrations/src/constants.rs @@ -168,98 +168,3 @@ pub mod sqlserver_type { pub const TIME: &str = "TIME"; pub const DATETIME: &str = "DATETIME2"; } - -pub mod mocked_data { - use crate::migrations::information_schema::{ColumnMetadata, TableMetadata}; - use canyon_connection::lazy_static::lazy_static; - - lazy_static! 
{ - pub static ref TABLE_METADATA_LEAGUE_EX: TableMetadata = TableMetadata { - table_name: "league".to_string(), - columns: vec![ - ColumnMetadata { - column_name: "id".to_owned(), - datatype: "int".to_owned(), - character_maximum_length: None, - is_nullable: false, - column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: Some("PK__league__3213E83FBDA92571".to_owned()), - primary_key_name: Some("PK__league__3213E83FBDA92571".to_owned()), - is_identity: false, - identity_generation: None - }, - ColumnMetadata { - column_name: "ext_id".to_owned(), - datatype: "bigint".to_owned(), - character_maximum_length: None, - is_nullable: false, - column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: None, - primary_key_name: None, - is_identity: false, - identity_generation: None - }, - ColumnMetadata { - column_name: "slug".to_owned(), - datatype: "nvarchar".to_owned(), - character_maximum_length: None, - is_nullable: false, - column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: None, - primary_key_name: None, - is_identity: false, - identity_generation: None - }, - ColumnMetadata { - column_name: "name".to_owned(), - datatype: "nvarchar".to_owned(), - character_maximum_length: None, - is_nullable: false, - column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: None, - primary_key_name: None, - is_identity: false, - identity_generation: None - }, - ColumnMetadata { - column_name: "region".to_owned(), - datatype: "nvarchar".to_owned(), - character_maximum_length: None, - is_nullable: false, - column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: None, - primary_key_name: None, - is_identity: false, - identity_generation: None - }, - ColumnMetadata { - column_name: "image_url".to_owned(), - datatype: "nvarchar".to_owned(), - character_maximum_length: None, - is_nullable: false, - 
column_default: None, - foreign_key_info: None, - foreign_key_name: None, - primary_key_info: None, - primary_key_name: None, - is_identity: false, - identity_generation: None - } - ] - }; - pub static ref NON_MATCHING_TABLE_METADATA: TableMetadata = TableMetadata { - table_name: "random_name_to_assert_false".to_string(), - columns: vec![] - }; - } -} diff --git a/canyon_migrations/src/lib.rs b/canyon_migrations/src/lib.rs index 5743cc8b..757597cc 100644 --- a/canyon_migrations/src/lib.rs +++ b/canyon_migrations/src/lib.rs @@ -11,35 +11,26 @@ /// in order to perform the migrations pub mod migrations; -extern crate canyon_connection; extern crate canyon_crud; extern crate canyon_entities; mod constants; -use canyon_connection::lazy_static::lazy_static; +use std::sync::OnceLock; use std::{collections::HashMap, sync::Mutex}; -lazy_static! { - pub static ref QUERIES_TO_EXECUTE: Mutex>> = - Mutex::new(HashMap::new()); - pub static ref CM_QUERIES_TO_EXECUTE: Mutex>> = - Mutex::new(HashMap::new()); -} +pub static QUERIES_TO_EXECUTE: OnceLock>>> = OnceLock::new(); +pub static CM_QUERIES_TO_EXECUTE: OnceLock>>> = OnceLock::new(); /// Stores a newly generated SQL statement from the migrations into the register pub fn save_migrations_query_to_execute(stmt: String, ds_name: &str) { - if QUERIES_TO_EXECUTE.lock().unwrap().contains_key(ds_name) { - QUERIES_TO_EXECUTE - .lock() - .unwrap() - .get_mut(ds_name) - .unwrap() - .push(stmt); + // Access the QUERIES_TO_EXECUTE hash map and lock it for safe access + let queries_to_execute = QUERIES_TO_EXECUTE.get_or_init(|| Mutex::new(HashMap::new())); + let mut queries = queries_to_execute.lock().unwrap(); + + if queries.contains_key(ds_name) { + queries.get_mut(ds_name).unwrap().push(stmt); } else { - QUERIES_TO_EXECUTE - .lock() - .unwrap() - .insert(ds_name.to_owned(), vec![stmt]); + queries.insert(ds_name.to_owned(), vec![stmt]); } } diff --git a/canyon_migrations/src/migrations/handler.rs 
b/canyon_migrations/src/migrations/handler.rs index 3d00da8b..3d0bc27d 100644 --- a/canyon_migrations/src/migrations/handler.rs +++ b/canyon_migrations/src/migrations/handler.rs @@ -1,14 +1,5 @@ -use canyon_connection::{datasources::Migrations as MigrationsStatus, DATASOURCES}; -use canyon_crud::rows::CanyonRows; -use canyon_entities::CANYON_REGISTER_ENTITIES; -use partialdebug::placeholder::PartialDebug; - use crate::{ - canyon_crud::{ - bounds::{Column, Row, RowOperations}, - crud::Transaction, - DatabaseType, - }, + canyon_crud::DatabaseType, constants, migrations::{ information_schema::{ColumnMetadata, ColumnMetadataTypeValue, TableMetadata}, @@ -16,28 +7,32 @@ use crate::{ processor::MigrationsProcessor, }, }; +use canyon_core::canyon::Canyon; +use canyon_core::{ + column::Column, + connection::db_connector::DatabaseConnector, + row::{Row, RowOperations}, + rows::CanyonRows, + transaction::Transaction, +}; +use canyon_entities::CANYON_REGISTER_ENTITIES; +use partialdebug::placeholder::PartialDebug; #[derive(PartialDebug)] pub struct Migrations; // Makes this structure able to make queries to the database -impl Transaction for Migrations {} +impl Transaction for Migrations {} impl Migrations { /// Launches the mechanism to parse the Database schema, the Canyon register /// and the database table with the memory of Canyon to perform the /// migrations over the targeted database pub async fn migrate() { - for datasource in DATASOURCES.iter() { - if datasource - .properties - .migrations - .filter(|status| !status.eq(&MigrationsStatus::Disabled)) - .is_none() - { - println!( - "Skipped datasource: {:?} for being disabled (or not configured)", - datasource.name - ); + for datasource in Canyon::instance() + .expect("Failure getting datasources on migrations") + .datasources() + { + if !datasource.has_migrations_enabled() { continue; } println!( @@ -46,13 +41,22 @@ impl Migrations { ); let mut migrations_processor = MigrationsProcessor::default(); + let db_conn = 
Canyon::instance() + .unwrap_or_else(|_| panic!("Failure getting db connection: {}", &datasource.name)) + .get_connection(&datasource.name) + .unwrap_or_else(|_| { + panic!( + "Unable to get a database connection on the migrations processor for: {:?}", + datasource.name + ) + }); let canyon_entities = CANYON_REGISTER_ENTITIES.lock().unwrap().to_vec(); let canyon_memory = CanyonMemory::remember(datasource, &canyon_entities).await; // Tracked entities that must be migrated whenever Canyon starts let schema_status = - Self::fetch_database(&datasource.name, datasource.get_db_type()).await; + Self::fetch_database(&datasource.name, db_conn, datasource.get_db_type()).await; let database_tables_schema_info = Self::map_rows(schema_status, datasource.get_db_type()); @@ -84,11 +88,12 @@ impl Migrations { } /// Fetches a concrete schema metadata by target the database - /// chosen by it's datasource name property + /// chosen by its datasource name property async fn fetch_database( - datasource_name: &str, + ds_name: &str, + db_conn: &DatabaseConnector, db_type: DatabaseType, - ) -> CanyonRows { + ) -> CanyonRows { let query = match db_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::FETCH_PUBLIC_SCHEMA, @@ -96,27 +101,28 @@ impl Migrations { DatabaseType::SqlServer => constants::mssql_queries::FETCH_PUBLIC_SCHEMA, #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!("Not implemented fetch database in mysql"), + _ => todo!("Non-legal db-type"), }; - Self::query(query, [], datasource_name) + Self::query_rows(query, [], db_conn) .await .unwrap_or_else(|_| { - panic!( - "Error querying the schema information for the datasource: {datasource_name}" - ) + panic!("Error querying the schema information for the datasource: {ds_name}") }) } /// Handler for parse the result of query the information of some database schema, /// and extract the content of the returned rows into custom structures with /// the data well organized for every 
entity present on that schema - fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { + #[allow(unreachable_patterns)] + fn map_rows(db_results: CanyonRows, db_type: DatabaseType) -> Vec { match db_results { #[cfg(feature = "postgres")] CanyonRows::Postgres(v) => Self::process_tp_rows(v, db_type), #[cfg(feature = "mssql")] CanyonRows::Tiberius(v) => Self::process_tib_rows(v, db_type), - _ => panic!(), + #[cfg(feature = "mysql")] + CanyonRows::MySQL(_) => panic!("Not implemented fetch database in mysql"), } } @@ -197,10 +203,10 @@ impl Migrations { "YES" ) } - } else if column_identifier == "identity_generation" { - if let ColumnMetadataTypeValue::StringValue(value) = &column_value { - dest.identity_generation = value.to_owned() - } + } else if column_identifier == "identity_generation" + && let ColumnMetadataTypeValue::StringValue(value) = &column_value + { + dest.identity_generation = value.to_owned() }; } @@ -294,6 +300,7 @@ fn check_for_table_name( #[cfg(feature = "mssql")] DatabaseType::SqlServer => table.table_name == res_row.get_mssql::<&str>("table_name"), #[cfg(feature = "mysql")] - DatabaseType::MySQL => todo!(), + DatabaseType::MySQL => todo!("Not implemented fetch database in mysql"), + _ => todo!("Non-legal db-type"), } } diff --git a/canyon_migrations/src/migrations/information_schema.rs b/canyon_migrations/src/migrations/information_schema.rs index 9e165eee..00170ed4 100644 --- a/canyon_migrations/src/migrations/information_schema.rs +++ b/canyon_migrations/src/migrations/information_schema.rs @@ -1,8 +1,11 @@ #[cfg(feature = "mssql")] -use canyon_connection::tiberius::ColumnType as TIB_TY; +use canyon_core::connection::tiberius::ColumnType as TIB_TY; #[cfg(feature = "postgres")] -use canyon_connection::tokio_postgres::types::Type as TP_TYP; -use canyon_crud::bounds::{Column, ColumnType, Row, RowOperations}; +use canyon_core::connection::tokio_postgres::types::Type as TP_TYP; +use canyon_core::{ + column::{Column, ColumnType}, + 
row::{Row, RowOperations}, +}; /// Model that represents the database entities that belongs to the current schema. /// diff --git a/canyon_migrations/src/migrations/memory.rs b/canyon_migrations/src/migrations/memory.rs index 1ad6263a..6f1244a7 100644 --- a/canyon_migrations/src/migrations/memory.rs +++ b/canyon_migrations/src/migrations/memory.rs @@ -1,8 +1,13 @@ use crate::constants; -use canyon_crud::{crud::Transaction, DatabaseType, DatasourceConfig}; +use canyon_core::canyon::Canyon; +use canyon_core::connection::contracts::DbConnection; +use canyon_core::connection::db_connector::DatabaseConnector; +use canyon_core::transaction::Transaction; +use canyon_crud::{DatabaseType, DatasourceConfig}; use regex::Regex; use std::collections::HashMap; use std::fs; +use std::sync::Mutex; use walkdir::WalkDir; use canyon_entities::register_types::CanyonRegisterEntity; @@ -52,7 +57,7 @@ pub struct CanyonMemory { } // Makes this structure able to make queries to the database -impl Transaction for CanyonMemory {} +impl Transaction for CanyonMemory {} impl CanyonMemory { /// Queries the database to retrieve internal data about the structures @@ -62,11 +67,27 @@ impl CanyonMemory { datasource: &DatasourceConfig, canyon_entities: &[CanyonRegisterEntity<'_>], ) -> Self { + let db_conn = Canyon::instance() + .unwrap_or_else(|_| { + panic!( + "Failure getting db connection: {} on Canyon Memory", + &datasource.name + ) + }) + .get_connection(&datasource.name) + .unwrap_or_else(|_| { + panic!( + "Unable to get a database connection on Canyon Memory: {:?}", + datasource.name + ) + }); + // Creates the memory table if not exists - Self::create_memory(&datasource.name, &datasource.get_db_type()).await; + Self::create_memory(&datasource.name, db_conn, &datasource.get_db_type()).await; // Retrieve the last status data from the `canyon_memory` table - let res = Self::query("SELECT * FROM canyon_memory", [], &datasource.name) + let res = db_conn + .query_rows("SELECT * FROM canyon_memory", 
&[]) .await .expect("Error querying Canyon Memory"); @@ -126,28 +147,27 @@ impl CanyonMemory { || el.declared_table_name == _struct.declared_table_name }); - if let Some(old) = already_in_db { - if !(old.filepath == _struct.filepath + if let Some(old) = already_in_db + && !(old.filepath == _struct.filepath && old.struct_name == _struct.struct_name && old.declared_table_name == _struct.declared_table_name) - { - updates.push(&old.struct_name); - let stmt = format!( - "UPDATE canyon_memory SET filepath = '{}', struct_name = '{}', declared_table_name = '{}' \ + { + updates.push(&old.struct_name); + let stmt = format!( + "UPDATE canyon_memory SET filepath = '{}', struct_name = '{}', declared_table_name = '{}' \ WHERE id = {}", - _struct.filepath, _struct.struct_name, _struct.declared_table_name, old.id - ); - save_canyon_memory_query(stmt, &datasource.name); + _struct.filepath, _struct.struct_name, _struct.declared_table_name, old.id + ); + save_canyon_memory_query(stmt, &datasource.name); - // if the updated element is the struct name, we add it to the table_rename Hashmap - let rename_table = old.declared_table_name != _struct.declared_table_name; + // if the updated element is the struct name, we add it to the table_rename Hashmap + let rename_table = old.declared_table_name != _struct.declared_table_name; - if rename_table { - mem.renamed_entities.insert( - _struct.declared_table_name.to_string(), // The new one - old.declared_table_name.to_string(), // The old one - ); - } + if rename_table { + mem.renamed_entities.insert( + _struct.declared_table_name.to_string(), // The new one + old.declared_table_name.to_string(), // The old one + ); } } @@ -187,6 +207,7 @@ impl CanyonMemory { &mut self, canyon_entities: &[CanyonRegisterEntity<'_>], ) { + let re = Regex::new(r#"\bstruct\s+(\w+)"#).unwrap(); for file in WalkDir::new("./src") .into_iter() .filter_map(|file| file.ok()) @@ -208,7 +229,6 @@ impl CanyonMemory { canyon_entity_macro_counter += 1; } - let re = 
Regex::new(r#"\bstruct\s+(\w+)"#).unwrap(); if let Some(captures) = re.captures(line) { struct_name.push_str(captures.get(1).unwrap().as_str()); } @@ -240,7 +260,11 @@ impl CanyonMemory { } /// Generates, if not exists the `canyon_memory` table - async fn create_memory(datasource_name: &str, database_type: &DatabaseType) { + async fn create_memory( + datasource_name: &str, + db_conn: &DatabaseConnector, + database_type: &DatabaseType, + ) { let query = match database_type { #[cfg(feature = "postgres")] DatabaseType::PostgreSql => constants::postgresql_queries::CANYON_MEMORY_TABLE, @@ -248,30 +272,22 @@ impl CanyonMemory { DatabaseType::SqlServer => constants::mssql_queries::CANYON_MEMORY_TABLE, #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!("Memory table in mysql not implemented"), + DatabaseType::Deferred => todo!("Deferred") }; - Self::query(query, [], datasource_name) + Self::query_rows(query, [], db_conn) .await - .expect("Error creating the 'canyon_memory' table"); + .unwrap_or_else(|_| panic!("Error creating the 'canyon_memory' table while processing the datasource: {datasource_name}")); } } fn save_canyon_memory_query(stmt: String, ds_name: &str) { use crate::CM_QUERIES_TO_EXECUTE; - if CM_QUERIES_TO_EXECUTE.lock().unwrap().contains_key(ds_name) { - CM_QUERIES_TO_EXECUTE - .lock() - .unwrap() - .get_mut(ds_name) - .unwrap() - .push(stmt); - } else { - CM_QUERIES_TO_EXECUTE - .lock() - .unwrap() - .insert(ds_name.to_owned(), vec![stmt]); - } + let mutex = CM_QUERIES_TO_EXECUTE.get_or_init(|| Mutex::new(HashMap::new())); + let mut queries = mutex.lock().expect("Mutex poisoned"); + + queries.entry(ds_name.to_owned()).or_default().push(stmt); } /// Represents a single row from the `canyon_memory` table diff --git a/canyon_migrations/src/migrations/processor.rs b/canyon_migrations/src/migrations/processor.rs index 9296689f..1761548d 100644 --- a/canyon_migrations/src/migrations/processor.rs +++ b/canyon_migrations/src/migrations/processor.rs @@ -1,16 
+1,18 @@ //! File that contains all the datatypes and logic to perform the migrations //! over a target database -use async_trait::async_trait; +use crate::canyon_crud::DatasourceConfig; +use crate::constants::regex_patterns; +use crate::save_migrations_query_to_execute; +use canyon_core::canyon::Canyon; +use canyon_core::connection::contracts::DbConnection; +use canyon_core::transaction::Transaction; use canyon_crud::DatabaseType; use regex::Regex; use std::collections::HashMap; use std::fmt::Debug; +use std::future::Future; use std::ops::Not; -use crate::canyon_crud::{crud::Transaction, DatasourceConfig}; -use crate::constants::regex_patterns; -use crate::save_migrations_query_to_execute; - use super::information_schema::{ColumnMetadata, TableMetadata}; use super::memory::CanyonMemory; #[cfg(feature = "postgres")] @@ -23,12 +25,16 @@ use canyon_entities::register_types::{CanyonRegisterEntity, CanyonRegisterEntity /// Rust source code managed by Canyon, for successfully make the migrations #[derive(Debug, Default)] pub struct MigrationsProcessor { - operations: Vec>, - set_primary_key_operations: Vec>, - drop_primary_key_operations: Vec>, - constraints_operations: Vec>, + table_operations: Vec, + column_operations: Vec, + set_primary_key_operations: Vec, + drop_primary_key_operations: Vec, + constraints_table_operations: Vec, + constraints_column_operations: Vec, + #[cfg(feature = "postgres")] + constraints_sequence_operations: Vec, } -impl Transaction for MigrationsProcessor {} +impl Transaction for MigrationsProcessor {} impl MigrationsProcessor { pub async fn process<'a>( @@ -66,7 +72,7 @@ impl MigrationsProcessor { db_type, ); - // For each field (column) on the this canyon register entity + // For each field (column) on the canyon register entity for canyon_register_field in canyon_register_entity.entity_fields { let current_column_metadata = MigrationsHelper::get_current_column_metadata( canyon_register_field.field_name.clone(), @@ -106,7 +112,10 @@ impl 
MigrationsProcessor { } } - for operation in &self.operations { + for operation in &self.table_operations { + operation.generate_sql(datasource).await; // This should be moved again to runtime + } + for operation in &self.column_operations { operation.generate_sql(datasource).await; // This should be moved again to runtime } for operation in &self.drop_primary_key_operations { @@ -115,9 +124,19 @@ impl MigrationsProcessor { for operation in &self.set_primary_key_operations { operation.generate_sql(datasource).await; // This should be moved again to runtime } - for operation in &self.constraints_operations { + for operation in &self.constraints_table_operations { + operation.generate_sql(datasource).await; // This should be moved again to runtime + } + for operation in &self.constraints_column_operations { operation.generate_sql(datasource).await; // This should be moved again to runtime } + + #[cfg(feature = "postgres")] + { + for operation in &self.constraints_sequence_operations { + operation.generate_sql(datasource).await; // This should be moved again to runtime + } + } // TODO Still pending to decouple de executions of cargo check to skip the process if this // code is not processed by cargo build or cargo run // Self::from_query_register(datasource_name).await; @@ -153,19 +172,16 @@ impl MigrationsProcessor { /// Generates a database agnostic query to change the name of a table fn create_table(&mut self, table_name: String, entity_fields: Vec) { - self.operations.push(Box::new(TableOperation::CreateTable( - table_name, - entity_fields, - ))); + self.table_operations + .push(TableOperation::CreateTable(table_name, entity_fields)); } /// Generates a database agnostic query to change the name of a table fn table_rename(&mut self, old_table_name: String, new_table_name: String) { - self.operations - .push(Box::new(TableOperation::AlterTableName( - old_table_name, - new_table_name, - ))); + self.table_operations.push(TableOperation::AlterTableName( + 
old_table_name, + new_table_name, + )); } // Creates or modify (currently only datatype) a column for a given canyon register entity field @@ -215,40 +231,42 @@ impl MigrationsProcessor { canyon_register_entity_field: CanyonRegisterEntityField, current_column_metadata: Option<&ColumnMetadata>, ) { - // If we do not retrieve data for this database column, it does not exist yet - // and therefore it has to be created - if current_column_metadata.is_none() { + if let Some(current_col_met) = current_column_metadata { + if !MigrationsHelper::is_same_datatype( + db_type, + &canyon_register_entity_field, + current_col_met, + ) { + self.change_column_datatype( + entity_name.to_string(), + canyon_register_entity_field.clone(), + ) + } + } else { + // If we do not retrieve data for this database column, it does not exist yet, + // and therefore it has to be created self.create_column( entity_name.to_string(), canyon_register_entity_field.clone(), ) - } else if !MigrationsHelper::is_same_datatype( - db_type, - &canyon_register_entity_field, - current_column_metadata.unwrap(), - ) { - self.change_column_datatype( - entity_name.to_string(), - canyon_register_entity_field.clone(), - ) } - if let Some(column_metadata) = current_column_metadata { - if canyon_register_entity_field.is_nullable() != column_metadata.is_nullable { - if column_metadata.is_nullable { - self.set_not_null(entity_name.to_string(), canyon_register_entity_field) - } else { - self.drop_not_null(entity_name.to_string(), canyon_register_entity_field) - } + if let Some(column_metadata) = current_column_metadata + && canyon_register_entity_field.is_nullable() != column_metadata.is_nullable + { + if column_metadata.is_nullable { + self.set_not_null(entity_name.to_string(), canyon_register_entity_field) + } else { + self.drop_not_null(entity_name.to_string(), canyon_register_entity_field) } } } fn delete_column(&mut self, table_name: &str, column_name: String) { - 
self.operations.push(Box::new(ColumnOperation::DeleteColumn( + self.column_operations.push(ColumnOperation::DeleteColumn( table_name.to_string(), column_name, - ))); + )); } #[cfg(feature = "mssql")] @@ -258,38 +276,32 @@ impl MigrationsProcessor { column_name: String, column_datatype: String, ) { - self.operations - .push(Box::new(ColumnOperation::DropNotNullBeforeDropColumn( + self.column_operations + .push(ColumnOperation::DropNotNullBeforeDropColumn( table_name.to_string(), column_name, column_datatype, - ))); + )); } fn create_column(&mut self, table_name: String, field: CanyonRegisterEntityField) { - self.operations - .push(Box::new(ColumnOperation::CreateColumn(table_name, field))); + self.column_operations + .push(ColumnOperation::CreateColumn(table_name, field)); } fn change_column_datatype(&mut self, table_name: String, field: CanyonRegisterEntityField) { - self.operations - .push(Box::new(ColumnOperation::AlterColumnType( - table_name, field, - ))); + self.column_operations + .push(ColumnOperation::AlterColumnType(table_name, field)); } fn set_not_null(&mut self, table_name: String, field: CanyonRegisterEntityField) { - self.operations - .push(Box::new(ColumnOperation::AlterColumnSetNotNull( - table_name, field, - ))); + self.column_operations + .push(ColumnOperation::AlterColumnSetNotNull(table_name, field)); } fn drop_not_null(&mut self, table_name: String, field: CanyonRegisterEntityField) { - self.operations - .push(Box::new(ColumnOperation::AlterColumnDropNotNull( - table_name, field, - ))); + self.column_operations + .push(ColumnOperation::AlterColumnDropNotNull(table_name, field)); } fn add_constraints( @@ -341,14 +353,14 @@ impl MigrationsProcessor { column_to_reference: String, canyon_register_entity_field: &CanyonRegisterEntityField, ) { - self.constraints_operations - .push(Box::new(TableOperation::AddTableForeignKey( + self.constraints_table_operations + .push(TableOperation::AddTableForeignKey( entity_name.to_string(), foreign_key_name, 
canyon_register_entity_field.field_name.clone(), table_to_reference, column_to_reference, - ))); + )); } fn add_primary_key( @@ -357,25 +369,25 @@ impl MigrationsProcessor { canyon_register_entity_field: CanyonRegisterEntityField, ) { self.set_primary_key_operations - .push(Box::new(TableOperation::AddTablePrimaryKey( + .push(TableOperation::AddTablePrimaryKey( entity_name.to_string(), canyon_register_entity_field, - ))); + )); } #[cfg(feature = "postgres")] fn add_identity(&mut self, entity_name: &str, field: CanyonRegisterEntityField) { - self.constraints_operations - .push(Box::new(ColumnOperation::AlterColumnAddIdentity( + self.constraints_column_operations + .push(ColumnOperation::AlterColumnAddIdentity( entity_name.to_string(), field.clone(), - ))); + )); - self.constraints_operations - .push(Box::new(SequenceOperation::ModifySequence( + self.constraints_sequence_operations + .push(SequenceOperation::ModifySequence( entity_name.to_string(), field, - ))); + )); } fn add_modify_or_remove_constraints( @@ -419,7 +431,7 @@ impl MigrationsProcessor { } } } - // Case when field doesn't contains a primary key annotation, but there is one in the database column + // Case when field doesn't contain a primary key annotation, but there is one in the database column else if !field_is_primary_key && current_column_metadata.primary_key_info.is_some() { Self::drop_primary_key( self, @@ -527,7 +539,7 @@ impl MigrationsProcessor { ) } } else if !field_is_foreign_key && current_column_metadata.foreign_key_name.is_some() { - // Case when field don't contains a foreign key annotation, but there is already one in the database column + // Case when field don't contain a foreign key annotation, but there is already one in the database column Self::delete_foreign_key( self, entity_name, @@ -542,10 +554,10 @@ impl MigrationsProcessor { fn drop_primary_key(&mut self, entity_name: &str, primary_key_name: String) { self.drop_primary_key_operations - 
.push(Box::new(TableOperation::DeleteTablePrimaryKey( + .push(TableOperation::DeleteTablePrimaryKey( entity_name.to_string(), primary_key_name, - ))); + )); } #[cfg(feature = "postgres")] @@ -554,29 +566,38 @@ impl MigrationsProcessor { entity_name: &str, canyon_register_entity_field: CanyonRegisterEntityField, ) { - self.constraints_operations - .push(Box::new(ColumnOperation::AlterColumnDropIdentity( + self.constraints_column_operations + .push(ColumnOperation::AlterColumnDropIdentity( entity_name.to_string(), canyon_register_entity_field, - ))); + )); } fn delete_foreign_key(&mut self, entity_name: &str, constrain_name: String) { - self.constraints_operations - .push(Box::new(TableOperation::DeleteTableForeignKey( + self.constraints_table_operations + .push(TableOperation::DeleteTableForeignKey( // table_with_foreign_key,constrain_name entity_name.to_string(), constrain_name, - ))); + )); } /// Make the detected migrations for the next Canyon-SQL run - #[allow(clippy::await_holding_lock)] pub async fn from_query_register(queries_to_execute: &HashMap<&str, Vec<&str>>) { for datasource in queries_to_execute.iter() { - for query_to_execute in datasource.1 { - let res = Self::query(query_to_execute, [], datasource.0).await; + let datasource_name = datasource.0; + let db_conn = Canyon::instance() + .expect("Error getting db connection on `from_query_register`") + .get_connection(datasource_name) + .unwrap_or_else(|_| { + panic!( + "Unable to get a database connection on Canyon Memory: {:?}", + datasource_name + ) + }); + for query_to_execute in datasource.1 { + let res = db_conn.query_rows(query_to_execute, &[]).await; match res { Ok(_) => println!( "\t[OK] - {:?} - Query: {:?}", @@ -716,40 +737,8 @@ impl MigrationsHelper { } } -#[cfg(test)] -mod migrations_helper_tests { - use super::*; - use crate::constants; - - const MOCKED_ENTITY_NAME: &str = "league"; - - #[test] - fn test_entity_already_on_database() { - let parse_result_empty_db_tables = - 
MigrationsHelper::entity_already_on_database(MOCKED_ENTITY_NAME, &[]); - // Always should be false - assert!(!parse_result_empty_db_tables); - - // Rust has a League entity. Database has a `league` entity. Case should be normalized - // and a match must raise - let mocked_league_entity_on_database = MigrationsHelper::entity_already_on_database( - MOCKED_ENTITY_NAME, - &[&constants::mocked_data::TABLE_METADATA_LEAGUE_EX], - ); - assert!(mocked_league_entity_on_database); - - let mocked_league_entity_on_database = MigrationsHelper::entity_already_on_database( - MOCKED_ENTITY_NAME, - &[&constants::mocked_data::NON_MATCHING_TABLE_METADATA], - ); - assert!(!mocked_league_entity_on_database) - } -} - -/// Trait that enables implementors to generate the migration queries -#[async_trait] trait DatabaseOperation: Debug { - async fn generate_sql(&self, datasource: &DatasourceConfig); + fn generate_sql(&self, datasource: &DatasourceConfig) -> impl Future; } /// Helper to relate the operations that Canyon should do when it's managing a schema @@ -769,73 +758,77 @@ enum TableOperation { DeleteTablePrimaryKey(String, String), } -impl Transaction for TableOperation {} +impl Transaction for TableOperation {} -#[async_trait] impl DatabaseOperation for TableOperation { async fn generate_sql(&self, datasource: &DatasourceConfig) { let db_type = datasource.get_db_type(); let stmt = match self { - TableOperation::CreateTable(table_name, table_fields) => { - match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => { - format!( - "CREATE TABLE \"{table_name}\" ({});", - table_fields - .iter() - .map(|entity_field| format!( - "\"{}\" {}", - entity_field.field_name, - to_postgres_syntax(entity_field) - )) - .collect::>() - .join(", ") - ) - } - #[cfg(feature = "mssql")] DatabaseType::SqlServer => { - format!( - "CREATE TABLE {:?} ({:?});", - table_name, - table_fields - .iter() - .map(|entity_field| format!( - "{} {}", - entity_field.field_name, - 
to_sqlserver_syntax(entity_field) - )) - .collect::>() - .join(", ") - ) - .replace('"', "") - }, - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + TableOperation::CreateTable(table_name, table_fields) => match db_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => { + format!( + "CREATE TABLE \"{table_name}\" ({});", + table_fields + .iter() + .map(|entity_field| format!( + "\"{}\" {}", + entity_field.field_name, + to_postgres_syntax(entity_field) + )) + .collect::>() + .join(", ") + ) } - } + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => format!( + "CREATE TABLE {:?} ({:?});", + table_name, + table_fields + .iter() + .map(|entity_field| format!( + "{} {}", + entity_field.field_name, + to_sqlserver_syntax(entity_field) + )) + .collect::>() + .join(", ") + ) + .replace('"', ""), + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") + }, TableOperation::AlterTableName(old_table_name, new_table_name) => { match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};"), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - /* - Notes: Brackets around `old_table_name`, p.e. - exec sp_rename ['league'], 'leagues' // NOT VALID! - is only allowed for compound names split by a dot. - exec sp_rename ['random.league'], 'leagues' // OK - - CARE! This doesn't mean that we are including the schema. - exec sp_rename ['dbo.random.league'], 'leagues' // OK - exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets - - Due to the automatic mapped name from Rust to DB and vice-versa, this won't - be an allowed behaviour for now, only with the table_name parameter on the - CanyonEntity annotation. 
- */ - format!("exec sp_rename '{old_table_name}', '{new_table_name}';"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => { + format!("ALTER TABLE {old_table_name} RENAME TO {new_table_name};") + } + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => + /* + Notes: Brackets around `old_table_name`, p.e. + exec sp_rename ['league'], 'leagues' // NOT VALID! + is only allowed for compound names split by a dot. + exec sp_rename ['random.league'], 'leagues' // OK + + CARE! This doesn't mean that we are including the schema. + exec sp_rename ['dbo.random.league'], 'leagues' // OK + exec sp_rename 'dbo.league', 'leagues' // OK - Schema doesn't need brackets + + Due to the automatic mapped name from Rust to DB and vice versa, this won't + be an allowed behaviour for now, only with the table_name parameter on the + CanyonEntity annotation. + */ + { + format!("exec sp_rename '{old_table_name}', '{new_table_name}';") + } + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } } @@ -845,57 +838,65 @@ impl DatabaseOperation for TableOperation { _column_foreign_key, _table_to_reference, _column_to_reference, - ) => { - match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - format!( - "ALTER TABLE {_table_name} ADD CONSTRAINT {_foreign_key_name} \ + ) => match db_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => format!( + "ALTER TABLE {_table_name} ADD CONSTRAINT {_foreign_key_name} \ FOREIGN KEY ({_column_foreign_key}) REFERENCES {_table_to_reference} ({_column_to_reference});" - ), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + ), + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => { + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql 
Server]") } - } + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") + }, TableOperation::DeleteTableForeignKey(_table_with_foreign_key, _constraint_name) => { match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - format!( - "ALTER TABLE {_table_with_foreign_key} DROP CONSTRAINT {_constraint_name};", - ), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => format!( + "ALTER TABLE {_table_with_foreign_key} DROP CONSTRAINT {_constraint_name};", + ), + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => todo!( + "[MS-SQL -> Operation still won't supported by Canyon for Sql Server]" + ), + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } } - TableOperation::AddTablePrimaryKey(_table_name, _entity_field) => { - match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - format!( - "ALTER TABLE \"{_table_name}\" ADD PRIMARY KEY (\"{}\");", - _entity_field.field_name - ), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + TableOperation::AddTablePrimaryKey(_table_name, _entity_field) => match db_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => format!( + "ALTER TABLE \"{_table_name}\" ADD PRIMARY KEY (\"{}\");", + _entity_field.field_name + ), + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => { + todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]") } - } - - TableOperation::DeleteTablePrimaryKey(table_name, primary_key_name) => { - match db_type { - #[cfg(feature = "postgres")] DatabaseType::PostgreSql => - 
format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), - #[cfg(feature = "mssql")] DatabaseType::SqlServer => - format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") + }, + TableOperation::DeleteTablePrimaryKey(table_name, primary_key_name) => match db_type { + #[cfg(feature = "postgres")] + DatabaseType::PostgreSql => { + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") } - } + #[cfg(feature = "mssql")] + DatabaseType::SqlServer => { + format!("ALTER TABLE {table_name} DROP CONSTRAINT {primary_key_name} CASCADE;") + } + #[cfg(feature = "mysql")] + DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") + }, }; save_migrations_query_to_execute(stmt, &datasource.name); @@ -922,9 +923,8 @@ enum ColumnOperation { AlterColumnDropIdentity(String, CanyonRegisterEntityField), } -impl Transaction for ColumnOperation {} +impl Transaction for ColumnOperation {} -#[async_trait] impl DatabaseOperation for ColumnOperation { async fn generate_sql(&self, datasource: &DatasourceConfig) { let db_type = datasource.get_db_type(); @@ -946,8 +946,8 @@ impl DatabaseOperation for ColumnOperation { entity_field.field_name, to_sqlserver_syntax(entity_field) ), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } ColumnOperation::DeleteColumn(table_name, column_name) => { // TODO Check if operation for SQL server is different @@ -962,8 +962,8 @@ impl DatabaseOperation for ColumnOperation { ), #[cfg(feature = "mssql")] DatabaseType::SqlServer => todo!("[MS-SQL -> Operation still won't supported by Canyon for Sql Server]"), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "mysql")] 
DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } ColumnOperation::AlterColumnDropNotNull(table_name, entity_field) => @@ -975,8 +975,8 @@ impl DatabaseOperation for ColumnOperation { "ALTER TABLE \"{table_name}\" ALTER COLUMN {} {} NULL", entity_field.field_name, to_sqlserver_alter_syntax(entity_field) ), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } #[cfg(feature = "mssql")] ColumnOperation::DropNotNullBeforeDropColumn(table_name, column_name, column_datatype) => format!( @@ -1003,8 +1003,8 @@ impl DatabaseOperation for ColumnOperation { entity_field.field_name, to_sqlserver_alter_syntax(entity_field) ), - #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!() - + #[cfg(feature = "mysql")] DatabaseType::MySQL => todo!(), + DatabaseType::Deferred => todo!("Deferred") } } @@ -1028,10 +1028,9 @@ enum SequenceOperation { ModifySequence(String, CanyonRegisterEntityField), } #[cfg(feature = "postgres")] -impl Transaction for SequenceOperation {} +impl Transaction for SequenceOperation {} #[cfg(feature = "postgres")] -#[async_trait] impl DatabaseOperation for SequenceOperation { async fn generate_sql(&self, datasource: &DatasourceConfig) { let stmt = match self { @@ -1045,3 +1044,132 @@ impl DatabaseOperation for SequenceOperation { save_migrations_query_to_execute(stmt, &datasource.name); } } + +#[cfg(test)] +mod migrations_helper_tests { + use super::*; + const MOCKED_ENTITY_NAME: &str = "league"; + + #[test] + fn test_entity_already_on_database() { + mocked_data::init_mocked_data(); + + let parse_result_empty_db_tables = + MigrationsHelper::entity_already_on_database(MOCKED_ENTITY_NAME, &[]); + // Always should be false + assert!(!parse_result_empty_db_tables); + + // Rust has a League entity. Database has a `league` entity. 
Case should be normalized + // and a match must raise + let mocked_league_entity_on_database = MigrationsHelper::entity_already_on_database( + MOCKED_ENTITY_NAME, + &[mocked_data::TABLE_METADATA_LEAGUE_EX.get().unwrap()], + ); + assert!(mocked_league_entity_on_database); + + let mocked_league_entity_on_database = MigrationsHelper::entity_already_on_database( + MOCKED_ENTITY_NAME, + &[mocked_data::NON_MATCHING_TABLE_METADATA.get().unwrap()], + ); + assert!(!mocked_league_entity_on_database) + } + + pub mod mocked_data { + use crate::migrations::information_schema::{ColumnMetadata, TableMetadata}; + use std::sync::OnceLock; + + pub static TABLE_METADATA_LEAGUE_EX: OnceLock = OnceLock::new(); + pub static NON_MATCHING_TABLE_METADATA: OnceLock = OnceLock::new(); + + pub fn init_mocked_data() { + TABLE_METADATA_LEAGUE_EX.get_or_init(|| TableMetadata { + table_name: "league".to_string(), + columns: vec![ + ColumnMetadata { + column_name: "id".to_owned(), + datatype: "int".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: Some("PK__league__3213E83FBDA92571".to_owned()), + primary_key_name: Some("PK__league__3213E83FBDA92571".to_owned()), + is_identity: false, + identity_generation: None, + }, + ColumnMetadata { + column_name: "ext_id".to_owned(), + datatype: "bigint".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: None, + primary_key_name: None, + is_identity: false, + identity_generation: None, + }, + ColumnMetadata { + column_name: "slug".to_owned(), + datatype: "nvarchar".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: None, + primary_key_name: None, + is_identity: false, + identity_generation: None, + }, + ColumnMetadata { + 
column_name: "name".to_owned(), + datatype: "nvarchar".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: None, + primary_key_name: None, + is_identity: false, + identity_generation: None, + }, + ColumnMetadata { + column_name: "region".to_owned(), + datatype: "nvarchar".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: None, + primary_key_name: None, + is_identity: false, + identity_generation: None, + }, + ColumnMetadata { + column_name: "image_url".to_owned(), + datatype: "nvarchar".to_owned(), + character_maximum_length: None, + is_nullable: false, + column_default: None, + foreign_key_info: None, + foreign_key_name: None, + primary_key_info: None, + primary_key_name: None, + is_identity: false, + identity_generation: None, + }, + ], + }); + + NON_MATCHING_TABLE_METADATA.get_or_init(|| TableMetadata { + table_name: "random_name_to_assert_false".to_string(), + columns: vec![], + }); + } + } +} diff --git a/octocat.png b/octocat.png deleted file mode 100644 index f9050b93..00000000 Binary files a/octocat.png and /dev/null differ diff --git a/src/lib.rs b/src/lib.rs index c74efbc5..70c0dbbb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,9 +3,10 @@ /// Here it's where all the available functionalities and features /// reaches the top most level, grouping them and making them visible /// through this crate, building the *public API* of the library -extern crate canyon_connection; +extern crate canyon_core; extern crate canyon_crud; extern crate canyon_macros; + #[cfg(feature = "migrations")] extern crate canyon_migrations; @@ -21,56 +22,54 @@ pub use canyon_macros::main; /// Public API for the `Canyon-SQL` proc-macros, and for the external ones pub mod macros { - pub use canyon_crud::async_trait::*; pub use canyon_macros::*; } /// connection 
module serves to reexport the public elements of the `canyon_connection` crate, /// exposing them through the public API pub mod connection { - #[cfg(feature = "postgres")] - pub use canyon_connection::canyon_database_connector::DatabaseConnection::Postgres; - - #[cfg(feature = "mssql")] - pub use canyon_connection::canyon_database_connector::DatabaseConnection::SqlServer; + pub use canyon_core::connection::contracts::DbConnection; + pub use canyon_core::connection::database_type::DatabaseType; + pub use canyon_core::connection::db_connector::DatabaseConnector; +} - #[cfg(feature = "mysql")] - pub use canyon_connection::canyon_database_connector::DatabaseConnection::MySQL; +pub mod core { + pub use canyon_core::canyon::Canyon; + pub use canyon_core::mapper::*; + pub use canyon_core::rows::CanyonRows; + pub use canyon_core::transaction::Transaction; } /// Crud module serves to reexport the public elements of the `canyon_crud` crate, /// exposing them through the public API pub mod crud { - pub use canyon_crud::bounds; pub use canyon_crud::crud::*; - pub use canyon_crud::mapper::*; - pub use canyon_crud::rows::CanyonRows; - pub use canyon_crud::DatabaseType; } /// Re-exports the query elements from the `crud`crate pub mod query { - pub use canyon_crud::query_elements::operators; - pub use canyon_crud::query_elements::{query::*, query_builder::*}; + pub use canyon_core::query::bounds; + pub use canyon_core::query::operators; + pub use canyon_core::query::parameters::QueryParameter; + pub use canyon_core::query::*; } /// Reexport the available database clients within Canyon pub mod db_clients { #[cfg(feature = "mysql")] - pub use canyon_connection::mysql_async; + pub use canyon_core::connection::mysql_async; #[cfg(feature = "mssql")] - pub use canyon_connection::tiberius; + pub use canyon_core::connection::tiberius; #[cfg(feature = "postgres")] - pub use canyon_connection::tokio_postgres; + pub use canyon_core::connection::tokio_postgres; } /// Reexport the needed 
runtime dependencies pub mod runtime { - pub use canyon_connection::futures; - pub use canyon_connection::init_connections_cache; - pub use canyon_connection::tokio; - pub use canyon_connection::tokio_util; - pub use canyon_connection::CANYON_TOKIO_RUNTIME; + pub use canyon_core::connection::futures; + pub use canyon_core::connection::get_canyon_tokio_runtime; + pub use canyon_core::connection::tokio; + pub use canyon_core::connection::tokio_util; } /// Module for reexport the `chrono` crate with the allowed public and available types in Canyon diff --git a/tests/Cargo.toml b/tests/Cargo.toml index ef9ee7f0..16f4462e 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -14,4 +14,5 @@ path = "canyon_integration_tests.rs" [features] postgres = ["canyon_sql/postgres"] mssql = ["canyon_sql/mssql"] -mysql = ["canyon_sql/mysql"] \ No newline at end of file +mysql = ["canyon_sql/mysql"] +migrations = ["canyon_sql/migrations"] diff --git a/tests/canyon_integration_tests.rs b/tests/canyon_integration_tests.rs index 6e61b549..fa51d7f0 100644 --- a/tests/canyon_integration_tests.rs +++ b/tests/canyon_integration_tests.rs @@ -11,6 +11,7 @@ extern crate canyon_sql; use std::error::Error; mod crud; +#[cfg(feature = "migrations")] mod migrations; mod constants; diff --git a/tests/crud/delete_operations.rs b/tests/crud/delete_operations.rs index 5c1f5c1c..c4edd96b 100644 --- a/tests/crud/delete_operations.rs +++ b/tests/crud/delete_operations.rs @@ -23,7 +23,7 @@ use crate::tests_models::league::*; #[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_method_operation() { - // For test the delete, we will insert a new instance of the database, and then, + // For test the delete operation, we will insert a new instance of the database, and then, // after inspect it, we will proceed to delete it let mut new_league: League = League { id: Default::default(), @@ -39,7 +39,7 @@ fn test_crud_delete_method_operation() { assert_eq!( new_league.id, - 
League::find_by_pk_datasource(&new_league.id, PSQL_DS) + League::find_by_pk_with(&new_league.id, PSQL_DS) .await .expect("Request error") .expect("None value") @@ -55,7 +55,7 @@ fn test_crud_delete_method_operation() { // To check the success, we can query by the primary key value and check if, after unwrap() // the result of the operation, the find by primary key contains Some(v) or None - // Remember that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> + // Remember that `find_by_primary_key(&dyn QueryParameter) -> Result>, Err> assert_eq!( League::find_by_pk(&new_league.id) .await @@ -67,7 +67,7 @@ fn test_crud_delete_method_operation() { /// Same as the delete test, but performing the operations with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_delete_datasource_mssql_method_operation() { +fn test_crud_delete_with_mssql_method_operation() { // For test the delete, we will insert a new instance of the database, and then, // after inspect it, we will proceed to delete it let mut new_league: League = League { @@ -81,12 +81,12 @@ fn test_crud_delete_datasource_mssql_method_operation() { // We insert the instance on the database, on the `League` entity new_league - .insert_datasource(SQL_SERVER_DS) + .insert_with(SQL_SERVER_DS) .await .expect("Failed insert operation"); assert_eq!( new_league.id, - League::find_by_pk_datasource(&new_league.id, SQL_SERVER_DS) + League::find_by_pk_with(&new_league.id, SQL_SERVER_DS) .await .expect("Request error") .expect("None value") @@ -96,15 +96,15 @@ fn test_crud_delete_datasource_mssql_method_operation() { // Now that we have an instance mapped to some entity by a primary key, we can now // remove that entry from the database with the delete operation new_league - .delete_datasource(SQL_SERVER_DS) + .delete_with(SQL_SERVER_DS) .await .expect("Failed to delete the operation"); // To check the success, we can query by the primary key value and check if, 
after unwrap() // the result of the operation, the find by primary key contains Some(v) or None - // Remember that `find_by_primary_key(&dyn QueryParameter<'a>) -> Result>, Err> + // Remember that `find_by_primary_key(&dyn QueryParameter) -> Result>, Err> assert_eq!( - League::find_by_pk_datasource(&new_league.id, SQL_SERVER_DS) + League::find_by_pk_with(&new_league.id, SQL_SERVER_DS) .await .expect("Unwrapping the result, letting the Option"), None @@ -114,7 +114,7 @@ fn test_crud_delete_datasource_mssql_method_operation() { /// Same as the delete test, but performing the operations with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_delete_datasource_mysql_method_operation() { +fn test_crud_delete_with_mysql_method_operation() { // For test the delete, we will insert a new instance of the database, and then, // after inspect it, we will proceed to delete it let mut new_league: League = League { @@ -128,12 +128,12 @@ fn test_crud_delete_datasource_mysql_method_operation() { // We insert the instance on the database, on the `League` entity new_league - .insert_datasource(MYSQL_DS) + .insert_with(MYSQL_DS) .await .expect("Failed insert operation"); assert_eq!( new_league.id, - League::find_by_pk_datasource(&new_league.id, MYSQL_DS) + League::find_by_pk_with(&new_league.id, MYSQL_DS) .await .expect("Request error") .expect("None value") @@ -143,15 +143,15 @@ fn test_crud_delete_datasource_mysql_method_operation() { // Now that we have an instance mapped to some entity by a primary key, we can now // remove that entry from the database with the delete operation new_league - .delete_datasource(MYSQL_DS) + .delete_with(MYSQL_DS) .await .expect("Failed to delete the operation"); // To check the success, we can query by the primary key value and check if, after unwrap() // the result of the operation, the find by primary key contains Some(v) or None - // Remember that `find_by_primary_key(&dyn QueryParameter<'a>) 
-> Result>, Err> + // Remember that `find_by_primary_key(&dyn QueryParameter) -> Result>, Err> assert_eq!( - League::find_by_pk_datasource(&new_league.id, MYSQL_DS) + League::find_by_pk_with(&new_league.id, MYSQL_DS) .await .expect("Unwrapping the result, letting the Option"), None diff --git a/tests/crud/foreign_key_operations.rs b/tests/crud/foreign_key_operations.rs index 87630ad1..099354ff 100644 --- a/tests/crud/foreign_key_operations.rs +++ b/tests/crud/foreign_key_operations.rs @@ -10,7 +10,7 @@ /// For more info: TODO -> Link to the docs of the foreign key chapter use canyon_sql::crud::CrudOperations; -#[cfg(feature = "mssql")] +#[cfg(feature = "mysql")] use crate::constants::MYSQL_DS; #[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; @@ -45,15 +45,15 @@ fn test_crud_search_by_foreign_key() { /// Same as the search by foreign key, but with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_search_by_foreign_key_datasource_mssql() { - let some_tournament: Tournament = Tournament::find_by_pk_datasource(&10, SQL_SERVER_DS) +fn test_crud_search_by_foreign_key_with_mssql() { + let some_tournament: Tournament = Tournament::find_by_pk_with(&10, SQL_SERVER_DS) .await .expect("Result variant of the query is err") .expect("No result found for the given parameter"); // We can get the parent entity for the retrieved child instance let parent_entity: Option = some_tournament - .search_league_datasource(SQL_SERVER_DS) + .search_league_with(SQL_SERVER_DS) .await .expect("Result variant of the query is err"); @@ -71,15 +71,15 @@ fn test_crud_search_by_foreign_key_datasource_mssql() { /// Same as the search by foreign key, but with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_search_by_foreign_key_datasource_mysql() { - let some_tournament: Tournament = Tournament::find_by_pk_datasource(&10, MYSQL_DS) +fn 
test_crud_search_by_foreign_key_with_mysql() { + let some_tournament: Tournament = Tournament::find_by_pk_with(&10, MYSQL_DS) .await .expect("Result variant of the query is err") .expect("No result found for the given parameter"); // We can get the parent entity for the retrieved child instance let parent_entity: Option = some_tournament - .search_league_datasource(MYSQL_DS) + .search_league_with(MYSQL_DS) .await .expect("Result variant of the query is err"); @@ -108,7 +108,7 @@ fn test_crud_search_reverse_side_foreign_key() { .expect("No result found for the given parameter"); // Computes how many tournaments are pointing to the retrieved league - let child_tournaments: Vec = Tournament::search_league_childrens(&some_league) + let child_tournaments = Tournament::search_league_childrens(&some_league) .await .expect("Result variant of the query is err"); @@ -122,15 +122,15 @@ fn test_crud_search_reverse_side_foreign_key() { /// but with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_search_reverse_side_foreign_key_datasource_mssql() { - let some_league: League = League::find_by_pk_datasource(&1, SQL_SERVER_DS) +fn test_crud_search_reverse_side_foreign_key_with_mssql() { + let some_league: League = League::find_by_pk_with(&1, SQL_SERVER_DS) .await .expect("Result variant of the query is err") .expect("No result found for the given parameter"); // Computes how many tournaments are pointing to the retrieved league let child_tournaments: Vec = - Tournament::search_league_childrens_datasource(&some_league, SQL_SERVER_DS) + Tournament::search_league_childrens_with(&some_league, SQL_SERVER_DS) .await .expect("Result variant of the query is err"); @@ -144,15 +144,15 @@ fn test_crud_search_reverse_side_foreign_key_datasource_mssql() { /// but with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_search_reverse_side_foreign_key_datasource_mysql() { - let 
some_league: League = League::find_by_pk_datasource(&1, MYSQL_DS) +fn test_crud_search_reverse_side_foreign_key_with_mysql() { + let some_league: League = League::find_by_pk_with(&1, MYSQL_DS) .await .expect("Result variant of the query is err") .expect("No result found for the given parameter"); // Computes how many tournaments are pointing to the retrieved league let child_tournaments: Vec = - Tournament::search_league_childrens_datasource(&some_league, MYSQL_DS) + Tournament::search_league_childrens_with(&some_league, MYSQL_DS) .await .expect("Result variant of the query is err"); diff --git a/tests/crud/hex_arch_example.rs b/tests/crud/hex_arch_example.rs new file mode 100644 index 00000000..4fb52632 --- /dev/null +++ b/tests/crud/hex_arch_example.rs @@ -0,0 +1,196 @@ +use canyon_sql::connection::DatabaseConnector; +use canyon_sql::core::Canyon; +use canyon_sql::macros::{CanyonCrud, CanyonMapper, canyon_entity}; +use canyon_sql::query::{QueryParameter, querybuilder::SelectQueryBuilder}; +use std::error::Error; + +#[cfg(feature = "postgres")] +#[canyon_sql::macros::canyon_tokio_test] +fn test_hex_arch_ops() { + let default_db_conn = Canyon::instance() + .unwrap() + .get_default_connection() + .unwrap(); + let league_service = LeagueHexServiceAdapter { + league_repository: LeagueHexRepositoryAdapter { + db_conn: default_db_conn, + }, + }; + + let find_all_result = league_service.find_all().await; + + // Connection doesn't return an error + assert!(find_all_result.is_ok()); + let find_all_result = find_all_result.unwrap(); + assert!(!find_all_result.is_empty()); + // If we try to do a call using the adapter, count will use the default datasource, which is locked at this point, + // since we passed the same connection that it will be using here to the repository! 
+ assert_eq!( + LeagueHexRepositoryAdapter::::count() + .await + .unwrap() as usize, + find_all_result.len() + ); +} + +#[cfg(feature = "postgres")] +#[canyon_sql::macros::canyon_tokio_test] +fn test_hex_arch_insert_entity_ops() { + let default_db_conn = Canyon::instance() + .unwrap() + .get_default_connection() + .unwrap(); + let league_service = LeagueHexServiceAdapter { + league_repository: LeagueHexRepositoryAdapter { + db_conn: default_db_conn, + }, + }; + + let mut other_league: LeagueHex = LeagueHex { + id: Default::default(), + ext_id: Default::default(), + slug: "leaguehex-slug".to_string(), + name: "Test LeagueHex on layered".to_string(), + region: "LeagueHex Region".to_string(), + image_url: "http://example.com/image.png".to_string(), + }; + league_service.create(&mut other_league).await.unwrap(); + + let find_new_league = league_service.get(&other_league.id).await.unwrap(); + assert!(find_new_league.is_some()); + assert_eq!( + find_new_league.as_ref().unwrap().name, + String::from("Test LeagueHex on layered") + ); +} + +#[cfg(feature = "postgres")] +#[canyon_sql::macros::canyon_tokio_test] +fn test_hex_arch_update_entity_ops() { + let default_db_conn = Canyon::instance() + .unwrap() + .get_default_connection() + .unwrap(); + let league_service = LeagueHexServiceAdapter { + league_repository: LeagueHexRepositoryAdapter { + db_conn: default_db_conn, + }, + }; + + let mut other_league: LeagueHex = LeagueHex { + id: Default::default(), + ext_id: Default::default(), + slug: "leaguehex-slug".to_string(), + name: "Test LeagueHex on layered".to_string(), + region: "LeagueHex Region".to_string(), + image_url: "http://example.com/image.png".to_string(), + }; + league_service.create(&mut other_league).await.unwrap(); + + let find_new_league = league_service.get(&other_league.id).await.unwrap(); + assert!(find_new_league.is_some()); + assert_eq!( + find_new_league.as_ref().unwrap().name, + String::from("Test LeagueHex on layered") + ); + + let mut updt = 
find_new_league.unwrap(); + updt.ext_id = 5; + let r = LeagueHexRepositoryAdapter::::update_entity(&updt).await; + assert!(r.is_ok()); + + let updated = league_service.get(&other_league.id).await.unwrap(); + assert_eq!(updated.unwrap().ext_id, 5); +} + +#[derive(CanyonMapper, Debug)] +#[canyon_entity] +pub struct LeagueHex { + // The core model of the 'LeagueHex' domain + #[primary_key] + pub id: i32, + pub ext_id: i64, + pub slug: String, + pub name: String, + pub region: String, + pub image_url: String, +} + +pub trait LeagueHexService { + async fn find_all(&self) -> Result, Box>; + async fn create<'a>( + &self, + league: &'a mut LeagueHex, + ) -> Result<(), Box>; + + async fn get<'a, Pk: QueryParameter>( + &self, + id: &'a Pk, + ) -> Result, Box>; +} // As a domain boundary for the application side of the hexagon + +pub struct LeagueHexServiceAdapter { + league_repository: T, +} +impl LeagueHexService for LeagueHexServiceAdapter { + async fn find_all(&self) -> Result, Box> { + self.league_repository.find_all().await + } + + async fn create<'a>( + &self, + league: &'a mut LeagueHex, + ) -> Result<(), Box> { + self.league_repository.create(league).await + } + + async fn get<'a, Pk: QueryParameter>( + &self, + id: &'a Pk, + ) -> Result, Box> { + self.league_repository.get(id).await + } +} + +pub trait LeagueHexRepository { + async fn find_all(&self) -> Result, Box>; + async fn create<'a>( + &self, + league: &'a mut LeagueHex, + ) -> Result<(), Box>; + + async fn get<'a, Pk: QueryParameter>( + &self, + id: &'a Pk, + ) -> Result, Box>; +} // As a domain boundary for the infrastructure side of the hexagon + +#[derive(CanyonCrud)] +#[canyon_crud(maps_to=LeagueHex)] +#[canyon_entity(table_name = "league")] +pub struct LeagueHexRepositoryAdapter { + // db_conn: &'b T, + db_conn: T, +} +impl LeagueHexRepository for LeagueHexRepositoryAdapter { + async fn find_all(&self) -> Result, Box> { + let db_conn = &self.db_conn; + let select_query = + 
SelectQueryBuilder::new_for("league", db_conn.get_database_type()?)?.build()?; + db_conn.query(select_query, &[]).await + } + + async fn create<'a>( + &self, + league: &'a mut LeagueHex, + ) -> Result<(), Box> { + Self::insert_entity(league).await + } + + async fn get<'a, Pk: QueryParameter>( + &self, + id: &'a Pk, + ) -> Result, Box> { + Self::find_by_pk(id).await + } +} diff --git a/tests/crud/init_mssql.rs b/tests/crud/init_mssql.rs index 19b08549..ee8bc341 100644 --- a/tests/crud/init_mssql.rs +++ b/tests/crud/init_mssql.rs @@ -4,32 +4,36 @@ use crate::constants::SQL_SERVER_FILL_TABLE_VALUES; use crate::tests_models::league::League; use canyon_sql::crud::CrudOperations; -use canyon_sql::db_clients::tiberius::{Client, Config}; +use canyon_sql::db_clients::tiberius::{Client, Config, EncryptionLevel}; use canyon_sql::runtime::tokio::net::TcpStream; use canyon_sql::runtime::tokio_util::compat::TokioAsyncWriteCompatExt; -/// In order to initialize data on `SqlServer`. we must manually insert it -/// when the docker starts. SqlServer official docker from Microsoft does -/// not allow you to run `.sql` files against the database (not at least, without) -/// using a workaround. So, we are going to query the `SqlServer` to check if already -/// has some data (other processes, persistence or multi-threading envs), af if not, -/// we are going to retrieve the inserted data on the `postgreSQL` at start-up and -/// inserting into the `SqlServer` instance. -/// -/// This will be marked as `#[ignore]`, so we can force to run first the marked as -/// ignored, check the data available, perform the necessary init operations and -/// then *cargo test * the real integration tests +// /// In order to initialize data on `SqlServer`. we must manually insert it +// /// when the docker starts. SqlServer official docker from Microsoft does +// /// not allow you to run `.sql` files against the database (not at least, without) +// /// using a workaround. 
So, we are going to query the `SqlServer` to check if already +// /// has some data (other processes, persistence or multi-threading envs), and if not, +// /// we are going to retrieve the inserted data on the `postgreSQL` at start-up and +// /// inserting into the `SqlServer` instance. +// /// +// /// This will be marked as `#[ignore]`, so we can force to run first the marked as +// /// ignored, check the data available, perform the necessary init operations and +// /// then *cargo test * the real integration tests #[canyon_sql::macros::canyon_tokio_test] #[ignore] fn initialize_sql_server_docker_instance() { - static CONN_STR: &str = - "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true"; + static CONN_STR: &str = "server=tcp:localhost,1434;User Id=SA;Password=SqlServer-10;TrustServerCertificate=true;Encrypt=true"; canyon_sql::runtime::futures::executor::block_on(async { - let config = Config::from_ado_string(CONN_STR).unwrap(); + let mut config = Config::from_ado_string(CONN_STR).expect("could not parse ado string"); - let tcp = TcpStream::connect(config.get_addr()).await.unwrap(); - let tcp2 = TcpStream::connect(config.get_addr()).await.unwrap(); + config.encryption(EncryptionLevel::NotSupported); + let tcp = TcpStream::connect(config.get_addr()) + .await + .expect("could not connect to stream 1"); + let tcp2 = TcpStream::connect(config.get_addr()) + .await + .expect("could not connect to stream 2"); tcp.set_nodelay(true).ok(); let mut client = Client::connect(config.clone(), tcp.compat_write()) @@ -40,8 +44,8 @@ fn initialize_sql_server_docker_instance() { let query_result = client.query(SQL_SERVER_CREATE_TABLES, &[]).await; assert!(query_result.is_ok()); - let leagues_sql = League::find_all_datasource(SQL_SERVER_DS).await; - println!("LSQL ERR: {leagues_sql:?}"); + let leagues_sql = League::find_all_with(SQL_SERVER_DS).await; + println!("LSqlServer: {leagues_sql:?}"); assert!(leagues_sql.is_ok()); match leagues_sql { diff
--git a/tests/crud/insert_operations.rs b/tests/crud/insert_operations.rs index 13e2747e..f6a7d074 100644 --- a/tests/crud/insert_operations.rs +++ b/tests/crud/insert_operations.rs @@ -61,7 +61,7 @@ fn test_crud_insert_operation() { /// the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_insert_datasource_mssql_operation() { +fn test_crud_insert_with_mssql_operation() { let mut new_league: League = League { id: Default::default(), ext_id: 7892635306594_i64, @@ -73,7 +73,7 @@ fn test_crud_insert_datasource_mssql_operation() { // We insert the instance on the database, on the `League` entity new_league - .insert_datasource(SQL_SERVER_DS) + .insert_with(SQL_SERVER_DS) .await .expect("Failed insert datasource operation"); @@ -81,7 +81,7 @@ fn test_crud_insert_datasource_mssql_operation() { // value for the primary key field, which is id. So, we can query the // database again with the find by primary key operation to check if // the value was really inserted - let inserted_league = League::find_by_pk_datasource(&new_league.id, SQL_SERVER_DS) + let inserted_league = League::find_by_pk_with(&new_league.id, SQL_SERVER_DS) .await .expect("Failed the query to the database") .expect("No entity found for the primary key value passed in"); @@ -93,7 +93,7 @@ fn test_crud_insert_datasource_mssql_operation() { /// the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_insert_datasource_mysql_operation() { +fn test_crud_insert_with_mysql_operation() { let mut new_league: League = League { id: Default::default(), ext_id: 7892635306594_i64, @@ -105,7 +105,7 @@ fn test_crud_insert_datasource_mysql_operation() { // We insert the instance on the database, on the `League` entity new_league - .insert_datasource(MYSQL_DS) + .insert_with(MYSQL_DS) .await .expect("Failed insert datasource operation"); @@ -113,205 +113,205 @@ fn test_crud_insert_datasource_mysql_operation() { // value 
for the primary key field, which is id. So, we can query the // database again with the find by primary key operation to check if // the value was really inserted - let inserted_league = League::find_by_pk_datasource(&new_league.id, MYSQL_DS) + let inserted_league = League::find_by_pk_with(&new_league.id, MYSQL_DS) .await .expect("Failed the query to the database") .expect("No entity found for the primary key value passed in"); assert_eq!(new_league.id, inserted_league.id); } - -/// The multi insert operation is a shorthand for insert multiple instances of *T* -/// in the database at once. -/// -/// It works pretty much the same that the insert operation, with the same behaviour -/// of the `#[primary_key]` annotation over some field. It will auto set the primary -/// key field with the autogenerated value on the database on the insert operation, but -/// for every entity passed in as an array of mutable instances of `T`. -/// -/// The instances without `#[primary_key]` inserts all the values on the instaqce fields -/// on the database. 
-#[cfg(feature = "postgres")] -#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_multi_insert_operation() { - let mut new_league_mi: League = League { - id: Default::default(), - ext_id: 54376478_i64, - slug: "some-new-random-league".to_string(), - name: "Some New Random League".to_string(), - region: "Unknown".to_string(), - image_url: "https://what-a-league.io".to_string(), - }; - let mut new_league_mi_2: League = League { - id: Default::default(), - ext_id: 3475689769678906_i64, - slug: "new-league-2".to_string(), - name: "New League 2".to_string(), - region: "Really unknown".to_string(), - image_url: "https://what-an-unknown-league.io".to_string(), - }; - let mut new_league_mi_3: League = League { - id: Default::default(), - ext_id: 46756867_i64, - slug: "a-new-multinsert".to_string(), - name: "New League 3".to_string(), - region: "The dark side of the moon".to_string(), - image_url: "https://interplanetary-league.io".to_string(), - }; - - // Insert the instance as database entities - new_league_mi - .insert() - .await - .expect("Failed insert datasource operation"); - new_league_mi_2 - .insert() - .await - .expect("Failed insert datasource operation"); - new_league_mi_3 - .insert() - .await - .expect("Failed insert datasource operation"); - - // Recover the inserted data by primary key - let inserted_league = League::find_by_pk(&new_league_mi.id) - .await - .expect("[1] - Failed the query to the database") - .expect("[1] - No entity found for the primary key value passed in"); - let inserted_league_2 = League::find_by_pk(&new_league_mi_2.id) - .await - .expect("[2] - Failed the query to the database") - .expect("[2] - No entity found for the primary key value passed in"); - let inserted_league_3 = League::find_by_pk(&new_league_mi_3.id) - .await - .expect("[3] - Failed the query to the database") - .expect("[3] - No entity found for the primary key value passed in"); - - assert_eq!(new_league_mi.id, inserted_league.id); - assert_eq!(new_league_mi_2.id, 
inserted_league_2.id); - assert_eq!(new_league_mi_3.id, inserted_league_3.id); -} - -/// Same as the multi insert above, but with the specified datasource -#[cfg(feature = "mssql")] -#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_multi_insert_datasource_mssql_operation() { - let mut new_league_mi: League = League { - id: Default::default(), - ext_id: 54376478_i64, - slug: "some-new-random-league".to_string(), - name: "Some New Random League".to_string(), - region: "Unknown".to_string(), - image_url: "https://what-a-league.io".to_string(), - }; - let mut new_league_mi_2: League = League { - id: Default::default(), - ext_id: 3475689769678906_i64, - slug: "new-league-2".to_string(), - name: "New League 2".to_string(), - region: "Really unknown".to_string(), - image_url: "https://what-an-unknown-league.io".to_string(), - }; - let mut new_league_mi_3: League = League { - id: Default::default(), - ext_id: 46756867_i64, - slug: "a-new-multinsert".to_string(), - name: "New League 3".to_string(), - region: "The dark side of the moon".to_string(), - image_url: "https://interplanetary-league.io".to_string(), - }; - - // Insert the instance as database entities - new_league_mi - .insert_datasource(SQL_SERVER_DS) - .await - .expect("Failed insert datasource operation"); - new_league_mi_2 - .insert_datasource(SQL_SERVER_DS) - .await - .expect("Failed insert datasource operation"); - new_league_mi_3 - .insert_datasource(SQL_SERVER_DS) - .await - .expect("Failed insert datasource operation"); - - // Recover the inserted data by primary key - let inserted_league = League::find_by_pk_datasource(&new_league_mi.id, SQL_SERVER_DS) - .await - .expect("[1] - Failed the query to the database") - .expect("[1] - No entity found for the primary key value passed in"); - let inserted_league_2 = League::find_by_pk_datasource(&new_league_mi_2.id, SQL_SERVER_DS) - .await - .expect("[2] - Failed the query to the database") - .expect("[2] - No entity found for the primary key value passed 
in"); - let inserted_league_3 = League::find_by_pk_datasource(&new_league_mi_3.id, SQL_SERVER_DS) - .await - .expect("[3] - Failed the query to the database") - .expect("[3] - No entity found for the primary key value passed in"); - - assert_eq!(new_league_mi.id, inserted_league.id); - assert_eq!(new_league_mi_2.id, inserted_league_2.id); - assert_eq!(new_league_mi_3.id, inserted_league_3.id); -} - -/// Same as the multi insert above, but with the specified datasource -#[cfg(feature = "mysql")] -#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_multi_insert_datasource_mysql_operation() { - let mut new_league_mi: League = League { - id: Default::default(), - ext_id: 54376478_i64, - slug: "some-new-random-league".to_string(), - name: "Some New Random League".to_string(), - region: "Unknown".to_string(), - image_url: "https://what-a-league.io".to_string(), - }; - let mut new_league_mi_2: League = League { - id: Default::default(), - ext_id: 3475689769678906_i64, - slug: "new-league-2".to_string(), - name: "New League 2".to_string(), - region: "Really unknown".to_string(), - image_url: "https://what-an-unknown-league.io".to_string(), - }; - let mut new_league_mi_3: League = League { - id: Default::default(), - ext_id: 46756867_i64, - slug: "a-new-multinsert".to_string(), - name: "New League 3".to_string(), - region: "The dark side of the moon".to_string(), - image_url: "https://interplanetary-league.io".to_string(), - }; - - // Insert the instance as database entities - new_league_mi - .insert_datasource(MYSQL_DS) - .await - .expect("Failed insert datasource operation"); - new_league_mi_2 - .insert_datasource(MYSQL_DS) - .await - .expect("Failed insert datasource operation"); - new_league_mi_3 - .insert_datasource(MYSQL_DS) - .await - .expect("Failed insert datasource operation"); - - // Recover the inserted data by primary key - let inserted_league = League::find_by_pk_datasource(&new_league_mi.id, MYSQL_DS) - .await - .expect("[1] - Failed the query to the 
database") - .expect("[1] - No entity found for the primary key value passed in"); - let inserted_league_2 = League::find_by_pk_datasource(&new_league_mi_2.id, MYSQL_DS) - .await - .expect("[2] - Failed the query to the database") - .expect("[2] - No entity found for the primary key value passed in"); - let inserted_league_3 = League::find_by_pk_datasource(&new_league_mi_3.id, MYSQL_DS) - .await - .expect("[3] - Failed the query to the database") - .expect("[3] - No entity found for the primary key value passed in"); - - assert_eq!(new_league_mi.id, inserted_league.id); - assert_eq!(new_league_mi_2.id, inserted_league_2.id); - assert_eq!(new_league_mi_3.id, inserted_league_3.id); -} +// +// /// The multi insert operation is a shorthand for inserting multiple instances of *T* +// /// in the database at once. +// /// +// /// It works pretty much the same as the insert operation, with the same behaviour +// /// of the `#[primary_key]` annotation over some field. It will auto set the primary +// /// key field with the autogenerated value on the database on the insert operation, but +// /// for every entity passed in as an array of mutable instances of `T`. +// /// +// /// The instances without `#[primary_key]` insert all the values on the instance fields +// /// on the database.
+// #[cfg(feature = "postgres")] +// #[canyon_sql::macros::canyon_tokio_test] +// fn test_crud_multi_insert_operation() { +// let mut new_league_mi: League = League { +// id: Default::default(), +// ext_id: 54376478_i64, +// slug: "some-new-random-league".to_string(), +// name: "Some New Random League".to_string(), +// region: "Unknown".to_string(), +// image_url: "https://what-a-league.io".to_string(), +// }; +// let mut new_league_mi_2: League = League { +// id: Default::default(), +// ext_id: 3475689769678906_i64, +// slug: "new-league-2".to_string(), +// name: "New League 2".to_string(), +// region: "Really unknown".to_string(), +// image_url: "https://what-an-unknown-league.io".to_string(), +// }; +// let mut new_league_mi_3: League = League { +// id: Default::default(), +// ext_id: 46756867_i64, +// slug: "a-new-multinsert".to_string(), +// name: "New League 3".to_string(), +// region: "The dark side of the moon".to_string(), +// image_url: "https://interplanetary-league.io".to_string(), +// }; +// +// // Insert the instance as database entities +// new_league_mi +// .insert() +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_2 +// .insert() +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_3 +// .insert() +// .await +// .expect("Failed insert datasource operation"); +// +// // Recover the inserted data by primary key +// let inserted_league = League::find_by_pk(&new_league_mi.id) +// .await +// .expect("[1] - Failed the query to the database") +// .expect("[1] - No entity found for the primary key value passed in"); +// let inserted_league_2 = League::find_by_pk(&new_league_mi_2.id) +// .await +// .expect("[2] - Failed the query to the database") +// .expect("[2] - No entity found for the primary key value passed in"); +// let inserted_league_3 = League::find_by_pk(&new_league_mi_3.id) +// .await +// .expect("[3] - Failed the query to the database") +// .expect("[3] - No entity found for the primary 
key value passed in"); +// +// assert_eq!(new_league_mi.id, inserted_league.id); +// assert_eq!(new_league_mi_2.id, inserted_league_2.id); +// assert_eq!(new_league_mi_3.id, inserted_league_3.id); +// } +// +// /// Same as the multi insert above, but with the specified datasource +// #[cfg(feature = "mssql")] +// #[canyon_sql::macros::canyon_tokio_test] +// fn test_crud_multi_insert_with_mssql_operation() { +// let mut new_league_mi: League = League { +// id: Default::default(), +// ext_id: 54376478_i64, +// slug: "some-new-random-league".to_string(), +// name: "Some New Random League".to_string(), +// region: "Unknown".to_string(), +// image_url: "https://what-a-league.io".to_string(), +// }; +// let mut new_league_mi_2: League = League { +// id: Default::default(), +// ext_id: 3475689769678906_i64, +// slug: "new-league-2".to_string(), +// name: "New League 2".to_string(), +// region: "Really unknown".to_string(), +// image_url: "https://what-an-unknown-league.io".to_string(), +// }; +// let mut new_league_mi_3: League = League { +// id: Default::default(), +// ext_id: 46756867_i64, +// slug: "a-new-multinsert".to_string(), +// name: "New League 3".to_string(), +// region: "The dark side of the moon".to_string(), +// image_url: "https://interplanetary-league.io".to_string(), +// }; +// +// // Insert the instance as database entities +// new_league_mi +// .insert_with(SQL_SERVER_DS) +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_2 +// .insert_with(SQL_SERVER_DS) +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_3 +// .insert_with(SQL_SERVER_DS) +// .await +// .expect("Failed insert datasource operation"); +// +// // Recover the inserted data by primary key +// let inserted_league = League::find_by_pk_with(&new_league_mi.id, SQL_SERVER_DS) +// .await +// .expect("[1] - Failed the query to the database") +// .expect("[1] - No entity found for the primary key value passed in"); +// let inserted_league_2 = 
League::find_by_pk_with(&new_league_mi_2.id, SQL_SERVER_DS) +// .await +// .expect("[2] - Failed the query to the database") +// .expect("[2] - No entity found for the primary key value passed in"); +// let inserted_league_3 = League::find_by_pk_with(&new_league_mi_3.id, SQL_SERVER_DS) +// .await +// .expect("[3] - Failed the query to the database") +// .expect("[3] - No entity found for the primary key value passed in"); +// +// assert_eq!(new_league_mi.id, inserted_league.id); +// assert_eq!(new_league_mi_2.id, inserted_league_2.id); +// assert_eq!(new_league_mi_3.id, inserted_league_3.id); +// } +// +// /// Same as the multi insert above, but with the specified datasource +// #[cfg(feature = "mysql")] +// #[canyon_sql::macros::canyon_tokio_test] +// fn test_crud_multi_insert_with_mysql_operation() { +// let mut new_league_mi: League = League { +// id: Default::default(), +// ext_id: 54376478_i64, +// slug: "some-new-random-league".to_string(), +// name: "Some New Random League".to_string(), +// region: "Unknown".to_string(), +// image_url: "https://what-a-league.io".to_string(), +// }; +// let mut new_league_mi_2: League = League { +// id: Default::default(), +// ext_id: 3475689769678906_i64, +// slug: "new-league-2".to_string(), +// name: "New League 2".to_string(), +// region: "Really unknown".to_string(), +// image_url: "https://what-an-unknown-league.io".to_string(), +// }; +// let mut new_league_mi_3: League = League { +// id: Default::default(), +// ext_id: 46756867_i64, +// slug: "a-new-multinsert".to_string(), +// name: "New League 3".to_string(), +// region: "The dark side of the moon".to_string(), +// image_url: "https://interplanetary-league.io".to_string(), +// }; +// +// // Insert the instance as database entities +// new_league_mi +// .insert_with(MYSQL_DS) +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_2 +// .insert_with(MYSQL_DS) +// .await +// .expect("Failed insert datasource operation"); +// new_league_mi_3 +// 
.insert_with(MYSQL_DS) +// .await +// .expect("Failed insert datasource operation"); +// +// // Recover the inserted data by primary key +// let inserted_league = League::find_by_pk_with(&new_league_mi.id, MYSQL_DS) +// .await +// .expect("[1] - Failed the query to the database") +// .expect("[1] - No entity found for the primary key value passed in"); +// let inserted_league_2 = League::find_by_pk_with(&new_league_mi_2.id, MYSQL_DS) +// .await +// .expect("[2] - Failed the query to the database") +// .expect("[2] - No entity found for the primary key value passed in"); +// let inserted_league_3 = League::find_by_pk_with(&new_league_mi_3.id, MYSQL_DS) +// .await +// .expect("[3] - Failed the query to the database") +// .expect("[3] - No entity found for the primary key value passed in"); +// +// assert_eq!(new_league_mi.id, inserted_league.id); +// assert_eq!(new_league_mi_2.id, inserted_league_2.id); +// assert_eq!(new_league_mi_3.id, inserted_league_3.id); +// } diff --git a/tests/crud/mod.rs b/tests/crud/mod.rs index 407e727c..f333a6de 100644 --- a/tests/crud/mod.rs +++ b/tests/crud/mod.rs @@ -1,10 +1,9 @@ -#![allow(unused_imports)] - pub mod delete_operations; pub mod foreign_key_operations; +pub mod hex_arch_example; #[cfg(feature = "mssql")] pub mod init_mssql; pub mod insert_operations; pub mod querybuilder_operations; -pub mod select_operations; +pub mod read_operations; pub mod update_operations; diff --git a/tests/crud/querybuilder_operations.rs b/tests/crud/querybuilder_operations.rs index f2dc8b57..1982e762 100644 --- a/tests/crud/querybuilder_operations.rs +++ b/tests/crud/querybuilder_operations.rs @@ -2,6 +2,15 @@ use crate::constants::MYSQL_DS; #[cfg(feature = "mssql")] use crate::constants::SQL_SERVER_DS; +use canyon_sql::connection::DatabaseType; + +/// Tests for the QueryBuilder available operations within Canyon. 
+/// +/// QueryBuilder are the way of obtain more flexibility that with +/// the default generated queries, essentially for build the queries +/// with the SQL filters +/// +use canyon_sql::query::operators::{Comp, Like}; /// Tests for the QueryBuilder available operations within Canyon. /// @@ -11,7 +20,7 @@ use crate::constants::SQL_SERVER_DS; /// use canyon_sql::{ crud::CrudOperations, - query::{operators::Comp, operators::Like, ops::QueryBuilder}, + query::querybuilder::{QueryBuilderOps, SelectQueryBuilderOps, UpdateQueryBuilderOps}, }; use crate::tests_models::league::*; @@ -22,21 +31,24 @@ use crate::tests_models::tournament::*; /// with the parameters that modifies the base SQL to SELECT * FROM #[canyon_sql::macros::canyon_tokio_test] fn test_generated_sql_by_the_select_querybuilder() { - let mut select_with_joins = League::select_query(); - select_with_joins - .inner_join("tournament", "league.id", "tournament.league_id") - .left_join("team", "tournament.id", "player.tournament_id") - .r#where(LeagueFieldValue::id(&7), Comp::Gt) - .and(LeagueFieldValue::name(&"KOREA"), Comp::Eq) + let fv = LeagueFieldValue::name("KOREA".to_string()); + let select_with_joins = League::select_query() + .unwrap() + .inner_join( + TournamentTable::DbName, + LeagueField::id, + TournamentField::league, + ) + .left_join(PlayerTable::DbName, TournamentField::id, PlayerField::id) + .r#where(&LeagueFieldValue::id(7), Comp::Gt) + .and(&fv, Comp::Eq) .and_values_in(LeagueField::name, &["LCK", "STRANGER THINGS"]); - // .query() - // .await; // NOTE: We don't have in the docker the generated relationships // with the joins, so for now, we are just going to check that the - // generated SQL by the SelectQueryBuilder is the spected + // generated SQL by the SelectQueryBuilder is the expected assert_eq!( select_with_joins.read_sql(), - "SELECT * FROM league INNER JOIN tournament ON league.id = tournament.league_id LEFT JOIN team ON tournament.id = player.tournament_id WHERE id > $1 AND name 
= $2 AND name IN ($2, $3)" + "SELECT * FROM league INNER JOIN tournament ON league.id = tournament.league LEFT JOIN player ON tournament.id = player.id WHERE id > $1 AND name = $2 AND name IN ($2, $3)" ) } @@ -47,10 +59,14 @@ fn test_generated_sql_by_the_select_querybuilder() { fn test_crud_find_with_querybuilder() { // Find all the leagues with ID less or equals that 7 // and where it's region column value is equals to 'Korea' + let fv = LeagueFieldValue::region("KOREA".to_string()); let filtered_leagues_result: Result, _> = League::select_query() - .r#where(LeagueFieldValue::id(&50), Comp::LtEq) - .and(LeagueFieldValue::region(&"KOREA"), Comp::Eq) - .query() + .unwrap() + .r#where(&LeagueFieldValue::id(50), Comp::LtEq) + .and(&fv, Comp::Eq) + .build() + .unwrap() + .launch_default() .await; let filtered_leagues: Vec = filtered_leagues_result.unwrap(); @@ -67,8 +83,10 @@ fn test_crud_find_with_querybuilder() { #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder_and_fulllike() { // Find all the leagues with "LC" in their name - let mut filtered_leagues_result = League::select_query(); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Full); + let binding = LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query() + .unwrap() + .r#where(&binding, Like::Full); assert_eq!( filtered_leagues_result.read_sql(), @@ -80,10 +98,12 @@ fn test_crud_find_with_querybuilder_and_fulllike() { /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_fulllike_datasource_mssql() { +fn test_crud_find_with_querybuilder_and_fulllike_with_mssql() { // Find all the leagues with "LC" in their name - let mut filtered_leagues_result = League::select_query_datasource(SQL_SERVER_DS); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Full); + let fv = 
LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&fv, Like::Full); assert_eq!( filtered_leagues_result.read_sql(), @@ -95,10 +115,12 @@ fn test_crud_find_with_querybuilder_and_fulllike_datasource_mssql() { /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_fulllike_datasource_mysql() { +fn test_crud_find_with_querybuilder_and_fulllike_with_mysql() { // Find all the leagues with "LC" in their name - let mut filtered_leagues_result = League::select_query_datasource(MYSQL_DS); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Full); + let fv = LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&fv, Like::Full); assert_eq!( filtered_leagues_result.read_sql(), @@ -112,8 +134,8 @@ fn test_crud_find_with_querybuilder_and_fulllike_datasource_mysql() { #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder_and_leftlike() { // Find all the leagues whose name ends with "CK" - let mut filtered_leagues_result = League::select_query(); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"CK"), Like::Left); + let fv = LeagueFieldValue::name("CK".to_string()); + let filtered_leagues_result = League::select_query().unwrap().r#where(&fv, Like::Left); assert_eq!( filtered_leagues_result.read_sql(), @@ -125,10 +147,12 @@ fn test_crud_find_with_querybuilder_and_leftlike() { /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_leftlike_datasource_mssql() { +fn test_crud_find_with_querybuilder_and_leftlike_with_mssql() { // Find all the leagues whose name ends with "CK" - let mut filtered_leagues_result = 
League::select_query(); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"CK"), Like::Left); + let fv = LeagueFieldValue::name("CK".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&fv, Like::Left); assert_eq!( filtered_leagues_result.read_sql(), @@ -140,10 +164,12 @@ fn test_crud_find_with_querybuilder_and_leftlike_datasource_mssql() { /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_leftlike_datasource_mysql() { +fn test_crud_find_with_querybuilder_and_leftlike_with_mysql() { // Find all the leagues whose name ends with "CK" - let mut filtered_leagues_result = League::select_query_datasource(MYSQL_DS); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"CK"), Like::Left); + let fv = LeagueFieldValue::name("CK".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&fv, Like::Left); assert_eq!( filtered_leagues_result.read_sql(), @@ -157,8 +183,8 @@ fn test_crud_find_with_querybuilder_and_leftlike_datasource_mysql() { #[canyon_sql::macros::canyon_tokio_test] fn test_crud_find_with_querybuilder_and_rightlike() { // Find all the leagues whose name starts with "LC" - let mut filtered_leagues_result = League::select_query(); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Right); + let fv = LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query().unwrap().r#where(&fv, Like::Right); assert_eq!( filtered_leagues_result.read_sql(), @@ -170,24 +196,29 @@ fn test_crud_find_with_querybuilder_and_rightlike() { /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_rightlike_datasource_mssql() { +fn 
test_crud_find_with_querybuilder_and_rightlike_with_mssql() { // Find all the leagues whose name starts with "LC" - let mut filtered_leagues_result = League::select_query_datasource(SQL_SERVER_DS); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Right); + let fv = LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&fv, Like::Right); assert_eq!( filtered_leagues_result.read_sql(), "SELECT * FROM league WHERE name LIKE CONCAT(CAST($1 AS VARCHAR) ,'%')" ) } + /// Builds a new SQL statement for retrieves entities of the `T` type, filtered /// with the parameters that modifies the base SQL to SELECT * FROM #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_and_rightlike_datasource_mysql() { +fn test_crud_find_with_querybuilder_and_rightlike_with_mysql() { // Find all the leagues whose name starts with "LC" - let mut filtered_leagues_result = League::select_query_datasource(MYSQL_DS); - filtered_leagues_result.r#where(LeagueFieldValue::name(&"LC"), Like::Right); + let wh = LeagueFieldValue::name("LEC".to_string()); + let filtered_leagues_result = League::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&wh, Like::Right); assert_eq!( filtered_leagues_result.read_sql(), @@ -198,11 +229,14 @@ fn test_crud_find_with_querybuilder_and_rightlike_datasource_mysql() { /// Same than the above but with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_datasource_mssql() { - // Find all the players where its ID column value is greater that 50 - let filtered_find_players = Player::select_query_datasource(SQL_SERVER_DS) - .r#where(PlayerFieldValue::id(&50), Comp::Gt) - .query() +fn test_crud_find_with_querybuilder_with_mssql() { + // Find all the players where its ID column value is greater than 50 + let 
filtered_find_players = Player::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&PlayerFieldValue::id(50), Comp::Gt) + .build() + .unwrap() + .launch_with::<&str, Player>(SQL_SERVER_DS) .await; assert!(!filtered_find_players.unwrap().is_empty()); @@ -211,11 +245,14 @@ fn test_crud_find_with_querybuilder_datasource_mssql() { /// Same than the above but with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_with_querybuilder_datasource_mysql() { - // Find all the players where its ID column value is greater that 50 - let filtered_find_players = Player::select_query_datasource(MYSQL_DS) - .r#where(PlayerFieldValue::id(&50), Comp::Gt) - .query() +fn test_crud_find_with_querybuilder_with_mysql() { + // Find all the players where its ID column value is greater than 50 + let filtered_find_players = Player::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&PlayerFieldValue::id(50), Comp::Gt) + .build() + .unwrap() + .launch_with::<&str, Player>(MYSQL_DS) .await; assert!(!filtered_find_players.unwrap().is_empty()); @@ -228,28 +265,25 @@ fn test_crud_find_with_querybuilder_datasource_mysql() { fn test_crud_update_with_querybuilder() { // Find all the leagues with ID less or equals that 7 // and where it's region column value is equals to 'Korea' - let mut q = League::update_query(); - q.set(&[ - (LeagueField::slug, "Updated with the QueryBuilder"), - (LeagueField::name, "Random"), - ]) - .r#where(LeagueFieldValue::id(&1), Comp::Gt) - .and(LeagueFieldValue::id(&8), Comp::Lt); - - /* NOTE: Family of QueryBuilders are clone, useful in case of need to read the generated SQL - let qpr = q.clone(); - println!("PSQL: {:?}", qpr.read_sql()); - */ - - // We can now back to the original an throw the query - q.query() - .await + let q = League::update_query() + .unwrap() + .set(&[ + (LeagueField::slug, "Updated with the QueryBuilder"), + (LeagueField::name, "Random"), + ]) + 
.r#where(&LeagueFieldValue::id(1), Comp::Gt) + .and(&LeagueFieldValue::id(8), Comp::Lt); + + q.build() .expect("Failed to update records with the querybuilder"); let found_updated_values = League::select_query() - .r#where(LeagueFieldValue::id(&1), Comp::Gt) - .and(LeagueFieldValue::id(&7), Comp::Lt) - .query() + .unwrap() + .r#where(&LeagueFieldValue::id(1), Comp::Gt) + .and(&LeagueFieldValue::id(7), Comp::Lt) + .build() + .unwrap() + .launch_default::() .await .expect("Failed to retrieve database League entries with the querybuilder"); @@ -261,24 +295,29 @@ fn test_crud_update_with_querybuilder() { /// Same as above, but with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_update_with_querybuilder_datasource_mssql() { +fn test_crud_update_with_querybuilder_with_mssql() { // Find all the leagues with ID less or equals that 7 // and where it's region column value is equals to 'Korea' - let mut q = Player::update_query_datasource(SQL_SERVER_DS); + let q = Player::update_query_with(DatabaseType::SqlServer).unwrap(); q.set(&[ (PlayerField::summoner_name, "Random updated player name"), (PlayerField::first_name, "I am an updated first name"), ]) - .r#where(PlayerFieldValue::id(&1), Comp::Gt) - .and(PlayerFieldValue::id(&8), Comp::Lt) - .query() + .r#where(&PlayerFieldValue::id(1), Comp::Gt) + .and(&PlayerFieldValue::id(8), Comp::Lt) + .build() + .unwrap() + .launch_with::<&str, Player>(SQL_SERVER_DS) .await .expect("Failed to update records with the querybuilder"); - let found_updated_values = Player::select_query_datasource(SQL_SERVER_DS) - .r#where(PlayerFieldValue::id(&1), Comp::Gt) - .and(PlayerFieldValue::id(&7), Comp::LtEq) - .query() + let found_updated_values = Player::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&PlayerFieldValue::id(1), Comp::Gt) + .and(&PlayerFieldValue::id(7), Comp::LtEq) + .build() + .unwrap() + .launch_with::<&str, Player>(SQL_SERVER_DS) .await .expect("Failed 
to retrieve database League entries with the querybuilder"); @@ -291,25 +330,30 @@ fn test_crud_update_with_querybuilder_datasource_mssql() { /// Same as above, but with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_update_with_querybuilder_datasource_mysql() { +fn test_crud_update_with_querybuilder_with_mysql() { // Find all the leagues with ID less or equals that 7 // and where it's region column value is equals to 'Korea' - let mut q = Player::update_query_datasource(MYSQL_DS); + let q = Player::update_query_with(DatabaseType::MySQL).unwrap(); q.set(&[ (PlayerField::summoner_name, "Random updated player name"), (PlayerField::first_name, "I am an updated first name"), ]) - .r#where(PlayerFieldValue::id(&1), Comp::Gt) - .and(PlayerFieldValue::id(&8), Comp::Lt) - .query() + .r#where(&PlayerFieldValue::id(1), Comp::Gt) + .and(&PlayerFieldValue::id(8), Comp::Lt) + .build() + .unwrap() + .launch_with::<&str, Player>(MYSQL_DS) .await .expect("Failed to update records with the querybuilder"); - let found_updated_values = Player::select_query_datasource(MYSQL_DS) - .r#where(PlayerFieldValue::id(&1), Comp::Gt) - .and(PlayerFieldValue::id(&7), Comp::LtEq) - .query() + let found_updated_values = Player::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&PlayerFieldValue::id(1), Comp::Gt) + .and(&PlayerFieldValue::id(7), Comp::LtEq) + .build() + .unwrap() + .launch_with::<&str, Player>(MYSQL_DS) .await .expect("Failed to retrieve database League entries with the querybuilder"); @@ -324,64 +368,97 @@ fn test_crud_update_with_querybuilder_datasource_mysql() { /// /// Note if the database is persisted (not created and destroyed on every docker or /// GitHub Action wake up), it won't delete things that already have been deleted, -/// but this isn't an error. They just don't exists. +/// but this isn't an error. They just don't exist. 
#[cfg(feature = "postgres")] #[canyon_sql::macros::canyon_tokio_test] fn test_crud_delete_with_querybuilder() { Tournament::delete_query() - .r#where(TournamentFieldValue::id(&14), Comp::Gt) - .and(TournamentFieldValue::id(&16), Comp::Lt) - .query() + .unwrap() + .r#where(&TournamentFieldValue::id(14), Comp::Gt) + .and(&TournamentFieldValue::id(16), Comp::Lt) + .build() + .unwrap() + .launch_default::() .await .expect("Error connecting with the database on the delete operation"); assert_eq!(Tournament::find_by_pk(&15).await.unwrap(), None); } +// #[cfg(feature = "postgres")] +// #[canyon_sql::macros::canyon_tokio_test] +// fn test_crud_delete_with_querybuilder_lt_creation() { +// let q = create_querybuilder_lt(10); +// assert_eq!(q.read_sql(), "DELETE FROM tournament WHERE id = 10"); +// } +// +// #[cfg(feature = "postgres")] +// fn create_querybuilder_lt<'a, 'b: 'a>(id: i32) -> DeleteQueryBuilder<'b> { +// Tournament::delete_query() +// .unwrap() +// .r#where(&TournamentFieldValue::id(id), Comp::Gt) +// } + /// Same as the above delete, but with the specified datasource #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_delete_with_querybuilder_datasource_mssql() { - Player::delete_query_datasource(SQL_SERVER_DS) - .r#where(PlayerFieldValue::id(&120), Comp::Gt) - .and(PlayerFieldValue::id(&130), Comp::Lt) - .query() +fn test_crud_delete_with_querybuilder_with_mssql() { + Player::delete_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&PlayerFieldValue::id(120), Comp::Gt) + .and(&PlayerFieldValue::id(130), Comp::Lt) + .build() + .unwrap() + .launch_with::<&str, Player>(SQL_SERVER_DS) .await .expect("Error connecting with the database when we are going to delete data! 
:)"); - assert!(Player::select_query_datasource(SQL_SERVER_DS) - .r#where(PlayerFieldValue::id(&122), Comp::Eq) - .query() - .await - .unwrap() - .is_empty()); + assert!( + Player::select_query_with(DatabaseType::SqlServer) + .unwrap() + .r#where(&PlayerFieldValue::id(122), Comp::Eq) + .build() + .unwrap() + .launch_with::<&str, Player>(SQL_SERVER_DS) + .await + .unwrap() + .is_empty() + ); } /// Same as the above delete, but with the specified datasource #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_delete_with_querybuilder_datasource_mysql() { - Player::delete_query_datasource(MYSQL_DS) - .r#where(PlayerFieldValue::id(&120), Comp::Gt) - .and(PlayerFieldValue::id(&130), Comp::Lt) - .query() +fn test_crud_delete_with_querybuilder_with_mysql() { + Player::delete_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&PlayerFieldValue::id(120), Comp::Gt) + .and(&PlayerFieldValue::id(130), Comp::Lt) + .build() + .unwrap() + .launch_with::<&str, Player>(MYSQL_DS) .await .expect("Error connecting with the database when we are going to delete data! 
:)"); - assert!(Player::select_query_datasource(MYSQL_DS) - .r#where(PlayerFieldValue::id(&122), Comp::Eq) - .query() - .await - .unwrap() - .is_empty()); + assert!( + Player::select_query_with(DatabaseType::MySQL) + .unwrap() + .r#where(&PlayerFieldValue::id(122), Comp::Eq) + .build() + .unwrap() + .launch_with::<&str, Player>(MYSQL_DS) + .await + .unwrap() + .is_empty() + ); } /// Tests for the generated SQL query after use the /// WHERE clause #[canyon_sql::macros::canyon_tokio_test] fn test_where_clause() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq); + let wh = LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query().unwrap().r#where(&wh, Comp::Eq); assert_eq!(l.read_sql(), "SELECT * FROM league WHERE name = $1") } @@ -390,9 +467,11 @@ fn test_where_clause() { /// AND clause #[canyon_sql::macros::canyon_tokio_test] fn test_and_clause() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq) - .and(LeagueFieldValue::id(&10), Comp::LtEq); + let wh = LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query() + .unwrap() + .r#where(&wh, Comp::Eq) + .and(&LeagueFieldValue::id(10), Comp::LtEq); assert_eq!( l.read_sql().trim(), @@ -404,8 +483,10 @@ fn test_and_clause() { /// AND clause #[canyon_sql::macros::canyon_tokio_test] fn test_and_clause_with_in_constraint() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq) + let wh = LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query() + .unwrap() + .r#where(&wh, Comp::Eq) .and_values_in(LeagueField::id, &[1, 7, 10]); assert_eq!( @@ -418,9 +499,11 @@ fn test_and_clause_with_in_constraint() { /// AND clause #[canyon_sql::macros::canyon_tokio_test] fn test_or_clause() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq) - .or(LeagueFieldValue::id(&10), Comp::LtEq); + let wh = 
LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query() + .unwrap() + .r#where(&wh, Comp::Eq) + .or(&LeagueFieldValue::id(10), Comp::LtEq); assert_eq!( l.read_sql().trim(), @@ -432,8 +515,10 @@ fn test_or_clause() { /// AND clause #[canyon_sql::macros::canyon_tokio_test] fn test_or_clause_with_in_constraint() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq) + let wh = LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query() + .unwrap() + .r#where(&wh, Comp::Eq) .or_values_in(LeagueField::id, &[1, 7, 10]); assert_eq!( @@ -446,8 +531,10 @@ fn test_or_clause_with_in_constraint() { /// AND clause #[canyon_sql::macros::canyon_tokio_test] fn test_order_by_clause() { - let mut l = League::select_query(); - l.r#where(LeagueFieldValue::name(&"LEC"), Comp::Eq) + let fv = LeagueFieldValue::name("LEC".to_string()); + let l = League::select_query() + .unwrap() + .r#where(&fv, Comp::Eq) .order_by(LeagueField::id, false); assert_eq!( diff --git a/tests/crud/select_operations.rs b/tests/crud/read_operations.rs similarity index 76% rename from tests/crud/select_operations.rs rename to tests/crud/read_operations.rs index f3342c02..a711b1b4 100644 --- a/tests/crud/select_operations.rs +++ b/tests/crud/read_operations.rs @@ -31,47 +31,30 @@ fn test_crud_find_all() { assert!(!find_all_players.unwrap().is_empty()); } -/// Same as the `find_all()`, but with the unchecked variant, which directly returns `Vec` not -/// `Result` wrapped -#[cfg(feature = "postgres")] -#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_all_unchecked() { - let find_all_result: Vec = League::find_all_unchecked().await; - assert!(!find_all_result.is_empty()); -} - /// Tests the behaviour of a SELECT * FROM {table_name} within Canyon, through the /// `::find_all()` associated function derived with the `CanyonCrud` derive proc-macro /// and using the specified datasource #[cfg(feature = "mssql")] 
#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_all_datasource_mssql() { +fn test_crud_find_all_with_mssql() { let find_all_result: Result, Box> = - League::find_all_datasource(SQL_SERVER_DS).await; + League::find_all_with(SQL_SERVER_DS).await; // Connection doesn't return an error - assert!(!find_all_result.is_err()); + assert!(!find_all_result.is_err(), "{:?}", find_all_result); assert!(!find_all_result.unwrap().is_empty()); } #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_all_datasource_mysql() { +fn test_crud_find_all_with_mysql() { let find_all_result: Result, Box> = - League::find_all_datasource(MYSQL_DS).await; + League::find_all_with(MYSQL_DS).await; + // Connection doesn't return an error assert!(!find_all_result.is_err()); assert!(!find_all_result.unwrap().is_empty()); } -/// Same as the `find_all_datasource()`, but with the unchecked variant and the specified dataosource, -/// returning directly `Vec` and not `Result, Err>` -#[cfg(feature = "mssql")] -#[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_all_unchecked_datasource() { - let find_all_result: Vec = League::find_all_unchecked_datasource(SQL_SERVER_DS).await; - assert!(!find_all_result.is_empty()); -} - /// Tests the behaviour of a SELECT * FROM {table_name} WHERE = , where the pk is /// defined with the #[primary_key] attribute over some field of the type. /// @@ -101,9 +84,9 @@ fn test_crud_find_by_pk() { /// Uses the *specified datasource mssql* in the second parameter of the function call. 
#[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_by_pk_datasource_mssql() { +fn test_crud_find_by_pk_with_mssql() { let find_by_pk_result: Result, Box> = - League::find_by_pk_datasource(&27, SQL_SERVER_DS).await; + League::find_by_pk_with(&27, SQL_SERVER_DS).await; assert!(find_by_pk_result.as_ref().unwrap().is_some()); let some_league = find_by_pk_result.unwrap().unwrap(); @@ -124,9 +107,9 @@ fn test_crud_find_by_pk_datasource_mssql() { /// Uses the *specified datasource mysql* in the second parameter of the function call. #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_find_by_pk_datasource_mysql() { +fn test_crud_find_by_pk_with_mysql() { let find_by_pk_result: Result, Box> = - League::find_by_pk_datasource(&27, MYSQL_DS).await; + League::find_by_pk_with(&27, MYSQL_DS).await; assert!(find_by_pk_result.as_ref().unwrap().is_some()); let some_league = find_by_pk_result.unwrap().unwrap(); @@ -155,13 +138,10 @@ fn test_crud_count_operation() { /// the specified datasource mssql #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_count_datasource_operation_mssql() { +fn test_crud_count_with_operation_mssql() { assert_eq!( - League::find_all_datasource(SQL_SERVER_DS) - .await - .unwrap() - .len() as i64, - League::count_datasource(SQL_SERVER_DS).await.unwrap() + League::find_all_with(SQL_SERVER_DS).await.unwrap().len() as i64, + League::count_with(SQL_SERVER_DS).await.unwrap() ); } @@ -169,9 +149,9 @@ fn test_crud_count_datasource_operation_mssql() { /// the specified datasource mysql #[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_count_datasource_operation_mysql() { +fn test_crud_count_with_operation_mysql() { assert_eq!( - League::find_all_datasource(MYSQL_DS).await.unwrap().len() as i64, - League::count_datasource(MYSQL_DS).await.unwrap() + League::find_all_with(MYSQL_DS).await.unwrap().len() as i64, + 
League::count_with(MYSQL_DS).await.unwrap() ); } diff --git a/tests/crud/update_operations.rs b/tests/crud/update_operations.rs index dfc4af15..2f291884 100644 --- a/tests/crud/update_operations.rs +++ b/tests/crud/update_operations.rs @@ -12,7 +12,7 @@ use crate::constants::SQL_SERVER_DS; /// some change to a Rust's entity instance, and persisting them into the database. /// /// The `t.update(&self)` operation is only enabled for types that -/// has, at least, one of it's fields annotated with a `#[primary_key]` +/// has, at least, one of its fields annotated with a `#[primary_key]` /// operation, because we use that concrete field to construct the clause that targets /// that entity. /// @@ -30,7 +30,7 @@ fn test_crud_update_method_operation() { // The ext_id field value is extracted from the sql scripts under the // docker/sql folder. We are retrieving the first entity inserted at the - // wake up time of the database, and now checking some of its properties. + // wake-up time of the database, and now checking some of its properties. assert_eq!(updt_candidate.ext_id, 100695891328981122_i64); // Modify the value, and perform the update @@ -49,52 +49,52 @@ fn test_crud_update_method_operation() { assert_eq!(updt_entity.ext_id, updt_value); - // We rollback the changes to the initial value to don't broke other tests + // We roll back the changes to the initial value to don't broke other tests // the next time that will run updt_candidate.ext_id = 100695891328981122_i64; updt_candidate .update() .await - .expect("Failed the restablish initial value update operation"); + .expect("Failed to restore the initial value in the psql update operation"); } /// Same as the above test, but with the specified datasource. #[cfg(feature = "mssql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_update_datasource_mssql_method_operation() { +fn test_crud_update_with_mssql_method_operation() { // We first retrieve some entity from the database. 
Note that we must make // the retrieved instance mutable of clone it to a new mutable resource - let mut updt_candidate: League = League::find_by_pk_datasource(&1, SQL_SERVER_DS) + let mut updt_candidate: League = League::find_by_pk_with(&1, SQL_SERVER_DS) .await .expect("[1] - Failed the query to the database") .expect("[1] - No entity found for the primary key value passed in"); // The ext_id field value is extracted from the sql scripts under the // docker/sql folder. We are retrieving the first entity inserted at the - // wake up time of the database, and now checking some of its properties. + // wake-up time of the database, and now checking some of its properties. assert_eq!(updt_candidate.ext_id, 100695891328981122_i64); // Modify the value, and perform the update let updt_value: i64 = 59306442534_i64; updt_candidate.ext_id = updt_value; updt_candidate - .update_datasource(SQL_SERVER_DS) + .update_with(SQL_SERVER_DS) .await .expect("Failed the update operation"); // Retrieve it again, and check if the value was really updated - let updt_entity: League = League::find_by_pk_datasource(&1, SQL_SERVER_DS) + let updt_entity: League = League::find_by_pk_with(&1, SQL_SERVER_DS) .await .expect("[2] - Failed the query to the database") .expect("[2] - No entity found for the primary key value passed in"); assert_eq!(updt_entity.ext_id, updt_value); - // We rollback the changes to the initial value to don't broke other tests + // We roll back the changes to the initial value to don't broke other tests // the next time that will run updt_candidate.ext_id = 100695891328981122_i64; updt_candidate - .update_datasource(SQL_SERVER_DS) + .update_with(SQL_SERVER_DS) .await .expect("Failed to restablish the initial value update operation"); } @@ -102,41 +102,41 @@ fn test_crud_update_datasource_mssql_method_operation() { /// Same as the above test, but with the specified datasource. 
#[cfg(feature = "mysql")] #[canyon_sql::macros::canyon_tokio_test] -fn test_crud_update_datasource_mysql_method_operation() { +fn test_crud_update_with_mysql_method_operation() { // We first retrieve some entity from the database. Note that we must make // the retrieved instance mutable of clone it to a new mutable resource - let mut updt_candidate: League = League::find_by_pk_datasource(&1, MYSQL_DS) + let mut updt_candidate: League = League::find_by_pk_with(&1, MYSQL_DS) .await .expect("[1] - Failed the query to the database") .expect("[1] - No entity found for the primary key value passed in"); // The ext_id field value is extracted from the sql scripts under the // docker/sql folder. We are retrieving the first entity inserted at the - // wake up time of the database, and now checking some of its properties. + // wake-up time of the database, and now checking some of its properties. assert_eq!(updt_candidate.ext_id, 100695891328981122_i64); // Modify the value, and perform the update let updt_value: i64 = 59306442534_i64; updt_candidate.ext_id = updt_value; updt_candidate - .update_datasource(MYSQL_DS) + .update_with(MYSQL_DS) .await .expect("Failed the update operation"); // Retrieve it again, and check if the value was really updated - let updt_entity: League = League::find_by_pk_datasource(&1, MYSQL_DS) + let updt_entity: League = League::find_by_pk_with(&1, MYSQL_DS) .await .expect("[2] - Failed the query to the database") .expect("[2] - No entity found for the primary key value passed in"); assert_eq!(updt_entity.ext_id, updt_value); - // We rollback the changes to the initial value to don't broke other tests + // We roll back the changes to the initial value to don't broke other tests // the next time that will run updt_candidate.ext_id = 100695891328981122_i64; updt_candidate - .update_datasource(MYSQL_DS) + .update_with(MYSQL_DS) .await .expect("Failed to restablish the initial value update operation"); } diff --git a/tests/migrations/mod.rs 
b/tests/migrations/mod.rs index b0fbed96..957c0a5d 100644 --- a/tests/migrations/mod.rs +++ b/tests/migrations/mod.rs @@ -1,15 +1,34 @@ #![allow(unused_imports)] + use crate::constants; +use canyon_sql::connection::DbConnection; +use canyon_sql::core::Canyon; /// Integration tests for the migrations feature of `Canyon-SQL` -use canyon_sql::crud::Transaction; -#[cfg(feature = "migrations")] +use canyon_sql::core::Transaction; use canyon_sql::migrations::handler::Migrations; +use std::ops::DerefMut; /// Brings the information of the `PostgreSQL` requested schema #[cfg(all(feature = "postgres", feature = "migrations"))] #[canyon_sql::macros::canyon_tokio_test] fn test_migrations_postgresql_status_query() { - let results = Migrations::query(constants::FETCH_PUBLIC_SCHEMA, [], constants::PSQL_DS).await; + let canyon = Canyon::instance().unwrap(); + + let ds = canyon.find_datasource_by_name_or_default(constants::PSQL_DS); + assert!(ds.is_ok()); + let ds = ds.unwrap(); + let ds_name = &ds.name; + + let db_conn = canyon.get_connection(ds_name).unwrap_or_else(|_| { + panic!( + "Unable to get a database connection on Canyon Memory: {:?}", + ds_name + ) + }); + + let results = db_conn + .query_rows(constants::FETCH_PUBLIC_SCHEMA, &[]) + .await; assert!(results.is_ok()); let res = results.unwrap(); diff --git a/tests/simple_canyon.toml b/tests/simple_canyon.toml new file mode 100644 index 00000000..a5536b6e --- /dev/null +++ b/tests/simple_canyon.toml @@ -0,0 +1,12 @@ +[canyon_sql] + +[[canyon_sql.datasources]] +name = 'postgres_docker' + +[canyon_sql.datasources.auth] +postgresql = { basic = { username = 'postgres', password = 'postgres'}} + +[canyon_sql.datasources.properties] +host = 'localhost' +port = 5438 +db_name = 'postgres' \ No newline at end of file diff --git a/tests/tests_models/league.rs b/tests/tests_models/league.rs index 3f3037e7..b1503117 100644 --- a/tests/tests_models/league.rs +++ b/tests/tests_models/league.rs @@ -1,8 +1,7 @@ use canyon_sql::macros::*; 
#[derive(Debug, Fields, CanyonCrud, CanyonMapper, ForeignKeyable, Eq, PartialEq)] -// #[canyon_entity(table_name = "league", schema = "public")] -#[canyon_entity(table_name = "league")] +#[canyon_entity(table_name = "league", /* schema = "public"*/)] pub struct League { #[primary_key] id: i32, diff --git a/tests/tests_models/player.rs b/tests/tests_models/player.rs index 59c03daa..3bdc251e 100644 --- a/tests/tests_models/player.rs +++ b/tests/tests_models/player.rs @@ -9,11 +9,11 @@ use canyon_sql::macros::*; /// Note that this entity has a primary key declared in the database, but we will /// omit this in Canyon, so for us, is like if the primary key wasn't set up. /// -/// Remember that the entities that does not declares at least a field as `#[primary_key]` +/// Remember that the entities that does not declare at least a field as `#[primary_key]` /// does not have all the CRUD operations available, only the ones that doesn't -/// requires of a primary key. +/// require of a primary key. pub struct Player { - // #[primary_key] We will omit this to use it as a mock of entities that doesn't declares primary key + // #[primary_key] // We will omit this to use it as a mock of entities that doesn't declare primary key id: i32, ext_id: i64, first_name: String,