diff --git a/drift/CHANGELOG.md b/drift/CHANGELOG.md new file mode 100644 index 00000000..21f89afb --- /dev/null +++ b/drift/CHANGELOG.md @@ -0,0 +1,7 @@ +## 4.6.0 + +- Add `DoUpdate.withExcluded` to refer to the excluded row in an upsert clause. +- Add optional `where` clause to `DoUpdate` constructors + +This is the initial release of the `drift` package (formally known as `moor`). +For an overview of old `moor` releases, see its [changelog](https://pub.dev/packages/moor/changelog). diff --git a/drift/lib/backends.dart b/drift/lib/backends.dart new file mode 100644 index 00000000..7fa5cc90 --- /dev/null +++ b/drift/lib/backends.dart @@ -0,0 +1,9 @@ +/// Utility classes to implement custom database backends that work together +/// with drift. +library backends; + +export 'src/runtime/executor/executor.dart'; +export 'src/runtime/executor/helpers/delegates.dart'; +export 'src/runtime/executor/helpers/engines.dart'; +export 'src/runtime/executor/helpers/results.dart'; +export 'src/runtime/query_builder/query_builder.dart' show SqlDialect; diff --git a/drift/lib/drift.dart b/drift/lib/drift.dart new file mode 100644 index 00000000..df73c062 --- /dev/null +++ b/drift/lib/drift.dart @@ -0,0 +1,21 @@ +library drift; + +// needed for the generated code that generates data classes with an Uint8List +// field. +export 'dart:typed_data' show Uint8List; + +// needed for generated code which provides an @required parameter hint where +// appropriate +export 'package:meta/meta.dart' show required; +export 'src/dsl/dsl.dart'; +export 'src/runtime/api/runtime_api.dart'; +export 'src/runtime/custom_result_set.dart'; +export 'src/runtime/data_class.dart'; +export 'src/runtime/data_verification.dart'; +export 'src/runtime/exceptions.dart'; +export 'src/runtime/executor/connection_pool.dart'; +export 'src/runtime/executor/executor.dart'; +export 'src/runtime/query_builder/query_builder.dart'; +export 'src/runtime/types/sql_types.dart'; +export 'src/utils/expand_variables.dart'; +export 'src/utils/lazy_database.dart'; diff --git a/drift/lib/extensions/json1.dart b/drift/lib/extensions/json1.dart new file mode 100644 index 00000000..5a7600d8 --- /dev/null +++ b/drift/lib/extensions/json1.dart @@ -0,0 +1,56 @@ +/// Experimental bindings to the [json1](https://www.sqlite.org/json1.html) +/// sqlite extension. +/// +/// Note that the json1 extension might not be available on all runtimes. +/// When using this library, it is recommended to use a `NativeDatabase` with +/// a dependency on `sqlite3_flutter_libs`. +@experimental +library json1; + +import 'package:meta/meta.dart'; +import '../drift.dart'; + +/// Defines extensions on string expressions to support the json1 api from Dart. +extension JsonExtensions on Expression { + /// Assuming that this string is a json array, returns the length of this json + /// array. + /// + /// The [path] parameter is optional. If it's set, it must refer to a valid + /// path in this json that will be used instead of `this`. See the + /// [sqlite documentation](https://www.sqlite.org/json1.html#path_arguments) + /// for details. If [path] is an invalid path, this expression can cause an + /// error when run by sqlite. + /// + /// For this method to be valid, `this` must be a string representing a valid + /// json array. Otherwise, sqlite will report an error when attempting to + /// evaluate this expression. 
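+  ///
+  /// As a minimal sketch (the `entries` table and its `tags` text column are
+  /// assumptions for this example, not part of drift):
+  /// ```dart
+  /// final tagCount = entries.tags.jsonArrayLength();
+  /// final query = selectOnly(entries)..addColumns([tagCount]);
+  /// ```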
+ /// + /// See also: + /// - the [sqlite documentation for this function](https://www.sqlite.org/json1.html#the_json_array_length_function) + Expression jsonArrayLength([String? path]) { + return FunctionCallExpression('json_array_length', [ + this, + if (path != null) Variable.withString(path), + ]); + } + + /// Assuming that this string is a json object or array, extracts a part of + /// this structure identified by [path]. + /// + /// For more details on how to format the [path] argument, see the + /// [sqlite documentation](https://www.sqlite.org/json1.html#path_arguments). + /// + /// Evaluating this expression will cause an error if [path] has an invalid + /// format or `this` isn't well-formatted json. + /// + /// Note that the [T] type parameter has to be set if this function is used + /// in [JoinedSelectStatement.addColumns] or compared via [Expression.equals]. + /// The [T] parameter denotes the mapped Dart type for this expression, + /// such as [String]. + Expression jsonExtract(String path) { + return FunctionCallExpression('json_extract', [ + this, + Variable.withString(path), + ]).dartCast(); + } +} diff --git a/drift/lib/extensions/moor_ffi.dart b/drift/lib/extensions/moor_ffi.dart new file mode 100644 index 00000000..169b303b --- /dev/null +++ b/drift/lib/extensions/moor_ffi.dart @@ -0,0 +1,114 @@ +/// High-level bindings to mathematical functions that are only available in +/// a `NativeDatabase`. +library drift.ffi.functions; + +import 'dart:math'; + +import '../drift.dart'; + +/// Raises [base] to the power of [exponent]. +/// +/// This function is equivalent to [pow], except that it evaluates to null +/// instead of `NaN`. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlPow(Expression base, Expression exponent) { + return FunctionCallExpression('pow', [base, exponent]); +} + +/// Calculates the square root of [value] in sql. +/// +/// This function is equivalent to [sqrt], except that it returns null instead +/// of `NaN` for negative values. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlSqrt(Expression value) { + return FunctionCallExpression('sqrt', [value]); +} + +/// Calculates the sine of [value] in sql. +/// +/// This function is equivalent to [sin]. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlSin(Expression value) { + return FunctionCallExpression('sin', [value]); +} + +/// Calculates the cosine of [value] in sql. +/// +/// This function is equivalent to [sin]. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlCos(Expression value) { + return FunctionCallExpression('cos', [value]); +} + +/// Calculates the tangent of [value] in sql. +/// +/// This function is equivalent to [tan]. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlTan(Expression value) { + return FunctionCallExpression('tan', [value]); +} + +/// Calculates the arc sine of [value] in sql. +/// +/// This function is equivalent to [asin], except that it evaluates to null +/// instead of `NaN`. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlAsin(Expression value) { + return FunctionCallExpression('asin', [value]); +} + +/// Calculates the cosine of [value] in sql. +/// +/// This function is equivalent to [acos], except that it evaluates to null +/// instead of `NaN`. +/// +/// This function is only available when using `moor_ffi`. 
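+///
+/// As a sketch (the `points` table and its real column `x` are assumptions
+/// for this example), the arc cosine of a column could be selected with:
+/// ```dart
+/// final angle = sqlAcos(points.x);
+/// final query = selectOnly(points)..addColumns([angle]);
+/// ```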
+Expression sqlAcos(Expression value) { + return FunctionCallExpression('acos', [value]); +} + +/// Calculates the tangent of [value] in sql. +/// +/// This function is equivalent to [atan], except that it evaluates to null +/// instead of `NaN`. +/// +/// This function is only available when using `moor_ffi`. +Expression sqlAtan(Expression value) { + return FunctionCallExpression('atan', [value]); +} + +/// Adds functionality to string expressions that only work when using +/// `moor_ffi`. +extension MoorFfiSpecificStringExtensions on Expression { + /// Version of `contains` that allows controlling case sensitivity better. + /// + /// The default `contains` method uses sqlite's `LIKE`, which is case- + /// insensitive for the English alphabet only. [containsCase] is implemented + /// in Dart with better support for casing. + /// When [caseSensitive] is false (the default), this is equivalent to the + /// Dart expression `this.contains(substring)`, where `this` is the string + /// value this expression evaluates to. + /// When [caseSensitive] is true, the equivalent Dart expression would be + /// `this.toLowerCase().contains(substring.toLowerCase())`. + /// + /// Note that, while Dart has better support for an international alphabet, + /// it can still yield unexpected results like the + /// [Turkish İ Problem](https://haacked.com/archive/2012/07/05/turkish-i-problem-and-why-you-should-care.aspx/) + /// + /// Note that this is only available when using `moor_ffi` version 0.6.0 or + /// greater. + Expression containsCase(String substring, + {bool caseSensitive = false}) { + return FunctionCallExpression('moor_contains', [ + this, + Variable(substring), + if (caseSensitive) const Constant(1) else const Constant(0), + ]); + } +} diff --git a/drift/lib/ffi.dart b/drift/lib/ffi.dart new file mode 100644 index 00000000..582d9bfd --- /dev/null +++ b/drift/lib/ffi.dart @@ -0,0 +1,13 @@ +/// Moor implementation using `package:sqlite3/`. +/// +/// When using a [NativeDatabase], you need to ensure that `sqlite3` is +/// available when running your app. For mobile Flutter apps, you can simply +/// depend on the `sqlite3_flutter_libs` package to ship the latest sqlite3 +/// version with your app. +/// For more information other platforms, see [other engines](https://drift.simonbinder.eu/docs/other-engines/vm/). +library moor.ffi; + +import 'src/ffi/database.dart'; + +export 'package:sqlite3/sqlite3.dart' show SqliteException; +export 'src/ffi/database.dart'; diff --git a/drift/lib/isolate.dart b/drift/lib/isolate.dart new file mode 100644 index 00000000..d19d96e0 --- /dev/null +++ b/drift/lib/isolate.dart @@ -0,0 +1,131 @@ +/// Contains utils to run moor databases in a background isolate. This API is +/// not supported on the web. +library isolate; + +import 'dart:isolate'; + +import 'package:stream_channel/stream_channel.dart'; + +import 'drift.dart'; +import 'remote.dart'; +import 'src/isolate.dart'; + +/// Signature of a function that opens a database connection. +typedef DatabaseOpener = DatabaseConnection Function(); + +/// Defines utilities to run moor in a background isolate. In the operation mode +/// created by these utilities, there's a single background isolate doing all +/// the work. Any other isolate can use the [connect] method to obtain an +/// instance of a [GeneratedDatabase] class that will delegate its work onto a +/// background isolate. Auto-updating queries, and transactions work across +/// isolates, and the user facing api is exactly the same. 
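+///
+/// A rough sketch of the intended usage (the `openConnection` function and
+/// the `MyDatabase` class are assumptions for this example and have to be
+/// defined in your own code):
+/// ```dart
+/// // Must be a top-level function or a static method, see [spawn].
+/// DatabaseConnection openConnection() {
+///   return DatabaseConnection.fromExecutor(NativeDatabase.memory());
+/// }
+///
+/// Future<MyDatabase> openOnBackgroundIsolate() async {
+///   final isolate = await DriftIsolate.spawn(openConnection);
+///   // MyDatabase.connect is assumed to forward to the generated
+///   // super.connect constructor.
+///   return MyDatabase.connect(await isolate.connect());
+/// }
+/// ```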
+/// +/// Please note that, while running moor in a background isolate can reduce +/// latency in foreground isolates (thus reducing UI lags), the overall +/// performance is going to be much worse as data has to be serialized and +/// deserialized to be sent over isolates. +/// Also, be aware that this api is not available on the web. +/// +/// See also: +/// - [Isolate], for general information on multi threading in Dart. +/// - The [detailed documentation](https://moor.simonbinder.eu/docs/advanced-features/isolates), +/// which provides example codes on how to use this api. +class DriftIsolate { + /// The underlying port used to establish a connection with this + /// [DriftIsolate]. + /// + /// This [SendPort] can safely be sent over isolates. The receiving isolate + /// can reconstruct a [DriftIsolate] by using [DriftIsolate.fromConnectPort]. + final SendPort connectPort; + + /// Creates a [DriftIsolate] talking to another isolate by using the + /// [connectPort]. + DriftIsolate.fromConnectPort(this.connectPort); + + StreamChannel _open() { + final receive = ReceivePort('moor client receive'); + connectPort.send(receive.sendPort); + + final controller = + StreamChannelController(allowForeignErrors: false, sync: true); + receive.listen((message) { + if (message is SendPort) { + controller.local.stream + .map(prepareForTransport) + .listen(message.send, onDone: receive.close); + } else { + controller.local.sink.add(decodeAfterTransport(message)); + } + }); + + return controller.foreign; + } + + /// Connects to this [DriftIsolate] from another isolate. + /// + /// All operations on the returned [DatabaseConnection] will be executed on a + /// background isolate. Setting the [isolateDebugLog] is only helpful when + /// debugging moor itself. + // todo: breaking: Make synchronous in drift 5 + Future connect({bool isolateDebugLog = false}) async { + return remote(_open(), debugLog: isolateDebugLog); + } + + /// Stops the background isolate and disconnects all [DatabaseConnection]s + /// created. + /// If you only want to disconnect a database connection created via + /// [connect], use [GeneratedDatabase.close] instead. + Future shutdownAll() { + return shutdown(_open()); + } + + /// Creates a new [DriftIsolate] on a background thread. + /// + /// The [opener] function will be used to open the [DatabaseConnection] used + /// by the isolate. Most implementations are likely to use + /// [DatabaseConnection.fromExecutor] instead of providing stream queries and + /// the type system manually. + /// + /// Because [opener] will be called on another isolate with its own memory, + /// it must either be a top-level member or a static class method. + /// + /// To close the isolate later, use [shutdownAll]. + static Future spawn(DatabaseOpener opener) async { + final receiveServer = ReceivePort(); + final keyFuture = receiveServer.first; + + await Isolate.spawn(_startMoorIsolate, [receiveServer.sendPort, opener]); + final key = await keyFuture as SendPort; + return DriftIsolate.fromConnectPort(key); + } + + /// Creates a [DriftIsolate] in the [Isolate.current] isolate. The returned + /// [DriftIsolate] is an object than can be sent across isolates - any other + /// isolate can then use [DriftIsolate.connect] to obtain a special database + /// connection which operations are all executed on this isolate. 
+ /// + /// When [killIsolateWhenDone] is enabled (it defaults to `false`) and + /// [shutdownAll] is called on the returned [DriftIsolate], the isolate used + /// to call [DriftIsolate.inCurrent] will be killed. + factory DriftIsolate.inCurrent(DatabaseOpener opener, + {bool killIsolateWhenDone = false}) { + final server = RunningMoorServer(Isolate.current, opener(), + killIsolateWhenDone: killIsolateWhenDone); + return DriftIsolate.fromConnectPort(server.portToOpenConnection); + } +} + +/// Creates a [RunningMoorServer] and sends a [SendPort] that can be used to +/// establish connections. +/// +/// Te [args] list must contain two elements. The first one is the [SendPort] +/// that [_startMoorIsolate] will use to send the new [SendPort] used to +/// establish further connections. The second element is a [DatabaseOpener] +/// used to open the underlying database connection. +void _startMoorIsolate(List args) { + final sendPort = args[0] as SendPort; + final opener = args[1] as DatabaseOpener; + + final server = RunningMoorServer(Isolate.current, opener()); + sendPort.send(server.portToOpenConnection); +} diff --git a/drift/lib/remote.dart b/drift/lib/remote.dart new file mode 100644 index 00000000..707b30e7 --- /dev/null +++ b/drift/lib/remote.dart @@ -0,0 +1,130 @@ +/// Library support for accessing remote databases. +/// +/// This library provides support for database servers and remote clients. It +/// makes few assumptions over the underlying two-way communication channel, +/// except that it must adhere to the [StreamChannel] guarantees. +/// +/// This allows you to use a drift database (including stream queries) over a +/// remote connection as it were a local database. For instance, this api could +/// be used for +/// +/// - accessing databases on a remote isolate: The `package:drift/isolate.dart` +/// library is implemented on top of this library. +/// - running databases in web workers +/// - synchronizing stream queries and data across multiple tabs with shared +/// web workers +/// - accessing databases over TCP or WebSockets. +/// +/// Drift uses an internal protocol to serialize database requests over stream +/// channels. To make the implementation of channels easier, drift guarantees +/// that nothing but the following messages will be sent: +/// +/// - primitive values (`null`, [int], [bool], [double], [String]) +/// - lists +/// +/// Lists are allowed to nest, but drift will never send messages with cyclic +/// references. Implementations are not required to reserve the type argument +/// of lists when serializing them. +/// However, note that drift might encode a `List` as `Uint8List`. For +/// performance reasons, channel implementations should preserve this. +/// +/// Moor assumes full control over the [StreamChannel]s it manages. For this +/// reason, do not send your own messages over them or close them prematurely. +/// If you need further channels over the same underlying connection, consider a +/// [MultiChannel] instead. +/// +/// The public apis of this libraries are stable. The present [experimental] +/// annotation refers to the underlying protocol implementation. +/// As long as this library is marked as experimental, the communication +/// protocol can change in every version. For this reason, please make sure that +/// all channel participants are using the exact same drift version. +/// For local communication across isolates or web workers, this is usually not +/// an issue. 
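+///
+/// As a rough sketch, assuming `channel` is a [StreamChannel] you have set up
+/// yourself (for instance an `IsolateChannel` or one backed by a web socket)
+/// and `executor` is any [QueryExecutor] you already have, the two sides
+/// could look like this:
+/// ```dart
+/// // server side
+/// final server = MoorServer(DatabaseConnection.fromExecutor(executor));
+/// server.serve(channel);
+///
+/// // client side
+/// final DatabaseConnection connection = remote(channel);
+/// ```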
+/// +/// For an example of a channel implementation, you could study the +/// implementation of the `package:drift/isolate.dart` library, which uses this +/// library to implement its apis. +/// The [web](https://drift.simonbinder.eu/web/) documentation on the website +/// contains another implementation based on web workers that might be of +/// interest. +@experimental +library drift.remote; + +import 'package:meta/meta.dart'; +import 'package:stream_channel/stream_channel.dart'; + +import 'drift.dart'; +import 'remote.dart' as self; + +import 'src/remote/client_impl.dart'; +import 'src/remote/communication.dart'; +import 'src/remote/protocol.dart'; +import 'src/remote/server_impl.dart'; + +/// Serves a drift database connection over any two-way communication channel. +/// +/// Users are responsible for creating the underlying stream channels before +/// passing them to this server via [serve]. +/// A single drift server can safely handle multiple clients. +@sealed +abstract class MoorServer { + /// Creates a drift server proxying incoming requests to the underlying + /// [connection]. + /// + /// If [allowRemoteShutdown] is set to `true` (it defaults to `false`), + /// clients can use [shutdown] to stop this server remotely. + factory MoorServer(DatabaseConnection connection, + {bool allowRemoteShutdown = false}) { + return ServerImplementation(connection, allowRemoteShutdown); + } + + /// A future that completes when this server has been shut down. + /// + /// This future completes after [shutdown] is called directly on this + /// instance, or if a remote client uses [self.shutdown] on a connection + /// handled by this server. + Future get done; + + /// Starts processing requests from the [channel]. + /// + /// The [channel] uses a drift-internal protocol to serialize database + /// requests. Moor assumes full control over the [channel]. Manually sending + /// messages over it, or closing it prematurely, can disrupt the server. + /// + /// __Warning__: As long as this library is marked experimental, the protocol + /// might change with every drift version. For this reason, make sure that + /// your server and clients are using the exact same version of the drift + /// package to avoid conflicts. + void serve(StreamChannel channel); + + /// Shuts this server down. + /// + /// The server will continue to handle ongoing requests, but enqueued or new + /// requests will be rejected. + /// + /// This future returns after all client connections have been closed. + Future shutdown(); +} + +/// Connects to a remote server over a two-way communication channel. +/// +/// On the remote side, the corresponding [channel] must have been passed to +/// [MoorServer.serve] for this setup to work. +/// +/// The optional [debugLog] can be enabled to print incoming and outgoing +/// messages. +DatabaseConnection remote(StreamChannel channel, + {bool debugLog = false}) { + final client = MoorClient(channel, debugLog); + return client.connection; +} + +/// Sends a shutdown request over a channel. +/// +/// On the remote side, the corresponding channel must have been passed to +/// [MoorServer.serve] for this setup to work. +/// Also, the [MoorServer] must have been configured to allow remote-shutdowns. 
+Future shutdown(StreamChannel channel) { + final comm = MoorCommunication(channel); + return comm.request(NoArgsRequest.terminateAll); +} diff --git a/drift/lib/sqlite_keywords.dart b/drift/lib/sqlite_keywords.dart new file mode 100644 index 00000000..086c97cd --- /dev/null +++ b/drift/lib/sqlite_keywords.dart @@ -0,0 +1,168 @@ +/// Provides utilities around sql keywords, like optional escaping etc. +library drift.sqlite_keywords; + +/// Contains a set of all sqlite keywords, according to +/// https://www.sqlite.org/lang_keywords.html. Moor will use this list to +/// escape keywords. +const sqliteKeywords = { + 'ADD', + 'ABORT', + 'ACTION', + 'AFTER', + 'ALL', + 'ALTER', + 'ALWAYS', + 'ANALYZE', + 'AND', + 'AS', + 'ASC', + 'ATTACH', + 'AUTOINCREMENT', + 'BEFORE', + 'BEGIN', + 'BETWEEN', + 'BY', + 'CASCADE', + 'CASE', + 'CAST', + 'CHECK', + 'COLLATE', + 'COLUMN', + 'COMMIT', + 'CONFLICT', + 'CONSTRAINT', + 'CREATE', + 'CROSS', + 'CURRENT', + 'CURRENT_DATE', + 'CURRENT_TIME', + 'CURRENT_TIMESTAMP', + 'DATABASE', + 'DEFAULT', + 'DEFERRABLE', + 'DEFERRED', + 'DELETE', + 'DESC', + 'DETACH', + 'DISTINCT', + 'DO', + 'DROP', + 'EACH', + 'ELSE', + 'END', + 'ESCAPE', + 'EXCEPT', + 'EXCLUDE', + 'EXCLUSIVE', + 'EXISTS', + 'EXPLAIN', + 'FAIL', + 'FALSE', + 'FILTER', + 'FIRST', + 'FOLLOWING', + 'FOR', + 'FOREIGN', + 'FROM', + 'FULL', + 'GENERATED', + 'GLOB', + 'GROUP', + 'GROUPS', + 'HAVING', + 'IF', + 'IGNORE', + 'IMMEDIATE', + 'IN', + 'INDEX', + 'INDEXED', + 'INITIALLY', + 'INNER', + 'INSERT', + 'INSTEAD', + 'INTERSECT', + 'INTO', + 'IS', + 'ISNULL', + 'JOIN', + 'KEY', + 'LAST', + 'LEFT', + 'LIKE', + 'LIMIT', + 'MATCH', + 'NATURAL', + 'NO', + 'NOT', + 'NOTHING', + 'NOTNULL', + 'NULL', + 'NULLS', + 'OF', + 'OFFSET', + 'ON', + 'OR', + 'ORDER', + 'OTHERS', + 'OUTER', + 'OVER', + 'PARTITION', + 'PLAN', + 'PRAGMA', + 'PRECEDING', + 'PRIMARY', + 'QUERY', + 'RAISE', + 'RANGE', + 'RECURSIVE', + 'REFERENCES', + 'REGEXP', + 'REINDEX', + 'RELEASE', + 'RENAME', + 'REPLACE', + 'RIGHT', + 'RESTRICT', + 'ROLLBACK', + 'ROW', + 'ROWID', + 'ROWS', + 'SAVEPOINT', + 'SELECT', + 'SET', + 'TABLE', + 'TEMP', + 'TEMPORARY', + 'THEN', + 'TIES', + 'TO', + 'TRANSACTION', + 'TRIGGER', + 'TRUE', + 'UNBOUNDED', + 'UNION', + 'UNIQUE', + 'UPDATE', + 'USING', + 'VACUUM', + 'VALUES', + 'VIEW', + 'VIRTUAL', + 'WHEN', + 'WHERE', + 'WINDOW', + 'WITH', + 'WITHOUT', +}; + +/// Returns whether [s] is an sql keyword by comparing it to the +/// [sqliteKeywords]. +bool isSqliteKeyword(String s) => sqliteKeywords.contains(s.toUpperCase()); + +final _whitespace = RegExp(r'\s'); + +/// Escapes [s] by wrapping it in backticks if it's an sqlite keyword. +String escapeIfNeeded(String s) { + if (isSqliteKeyword(s) || s.contains(_whitespace)) return '"$s"'; + return s; +} diff --git a/drift/lib/src/dsl/columns.dart b/drift/lib/src/dsl/columns.dart new file mode 100644 index 00000000..fb9daeba --- /dev/null +++ b/drift/lib/src/dsl/columns.dart @@ -0,0 +1,233 @@ +part of 'dsl.dart'; + +/// Base class for columns in sql. Type [T] refers to the type a value of this +/// column will have in Dart. +abstract class Column extends Expression { + @override + final Precedence precedence = Precedence.primary; + + /// The (unescaped) name of this column. + /// + /// Use [escapedName] to access a name that's escaped in double quotes if + /// needed. + String get name; + + /// [name], but escaped if it's an sql keyword. + String get escapedName => escapeIfNeeded(name); +} + +/// A column that stores int values. 
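+///
+/// In a table class, such a column is declared with [Table.integer], for
+/// instance (the getter name is just an example):
+/// ```dart
+/// IntColumn get age => integer().nullable()();
+/// ```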
+typedef IntColumn = Column; + +/// A column that stores boolean values. Booleans will be stored as an integer +/// that can either be 0 (false) or 1 (true). +typedef BoolColumn = Column; + +/// A column that stores text. +typedef TextColumn = Column; + +/// A column that stores a [DateTime]. Times will be stored as unix timestamp +/// and will thus have a second accuracy. +typedef DateTimeColumn = Column; + +/// A column that stores arbitrary blobs of data as a [Uint8List]. +typedef BlobColumn = Column; + +/// A column that stores floating point numeric values. +typedef RealColumn = Column; + +/// A column builder is used to specify which columns should appear in a table. +/// All of the methods defined in this class and its subclasses are not meant to +/// be called at runtime. Instead, moor_generator will take a look at your +/// source code (specifically, it will analyze which of the methods you use) to +/// figure out the column structure of a table. +class ColumnBuilder {} + +/// DSL extension to define a column with moor. +extension BuildColumn on ColumnBuilder { + /// By default, the field name will be used as the column name, e.g. + /// `IntColumn get id = integer()` will have "id" as its associated name. + /// Columns made up of multiple words are expected to be in camelCase and will + /// be converted to snake_case (e.g. a getter called accountCreationDate will + /// result in an SQL column called account_creation_date). + /// To change this default behavior, use something like + /// `IntColumn get id = integer((c) => c.named('user_id'))`. + /// + /// Note that using [named] __does not__ have an effect on the json key of an + /// object. To change the json key, annotate this column getter with + /// [JsonKey]. + ColumnBuilder named(String name) => _isGenerated(); + + /// Marks this column as nullable. Nullable columns should not appear in a + /// primary key. Columns are non-null by default. + ColumnBuilder nullable() => _isGenerated(); + + /// Tells moor to write a custom constraint after this column definition when + /// writing this column, for instance in a CREATE TABLE statement. + /// + /// When no custom constraint is set, columns will be written like this: + /// `name TYPE NULLABILITY NATIVE_CONSTRAINTS`. Native constraints are used to + /// enforce that booleans are either 0 or 1 (e.g. + /// `field BOOLEAN NOT NULL CHECK (field in (0, 1)`). Auto-Increment + /// columns also make use of the native constraints, as do default values. + /// If [customConstraint] has been called, the nullability information and + /// native constraints will never be written. Instead, they will be replaced + /// with the [constraint]. For example, if you call + /// `customConstraint('UNIQUE')` on an [IntColumn] named "votes", the + /// generated column definition will be `votes INTEGER UNIQUE`. Notice how the + /// nullability information is lost - you'll have to include it in + /// [constraint] if that is desired. + /// + /// This can be used to implement constraints that moor does not (yet) + /// support (e.g. unique keys, etc.). If you've found a common use-case for + /// this, it should be considered a limitation of moor itself. Please feel + /// free to open an issue at https://github.com/simolus3/moor/issues/new to + /// report that. 
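+  ///
+  /// As a sketch, the `votes` example mentioned above would be written as:
+  /// ```dart
+  /// IntColumn get votes => integer().customConstraint('UNIQUE')();
+  /// ```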
+ /// + /// See also: + /// - https://www.sqlite.org/syntax/column-constraint.html + /// - [GeneratedColumn.$customConstraints] + ColumnBuilder customConstraint(String constraint) => _isGenerated(); + + /// The column will use this expression when a row is inserted and no value + /// has been specified. + /// + /// Note: Unlike most other methods used to declare tables, the parameter + /// [e] which denotes the default expression doesn't have to be a Dart + /// constant. + /// Particularly, you can use operators like those defined in + /// [BooleanExpressionOperators] to form expressions here. + /// + /// If you need a column that just stores a static default value, you could + /// use this method with a [Constant]: + /// ```dart + /// IntColumn get level => int().withDefault(const Constant(1))(); + /// ``` + /// + /// See also: + /// - [Constant], which can be used to model literals that appear in CREATE + /// TABLE statements. + /// - [currentDate] and [currentDateAndTime], which are useful expressions to + /// store the current date/time as a default value. + ColumnBuilder withDefault(Expression e) => _isGenerated(); + + /// Sets a dynamic default value for this column. + /// + /// When a row is inserted into the table and no value has been specified for + /// this column, [onInsert] will be evaluated. Its return value will be used + /// for the missing column. [onInsert] may return different values when called + /// multiple times. + /// + /// Here's an example using the [uuid](https://pub.dev/packages/uuid) package: + /// + /// ```dart + /// final uuid = Uuid(); + /// + /// class Pictures extends Table { + /// TextColumn get id => text().clientDefault(() => uuid.v4())(); + /// BlobColumn get rawData => blob(); + /// + /// @override + /// Set get primaryKey = {id}; + /// } + /// ``` + /// + /// For a default value that's constant, it is more efficient to use + /// [withDefault] instead. [withDefault] will write the default value into the + /// generated `CREATE TABLE` statement. The underlying sql engine will then + /// apply the default value. + ColumnBuilder clientDefault(T Function() onInsert) => _isGenerated(); + + /// Uses a custom [converter] to store custom Dart objects in a single column + /// and automatically mapping them from and to sql. + /// + /// An example might look like this: + /// ```dart + /// // this is the custom object with we want to store in a column. It + /// // can be as complex as you want it to be + /// class MyCustomObject { + /// final String data; + /// MyCustomObject(this.data); + /// } + /// + /// class CustomConverter extends TypeConverter { + /// // this class is responsible for turning a custom object into a string. + /// // this is easy here, but more complex objects could be serialized using + /// // json or any other method of your choice. + /// const CustomConverter(); + /// @override + /// MyCustomObject mapToDart(String fromDb) { + /// return fromDb == null ? null : MyCustomObject(fromDb); + /// } + /// + /// @override + /// String mapToSql(MyCustomObject value) { + /// return value?.data; + /// } + /// } + /// + /// ``` + /// + /// In that case, you could have a table with this column + /// ```dart + /// TextColumn get custom => text().map(const CustomConverter())(); + /// ``` + /// The generated row class will then use a `MyFancyClass` instead of a + /// `String`, which would usually be used for [Table.text] columns. + ColumnBuilder map(TypeConverter converter) => + _isGenerated(); + + /// Turns this column builder into a column. 
This method won't actually be + /// called in your code. Instead, moor_generator will take a look at your + /// source code to figure out your table structure. + Column call() => _isGenerated(); +} + +/// Tells the generator to build an [IntColumn]. See the docs at [ColumnBuilder] +/// for details. +extension BuildIntColumn on ColumnBuilder { + /// Enables auto-increment for this column, which will also make this column + /// the primary key of the table. + /// + /// For this reason, you can't use an [autoIncrement] column and also set a + /// custom [Table.primaryKey] on the same table. + ColumnBuilder autoIncrement() => _isGenerated(); +} + +/// Tells the generator to build an [TextColumn]. See the docs at +/// [ColumnBuilder] for details. +extension BuildTextColumn on ColumnBuilder { + /// Puts a constraint on the minimum and maximum length of text that can be + /// stored in this column. + /// + /// Both [min] and [max] are inclusive. This constraint will be validated in + /// Dart, it doesn't have an impact on the database schema. If [min] is not + /// null and one tries to write a string which [String.length] is + /// _strictly less_ than [min], an exception will be thrown. Similarly, you + /// can't insert strings with a length _strictly greater_ than [max]. + ColumnBuilder withLength({int? min, int? max}) => _isGenerated(); +} + +/// Annotation to use on column getters inside of a [Table] to define the name +/// of the column in the json used by [DataClass.toJson]. +/// +/// Example: +/// ```dart +/// class Users extends Table { +/// IntColumn get id => integer().autoIncrement()(); +/// @JsonKey('user_name') +/// TextColumn get name => text().nullable()(); +/// } +/// ``` +/// When calling [DataClass.toJson] on a `User` object, the output will be a map +/// with the keys "id" and "user_name". The output would be "id" and "name" if +/// the [JsonKey] annotation was omitted. +class JsonKey { + /// The key in the json map to use for this [Column]. See the documentation + /// for [JsonKey] for details. + final String key; + + /// An annotation to tell moor how the name of a column should appear in + /// generated json. See the documentation for [JsonKey] for details. + const JsonKey(this.key); +} diff --git a/drift/lib/src/dsl/database.dart b/drift/lib/src/dsl/database.dart new file mode 100644 index 00000000..088e5688 --- /dev/null +++ b/drift/lib/src/dsl/database.dart @@ -0,0 +1,101 @@ +part of 'dsl.dart'; + +/// Use this class as an annotation to inform moor_generator that a database +/// class should be generated using the specified [UseMoor.tables]. +/// +/// To write a database class, first annotate an empty class with [UseMoor] and +/// run the build runner using (flutter packages) pub run build_runner build. +/// Moor will have generated a class that has the same name as your database +/// class, but with `_$` as a prefix. You can now extend that class and provide +/// a [QueryExecutor] to use moor: +/// ```dart +/// class MyDatabase extends _$MyDatabase { // _$MyDatabase was generated +/// MyDatabase(): +/// super(FlutterQueryExecutor.inDatabaseFolder(path: 'path.db')); +/// } +/// ``` +class UseMoor { + /// The tables to include in the database + final List tables; + + /// Optionally, the list of daos to use. A dao can also make queries like a + /// regular database class, making is suitable to extract parts of your + /// database logic into smaller components. 
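+  ///
+  /// For example (the `Users` table and the `UserDao` class are assumed to
+  /// exist in your code):
+  /// ```dart
+  /// @UseMoor(tables: [Users], daos: [UserDao])
+  /// class MyDatabase extends _$MyDatabase {
+  ///   // ...
+  /// }
+  /// ```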
+ /// + /// For instructions on how to write a dao, see the documentation of [UseDao] + final List daos; + + /// {@template moor_compile_queries_param} + /// Optionally, a list of named sql queries. During a build, moor will look at + /// the defined sql, figure out what they do, and write appropriate + /// methods in your generated database. + /// + /// For instance, when using + /// ```dart + /// @UseMoor( + /// tables: [Users], + /// queries: { + /// 'userById': 'SELECT * FROM users WHERE id = ?', + /// }, + /// ) + /// ``` + /// Moor will generate two methods for you: `userById(int id)` and + /// `watchUserById(int id)`. + /// {@endtemplate} + final Map queries; + + /// {@template moor_include_param} + /// Defines the `.moor` files to include when building the table structure for + /// this database. For details on how to integrate `.moor` files into your + /// Dart code, see [the documentation](https://moor.simonbinder.eu/docs/using-sql/custom_tables/). + /// {@endtemplate} + final Set include; + + /// Use this class as an annotation to inform moor_generator that a database + /// class should be generated using the specified [UseMoor.tables]. + const UseMoor({ + this.tables = const [], + this.daos = const [], + this.queries = const {}, + this.include = const {}, + }); +} + +/// Annotation to use on classes that implement [DatabaseAccessor]. It specifies +/// which tables should be made available in this dao. +/// +/// To write a dao, you'll first have to write a database class. See [UseMoor] +/// for instructions on how to do that. Then, create an empty class that is +/// annotated with [UseDao] and that extends [DatabaseAccessor]. For instance, +/// if you have a class called `MyDatabase`, this could look like this: +/// ```dart +/// class MyDao extends DatabaseAccessor { +/// MyDao(MyDatabase db) : super(db); +/// } +/// ``` +/// After having run the build step once more, moor will have generated a mixin +/// called `_$MyDaoMixin`. Change your class definition to +/// `class MyDao extends DatabaseAccessor with _$MyDaoMixin` and +/// you're ready to make queries inside your dao. You can obtain an instance of +/// that dao by using the getter that will be generated inside your database +/// class. +/// +/// See also: +/// - https://moor.simonbinder.eu/daos/ +class UseDao { + /// The tables accessed by this DAO. + final List tables; + + /// {@macro moor_compile_queries_param} + final Map queries; + + /// {@macro moor_include_param} + final Set include; + + /// Annotation for a class to declare it as an dao. See [UseDao] and the + /// referenced documentation on how to use daos with moor. + const UseDao( + {this.tables = const [], + this.queries = const {}, + this.include = const {}}); +} diff --git a/drift/lib/src/dsl/dsl.dart b/drift/lib/src/dsl/dsl.dart new file mode 100644 index 00000000..2788b0ef --- /dev/null +++ b/drift/lib/src/dsl/dsl.dart @@ -0,0 +1,40 @@ +import 'dart:typed_data' show Uint8List; + +import 'package:drift/drift.dart'; +import 'package:drift/sqlite_keywords.dart'; +import 'package:meta/meta.dart'; +import 'package:meta/meta_meta.dart'; + +part 'columns.dart'; +part 'database.dart'; +part 'table.dart'; + +/// Implementation for dsl methods that aren't called at runtime but only exist +/// for the generator to pick up. For instance, in +/// ```dart +/// class MyTable extends Table { +/// IntColumn get id => integer().autoIncrement()(); +/// } +/// ``` +/// Neither [Table.integer], [BuildIntColumn.autoIncrement] or +/// [BuildColumn.call] will be called at runtime. 
Instead, the generator will +/// take a look at the written Dart code to recognize that `id` is a column of +/// type int that has auto increment (and is thus the primary key). It will +/// generate a subclass of `MyTable` which looks like this: +/// ```dart +/// class _$MyTable extends MyTable { +/// IntColumn get id => GeneratedIntColumn( +/// 'id', +/// 'my-table', +/// false, +/// declaredAsPrimaryKey: false, +/// declaredAsAutoIncrement: true, +/// ); +/// } +/// ``` +Never _isGenerated() { + throw UnsupportedError( + 'This method should not be called at runtime. Are you sure you re-ran the ' + 'builder after changing your tables or databases?', + ); +} diff --git a/drift/lib/src/dsl/table.dart b/drift/lib/src/dsl/table.dart new file mode 100644 index 00000000..5126c49b --- /dev/null +++ b/drift/lib/src/dsl/table.dart @@ -0,0 +1,163 @@ +part of 'dsl.dart'; + +/// Base class for dsl [Table]s and [View]s. +abstract class HasResultSet { + /// Default constant constructor. + const HasResultSet(); +} + +/// Subclasses represent a table in a database generated by moor. +abstract class Table extends HasResultSet { + /// Defines a table to be used with moor. + const Table(); + + /// The sql table name to be used. By default, moor will use the snake_case + /// representation of your class name as the sql table name. For instance, a + /// [Table] class named `LocalSettings` will be called `local_settings` by + /// default. + /// You can change that behavior by overriding this method to use a custom + /// name. Please note that you must directly return a string literal by using + /// a getter. For instance `@override String get tableName => 'my_table';` is + /// valid, whereas `@override final String tableName = 'my_table';` or + /// `@override String get tableName => createMyTableName();` is not. + @visibleForOverriding + String? get tableName => null; + + /// Whether to append a `WITHOUT ROWID` clause in the `CREATE TABLE` + /// statement. This is intended to be used by generated code only. + bool get withoutRowId => false; + + /// Moor will write some table constraints automatically, for instance when + /// you override [primaryKey]. You can turn this behavior off if you want to. + /// This is intended to be used by generated code only. + bool get dontWriteConstraints => false; + + /// Override this to specify custom primary keys: + /// ```dart + /// class IngredientInRecipes extends Table { + /// @override + /// Set get primaryKey => {recipe, ingredient}; + /// + /// IntColumn get recipe => integer()(); + /// IntColumn get ingredient => integer()(); + /// + /// IntColumn get amountInGrams => integer().named('amount')(); + ///} + /// ``` + /// The getter must return a set literal using the `=>` syntax so that the + /// moor generator can understand the code. + /// Also, please note that it's an error to have an + /// [BuildIntColumn.autoIncrement] column and a custom primary key. + /// As an auto-incremented `IntColumn` is recognized by moor to be the + /// primary key, doing so will result in an exception thrown at runtime. + @visibleForOverriding + Set? get primaryKey => null; + + /// Custom table constraints that should be added to the table. + /// + /// See also: + /// - https://www.sqlite.org/syntax/table-constraint.html, which defines what + /// table constraints are supported. + List get customConstraints => []; + + /// Use this as the body of a getter to declare a column that holds integers. 
+ /// Example (inside the body of a table class): + /// ``` + /// IntColumn get id => integer().autoIncrement()(); + /// ``` + @protected + ColumnBuilder integer() => _isGenerated(); + + /// Creates a column to store an `enum` class [T]. + /// + /// In the database, the column will be represented as an integer + /// corresponding to the enum's index. Note that this can invalidate your data + /// if you add another value to the enum class. + @protected + ColumnBuilder intEnum() => _isGenerated(); + + /// Use this as the body of a getter to declare a column that holds strings. + /// Example (inside the body of a table class): + /// ``` + /// TextColumn get name => text()(); + /// ``` + @protected + ColumnBuilder text() => _isGenerated(); + + /// Use this as the body of a getter to declare a column that holds bools. + /// Example (inside the body of a table class): + /// ``` + /// BoolColumn get isAwesome => boolean()(); + /// ``` + @protected + ColumnBuilder boolean() => _isGenerated(); + + /// Use this as the body of a getter to declare a column that holds date and + /// time. Note that [DateTime] values are stored on a second-accuracy. + /// Example (inside the body of a table class): + /// ``` + /// DateTimeColumn get accountCreatedAt => dateTime()(); + /// ``` + @protected + ColumnBuilder dateTime() => _isGenerated(); + + /// Use this as the body of a getter to declare a column that holds arbitrary + /// data blobs, stored as an [Uint8List]. Example: + /// ``` + /// BlobColumn get payload => blob()(); + /// ``` + @protected + ColumnBuilder blob() => _isGenerated(); + + /// Use this as the body of a getter to declare a column that holds floating + /// point numbers. Example + /// ``` + /// RealColumn get averageSpeed => real()(); + /// ``` + @protected + ColumnBuilder real() => _isGenerated(); +} + +/// A class to be used as an annotation on [Table] classes to customize the +/// name for the data class that will be generated for the table class. The data +/// class is a dart object that will be used to represent a row in the table. +/// {@template moor_custom_data_class} +/// By default, moor will attempt to use the singular form of the table name +/// when naming data classes (e.g. a table named "Users" will generate a data +/// class called "User"). However, this doesn't work for irregular plurals and +/// you might want to choose a different name, for which this annotation can be +/// used. +/// {@template} +@Target({TargetKind.classType}) +class DataClassName { + /// The overridden name to use when generating the data class for a table. + /// {@macro moor_custom_data_class} + final String name; + + /// Customize the data class name for a given table. + /// {@macro moor_custom_data_class} + const DataClassName(this.name); +} + +/// An annotation specifying an existing class to be used as a data class. +@Target({TargetKind.classType}) +@experimental +class UseRowClass { + /// The existing class + /// + /// This type must refer to an existing class. All other types, like functions + /// or types with arguments, are not allowed. + final Type type; + + /// The name of the constructor to use. + /// + /// When this option is not set, the default (unnamed) constructor will be + /// used to map database rows to the desired row class. + final String constructor; + + /// Customize the class used by moor to hold an instance of an annotated + /// table. + /// + /// For details, see the overall documentation on [UseRowClass]. 
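+  ///
+  /// For instance, assuming you have written a `User` class yourself, a table
+  /// could opt in to using it like this:
+  /// ```dart
+  /// @UseRowClass(User)
+  /// class Users extends Table {
+  ///   // column definitions ...
+  /// }
+  /// ```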
+ const UseRowClass(this.type, {this.constructor = ''}); +} diff --git a/drift/lib/src/ffi/database.dart b/drift/lib/src/ffi/database.dart new file mode 100644 index 00000000..2e4b1d50 --- /dev/null +++ b/drift/lib/src/ffi/database.dart @@ -0,0 +1,259 @@ +import 'dart:io'; + +import 'package:meta/meta.dart'; +import 'package:sqlite3/sqlite3.dart'; + +import '../../backends.dart'; +import 'database_tracker.dart'; +import 'moor_ffi_functions.dart'; + +/// Signature of a function that can perform setup work on a [database] before +/// moor is fully ready. +/// +/// This could be used to, for instance, set encryption keys for SQLCipher +/// implementations. +typedef DatabaseSetup = void Function(Database database); + +/// A moor database implementation based on `dart:ffi`, running directly in a +/// Dart VM or an AOT compiled Dart/Flutter application. +class NativeDatabase extends DelegatedDatabase { + NativeDatabase._(DatabaseDelegate delegate, bool logStatements) + : super(delegate, isSequential: true, logStatements: logStatements); + + /// Creates a database that will store its result in the [file], creating it + /// if it doesn't exist. + /// + /// {@template moor_vm_database_factory} + /// If [logStatements] is true (defaults to `false`), generated sql statements + /// will be printed before executing. This can be useful for debugging. + /// The optional [setup] function can be used to perform a setup just after + /// the database is opened, before moor is fully ready. This can be used to + /// add custom user-defined sql functions or to provide encryption keys in + /// SQLCipher implementations. + /// {@endtemplate} + factory NativeDatabase(File file, + {bool logStatements = false, DatabaseSetup? setup}) { + return NativeDatabase._(_VmDelegate(file, setup), logStatements); + } + + /// Creates an in-memory database won't persist its changes on disk. + /// + /// {@macro moor_vm_database_factory} + factory NativeDatabase.memory( + {bool logStatements = false, DatabaseSetup? setup}) { + return NativeDatabase._(_VmDelegate(null, setup), logStatements); + } + + /// Creates a moor executor for an opened [database] from the `sqlite3` + /// package. + /// + /// When the [closeUnderlyingOnClose] argument is set (which is the default), + /// calling [QueryExecutor.close] on the returned [NativeDatabase] will also + /// [Database.dispose] the [database] passed to this constructor. + /// + /// Using [NativeDatabase.opened] may be useful when you want to use the same + /// underlying [Database] in multiple moor connections. Moor uses this + /// internally when running [integration tests for migrations](https://moor.simonbinder.eu/docs/advanced-features/migrations/#verifying-migrations). + /// + /// {@macro moor_vm_database_factory} + factory NativeDatabase.opened(Database database, + {bool logStatements = false, + DatabaseSetup? setup, + bool closeUnderlyingOnClose = true}) { + return NativeDatabase._( + _VmDelegate._opened(database, setup, closeUnderlyingOnClose), + logStatements); + } + + /// Disposes resources allocated by all `VmDatabase` instances of this + /// process. + /// + /// This method will call `sqlite3_close_v2` for every `VmDatabase` that this + /// process has opened without closing later. + /// + /// __Warning__: This functionality appears to cause crashes on iOS, and it + /// does nothing on Android. It's mainly intended for Desktop operating + /// systems, so try to avoid calling it where it's not necessary. 
+ /// For safety measures, avoid calling [closeExistingInstances] in release + /// builds. + /// + /// Ideally, all databases should be closed properly in Dart. In that case, + /// it's not necessary to call [closeExistingInstances]. However, features + /// like hot (stateless) restart can make it impossible to reliably close + /// every database. In that case, we leak native sqlite3 database connections + /// that aren't referenced by any Dart object. Moor can track those + /// connections across Dart VM restarts by storing them in an in-memory sqlite + /// database. + /// Calling this method can cleanup resources and database locks after a + /// restart. + /// + /// Note that calling [closeExistingInstances] when you're still actively + /// using a [NativeDatabase] can lead to crashes, since the database would + /// then attempt to use an invalid connection. + /// This, this method should only be called when you're certain that there + /// aren't any active [NativeDatabase]s, not even on another isolate. + /// + /// A suitable place to call [closeExistingInstances] is at an early stage + /// of your `main` method, before you're using moor. + /// + /// ```dart + /// void main() { + /// // Guard against zombie database connections caused by hot restarts + /// assert(() { + /// VmDatabase.closeExistingInstances(); + /// return true; + /// }()); + /// + /// runApp(MyApp()); + /// } + /// ``` + /// + /// For more information, see [issue 835](https://github.com/simolus3/moor/issues/835). + @experimental + static void closeExistingInstances() { + tracker.closeExisting(); + } +} + +class _VmDelegate extends DatabaseDelegate { + late Database _db; + + bool _hasCreatedDatabase = false; + bool _isOpen = false; + + final File? file; + final DatabaseSetup? setup; + final bool closeUnderlyingWhenClosed; + + _VmDelegate(this.file, this.setup) : closeUnderlyingWhenClosed = true; + + _VmDelegate._opened(this._db, this.setup, this.closeUnderlyingWhenClosed) + : file = null, + _hasCreatedDatabase = true { + _initializeDatabase(); + } + + @override + TransactionDelegate get transactionDelegate => const NoTransactionDelegate(); + + @override + late DbVersionDelegate versionDelegate; + + @override + Future get isOpen => Future.value(_isOpen); + + @override + Future open(QueryExecutorUser user) async { + if (!_hasCreatedDatabase) { + _createDatabase(); + _initializeDatabase(); + } + + _isOpen = true; + return Future.value(); + } + + void _createDatabase() { + assert(!_hasCreatedDatabase); + _hasCreatedDatabase = true; + + final file = this.file; + if (file != null) { + // Create the parent directory if it doesn't exist. 
sqlite will emit + // confusing misuse warnings otherwise + final dir = file.parent; + if (!dir.existsSync()) { + dir.createSync(recursive: true); + } + + _db = sqlite3.open(file.path); + tracker.markOpened(file.path, _db); + } else { + _db = sqlite3.openInMemory(); + } + } + + void _initializeDatabase() { + _db.useMoorVersions(); + setup?.call(_db); + versionDelegate = _VmVersionDelegate(_db); + } + + @override + Future runBatched(BatchedStatements statements) async { + final prepared = [ + for (final stmt in statements.statements) _db.prepare(stmt), + ]; + + for (final application in statements.arguments) { + final stmt = prepared[application.statementIndex]; + + stmt.execute(application.arguments); + } + + for (final stmt in prepared) { + stmt.dispose(); + } + + return Future.value(); + } + + Future _runWithArgs(String statement, List args) async { + if (args.isEmpty) { + _db.execute(statement); + } else { + final stmt = _db.prepare(statement); + stmt.execute(args); + stmt.dispose(); + } + } + + @override + Future runCustom(String statement, List args) async { + await _runWithArgs(statement, args); + } + + @override + Future runInsert(String statement, List args) async { + await _runWithArgs(statement, args); + return _db.lastInsertRowId; + } + + @override + Future runUpdate(String statement, List args) async { + await _runWithArgs(statement, args); + return _db.getUpdatedRows(); + } + + @override + Future runSelect(String statement, List args) async { + final stmt = _db.prepare(statement); + final result = stmt.select(args); + stmt.dispose(); + + return Future.value(QueryResult.fromRows(result.toList())); + } + + @override + Future close() async { + if (closeUnderlyingWhenClosed) { + _db.dispose(); + tracker.markClosed(_db); + } + } +} + +class _VmVersionDelegate extends DynamicVersionDelegate { + final Database database; + + _VmVersionDelegate(this.database); + + @override + Future get schemaVersion => Future.value(database.userVersion); + + @override + Future setSchemaVersion(int version) { + database.userVersion = version; + return Future.value(); + } +} diff --git a/drift/lib/src/ffi/database_tracker.dart b/drift/lib/src/ffi/database_tracker.dart new file mode 100644 index 00000000..97a5df7a --- /dev/null +++ b/drift/lib/src/ffi/database_tracker.dart @@ -0,0 +1,73 @@ +import 'dart:ffi'; + +import 'package:sqlite3/sqlite3.dart'; + +/// This entire file is an elaborate hack to workaround https://github.com/simolus3/moor/issues/835. +/// +/// Users were running into database deadlocks after (stateless) hot restarts +/// in Flutter when they use transactions. The problem is that we don't have a +/// chance to call `sqlite3_close` before a Dart VM restart, the Dart object is +/// just gone without a trace. This means that we're leaking sqlite3 database +/// connections on restarts. +/// Even worse, those connections might have a lock on the database, for +/// instance if they just started a transaction. +/// +/// Our solution is to store open sqlite3 database connections in an in-memory +/// sqlite database which can survive restarts! For now, we keep track of the +/// pointer of an sqlite3 database handle in that database. +/// At an early stage of their `main()` method, users can now use +/// `VmDatabase.closeExistingInstances()` to release those resources. +final DatabaseTracker tracker = DatabaseTracker(); + +/// Internal class that we don't export to moor users. See [tracker] for why +/// this is necessary. 
+class DatabaseTracker { + final Database _db; + + /// Creates a new tracker with necessary tables. + DatabaseTracker() + : _db = sqlite3.open( + 'file:moor_connection_store?mode=memory&cache=shared', + uri: true, + ) { + _db.execute(''' +CREATE TABLE IF NOT EXISTS open_connections( + database_pointer INTEGER NOT NULL PRIMARY KEY, + path TEXT NULL +); + '''); + } + + /// Tracks the [openedDb]. The [path] argument can be used to track the path + /// of that database, if it's bound to a file. + void markOpened(String path, Database openedDb) { + final stmt = _db.prepare('INSERT INTO open_connections VALUES (?, ?)'); + stmt.execute([openedDb.handle.address, path]); + stmt.dispose(); + } + + /// Marks the database [db] as closed. + void markClosed(Database db) { + final ptr = db.handle.address; + _db.execute('DELETE FROM open_connections WHERE database_pointer = $ptr'); + } + + /// Closes tracked database connections. + void closeExisting() { + _db.execute('BEGIN;'); + + try { + final results = + _db.select('SELECT database_pointer FROM open_connections'); + + for (final row in results) { + final ptr = Pointer.fromAddress(row.columnAt(0) as int); + sqlite3.fromPointer(ptr).dispose(); + } + + _db.execute('DELETE FROM open_connections;'); + } finally { + _db.execute('COMMIT;'); + } + } +} diff --git a/drift/lib/src/ffi/moor_ffi_functions.dart b/drift/lib/src/ffi/moor_ffi_functions.dart new file mode 100644 index 00000000..0a2971da --- /dev/null +++ b/drift/lib/src/ffi/moor_ffi_functions.dart @@ -0,0 +1,201 @@ +import 'dart:math'; + +import 'package:sqlite3/sqlite3.dart'; + +// ignore_for_file: avoid_returning_null, only_throw_errors + +/// Extension to register moor-specific sql functions. +extension EnableMoorFunctions on Database { + /// Enables moor-specific sql functions on this database. + void useMoorVersions() { + createFunction( + functionName: 'power', + deterministic: true, + argumentCount: const AllowedArgumentCount(2), + function: _pow, + ); + createFunction( + functionName: 'pow', + deterministic: true, + argumentCount: const AllowedArgumentCount(2), + function: _pow, + ); + + createFunction( + functionName: 'sqrt', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(sqrt), + ); + createFunction( + functionName: 'sin', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(sin), + ); + createFunction( + functionName: 'cos', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(cos), + ); + createFunction( + functionName: 'tan', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(tan), + ); + createFunction( + functionName: 'asin', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(asin), + ); + createFunction( + functionName: 'acos', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(acos), + ); + createFunction( + functionName: 'atan', + deterministic: true, + argumentCount: const AllowedArgumentCount(1), + function: _unaryNumFunction(atan), + ); + + createFunction( + functionName: 'regexp', + deterministic: true, + argumentCount: const AllowedArgumentCount(2), + function: _regexpImpl, + ); + // Third argument can be used to set flags (like multiline, case + // sensitivity, etc.) 
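+    // (See _regexpImpl below: bit 1 enables multiLine, bit 2 disables case
+    // sensitivity, bit 4 enables unicode and bit 8 enables dotAll.)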
+ createFunction( + functionName: 'regexp_moor_ffi', + deterministic: true, + argumentCount: const AllowedArgumentCount(3), + function: _regexpImpl, + ); + + createFunction( + functionName: 'moor_contains', + deterministic: true, + argumentCount: const AllowedArgumentCount(2), + function: _containsImpl, + ); + createFunction( + functionName: 'moor_contains', + deterministic: true, + argumentCount: const AllowedArgumentCount(3), + function: _containsImpl, + ); + createFunction( + functionName: 'current_time_millis', + deterministic: true, + directOnly: false, + argumentCount: const AllowedArgumentCount(0), + function: (List args) => DateTime.now().millisecondsSinceEpoch, + ); + } +} + +num? _pow(List args) { + final first = args[0]; + final second = args[1]; + + if (first == null || second == null || first is! num || second is! num) { + return null; + } + + return pow(first, second); +} + +/// Base implementation for a sqlite function that takes one numerical argument +/// and returns one numerical argument. +/// +/// When not called with a number, returns will null. Otherwise, returns with +/// [calculation]. +num? Function(List) _unaryNumFunction(num Function(num) calculation) { + return (List args) { + // sqlite will ensure that this is only called with one argument + final value = args[0]; + if (value is num) { + return calculation(value); + } else { + return null; + } + }; +} + +bool? _regexpImpl(List args) { + var multiLine = false; + var caseSensitive = true; + var unicode = false; + var dotAll = false; + + final argCount = args.length; + if (argCount < 2 || argCount > 3) { + throw 'Expected two or three arguments to regexp'; + } + + final firstParam = args[0]; + final secondParam = args[1]; + + if (firstParam == null || secondParam == null) { + return null; + } + if (firstParam is! String || secondParam is! String) { + throw 'Expected two strings as parameters to regexp'; + } + + if (argCount == 3) { + // In the variant with three arguments, the last (int) arg can be used to + // enable regex flags. See the regexp() extension in moor for details. + final value = args[2]; + if (value is int) { + multiLine = (value & 1) == 1; + caseSensitive = (value & 2) != 2; + unicode = (value & 4) == 4; + dotAll = (value & 8) == 8; + } + } + + RegExp regex; + try { + regex = RegExp( + firstParam, + multiLine: multiLine, + caseSensitive: caseSensitive, + unicode: unicode, + dotAll: dotAll, + ); + } on FormatException { + throw 'Invalid regex'; + } + + return regex.hasMatch(secondParam); +} + +bool _containsImpl(List args) { + final argCount = args.length; + if (argCount < 2 || argCount > 3) { + throw 'Expected 2 or 3 arguments to moor_contains'; + } + + final first = args[0]; + final second = args[1]; + + if (first is! String || second is! String) { + throw 'First two args to contains must be strings'; + } + + final caseSensitive = argCount == 3 && args[2] == 1; + + final result = caseSensitive + ? 
first.contains(second) + : first.toLowerCase().contains(second.toLowerCase()); + + return result; +} diff --git a/drift/lib/src/isolate.dart b/drift/lib/src/isolate.dart new file mode 100644 index 00000000..e9062512 --- /dev/null +++ b/drift/lib/src/isolate.dart @@ -0,0 +1,70 @@ +import 'dart:isolate'; + +import 'package:async/async.dart'; +import 'package:meta/meta.dart'; +import 'package:stream_channel/isolate_channel.dart'; + +import '../drift.dart'; +import '../remote.dart'; + +// All of this is drift-internal and not exported, so: +// ignore_for_file: public_member_api_docs + +@internal +class RunningMoorServer { + final Isolate self; + final bool killIsolateWhenDone; + + final MoorServer server; + final ReceivePort connectPort = ReceivePort('drift connect'); + int _counter = 0; + + SendPort get portToOpenConnection => connectPort.sendPort; + + RunningMoorServer(this.self, DatabaseConnection connection, + {this.killIsolateWhenDone = true}) + : server = MoorServer(connection, allowRemoteShutdown: true) { + final subscription = connectPort.listen((message) { + if (message is SendPort) { + final receiveForConnection = + ReceivePort('drift channel #${_counter++}'); + message.send(receiveForConnection.sendPort); + final channel = IsolateChannel(receiveForConnection, message) + .changeStream((source) => source.map(decodeAfterTransport)) + .transformSink( + StreamSinkTransformer.fromHandlers( + handleData: (data, sink) => + sink.add(prepareForTransport(data))), + ); + + server.serve(channel); + } + }); + + server.done.then((_) { + subscription.cancel(); + connectPort.close(); + if (killIsolateWhenDone) self.kill(); + }); + } +} + +Object? prepareForTransport(Object? source) { + if (source is! List) return source; + + if (source is Uint8List) { + return TransferableTypedData.fromList([source]); + } + + return source.map(prepareForTransport).toList(); +} + +Object? decodeAfterTransport(Object? source) { + if (source is TransferableTypedData) { + return source.materialize().asUint8List(); + } else if (source is List) { + return source.map(decodeAfterTransport).toList(); + } else { + return source; + } +} diff --git a/drift/lib/src/remote/client_impl.dart b/drift/lib/src/remote/client_impl.dart new file mode 100644 index 00000000..97a00743 --- /dev/null +++ b/drift/lib/src/remote/client_impl.dart @@ -0,0 +1,239 @@ +import 'dart:async'; + +import 'package:drift/src/runtime/api/runtime_api.dart'; +import 'package:drift/src/runtime/executor/executor.dart'; +import 'package:drift/src/runtime/executor/stream_queries.dart'; +import 'package:drift/src/runtime/types/sql_types.dart'; +import 'package:stream_channel/stream_channel.dart'; + +import '../runtime/cancellation_zone.dart'; +import 'communication.dart'; +import 'protocol.dart'; + +/// The client part of a remote moor communication scheme. +class MoorClient { + final MoorCommunication _channel; + + late final _RemoteStreamQueryStore _streamStore = + _RemoteStreamQueryStore(this); + + /// The resulting database connection. Operations on this connection are + /// relayed through the remote communication channel. + late final DatabaseConnection connection = DatabaseConnection( + SqlTypeSystem.defaultInstance, + _RemoteQueryExecutor(this), + _streamStore, + ); + + late QueryExecutorUser _connectedDb; + + /// Starts relaying database operations over the request channel. 
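+  ///
+  /// A connection sketch (assuming `channel` is a [StreamChannel] that has
+  /// already been wired up to a server, for example through isolate ports):
+  ///
+  /// ```dart
+  /// final client = MoorClient(channel, false);
+  /// // The relayed connection can then back a regular database instance.
+  /// final DatabaseConnection connection = client.connection;
+  /// ```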
+ MoorClient(StreamChannel channel, bool debugLog) + : _channel = MoorCommunication(channel, debugLog) { + _channel.setRequestHandler(_handleRequest); + } + + dynamic _handleRequest(Request request) { + final payload = request.payload; + + if (payload is RunBeforeOpen) { + final executor = _RemoteQueryExecutor(this, payload.createdExecutor); + return _connectedDb.beforeOpen(executor, payload.details); + } else if (payload is NotifyTablesUpdated) { + _streamStore.handleTableUpdates(payload.updates.toSet(), true); + } + } +} + +abstract class _BaseExecutor extends QueryExecutor { + final MoorClient client; + int? _executorId; + + _BaseExecutor(this.client, [this._executorId]); + + @override + Future runBatched(BatchedStatements statements) { + return client._channel + .request(ExecuteBatchedStatement(statements, _executorId)); + } + + Future _runRequest( + StatementMethod method, String sql, List? args) { + // fast path: If the operation has already been cancelled, don't bother + // sending a request in the first place + checkIfCancelled(); + + final id = client._channel.newRequestId(); + // otherwise, send the request now and cancel it later, if that's desired + doOnCancellation(() { + client._channel.request(RequestCancellation(id)); + }); + + return client._channel.request( + ExecuteQuery(method, sql, args ?? const [], _executorId), + requestId: id, + ); + } + + @override + Future runCustom(String statement, [List? args]) { + return _runRequest( + StatementMethod.custom, + statement, + args, + ); + } + + @override + Future runDelete(String statement, List args) { + return _runRequest(StatementMethod.deleteOrUpdate, statement, args); + } + + @override + Future runUpdate(String statement, List args) { + return _runRequest(StatementMethod.deleteOrUpdate, statement, args); + } + + @override + Future runInsert(String statement, List args) { + return _runRequest(StatementMethod.insert, statement, args); + } + + @override + Future>> runSelect( + String statement, List args) async { + final result = await _runRequest( + StatementMethod.select, statement, args); + + return result.rows; + } +} + +class _RemoteQueryExecutor extends _BaseExecutor { + _RemoteQueryExecutor(MoorClient client, [int? executorId]) + : super(client, executorId); + + Completer? _setSchemaVersion; + Future? _serverIsOpen; + + @override + TransactionExecutor beginTransaction() { + return _RemoteTransactionExecutor(client, _executorId); + } + + @override + Future ensureOpen(QueryExecutorUser user) async { + client._connectedDb = user; + if (_setSchemaVersion != null) { + await _setSchemaVersion!.future; + _setSchemaVersion = null; + } + + return _serverIsOpen ??= client._channel + .request(EnsureOpen(user.schemaVersion, _executorId)); + } + + @override + Future close() { + if (!client._channel.isClosed) { + client._channel.close(); + } + + return Future.value(); + } +} + +class _RemoteTransactionExecutor extends _BaseExecutor + implements TransactionExecutor { + final int? _outerExecutorId; + + _RemoteTransactionExecutor(MoorClient client, this._outerExecutorId) + : super(client); + + Completer? _pendingOpen; + bool _done = false; + + @override + TransactionExecutor beginTransaction() { + throw UnsupportedError('Nested transactions'); + } + + @override + Future ensureOpen(_) { + assert( + !_done, + 'Transaction used after it was closed. 
Are you missing an await ' + 'somewhere?', + ); + + final completer = _pendingOpen ??= Completer()..complete(_openAtServer()); + return completer.future; + } + + Future _openAtServer() async { + _executorId = await client._channel.request( + RunTransactionAction(TransactionControl.begin, _outerExecutorId)); + return true; + } + + Future _sendAction(TransactionControl action) { + return client._channel.request(RunTransactionAction(action, _executorId)); + } + + @override + Future rollback() async { + // don't do anything if the transaction isn't open yet + if (_pendingOpen == null) return; + + await _sendAction(TransactionControl.rollback); + _done = true; + } + + @override + Future send() async { + // don't do anything if the transaction isn't open yet + if (_pendingOpen == null) return; + + await _sendAction(TransactionControl.commit); + _done = true; + } +} + +class _RemoteStreamQueryStore extends StreamQueryStore { + final MoorClient _client; + final Set _awaitingUpdates = {}; + + _RemoteStreamQueryStore(this._client); + + @override + void handleTableUpdates(Set updates, + [bool comesFromServer = false]) { + if (comesFromServer) { + super.handleTableUpdates(updates); + } else { + // requests are async, but the function is synchronous. We await that + // future in close() + final completer = Completer(); + _awaitingUpdates.add(completer); + + completer.complete( + _client._channel.request(NotifyTablesUpdated(updates.toList()))); + + completer.future.catchError((_) { + // we don't care about errors if the connection is closed before the + // update is dispatched. Why? + }, test: (e) => e is ConnectionClosedException).whenComplete(() { + _awaitingUpdates.remove(completer); + }); + } + } + + @override + Future close() async { + await super.close(); + + // create a copy because awaiting futures in here mutates the set + final updatesCopy = _awaitingUpdates.map((e) => e.future).toList(); + await Future.wait(updatesCopy); + } +} diff --git a/drift/lib/src/remote/communication.dart b/drift/lib/src/remote/communication.dart new file mode 100644 index 00000000..372817cb --- /dev/null +++ b/drift/lib/src/remote/communication.dart @@ -0,0 +1,165 @@ +import 'dart:async'; + +import 'package:drift/src/runtime/api/runtime_api.dart'; +import 'package:stream_channel/stream_channel.dart'; + +import '../runtime/cancellation_zone.dart'; +import 'protocol.dart'; + +/// Wrapper around a two-way communication channel to support requests and +/// responses. +class MoorCommunication { + static const _protocol = MoorProtocol(); + + final StreamChannel _channel; + final bool _debugLog; + + StreamSubscription? _inputSubscription; + + // note that there are two MoorCommunication instances in each connection, + // (one per remote). Each of them has an independent _currentRequestId field + int _currentRequestId = 0; + final Completer _closeCompleter = Completer(); + final Map _pendingRequests = {}; + final StreamController _incomingRequests = + StreamController(sync: true); + + /// Starts a moor communication channel over a raw [StreamChannel]. + MoorCommunication(this._channel, [this._debugLog = false]) { + _inputSubscription = _channel.stream.listen( + _handleMessage, + onDone: _closeCompleter.complete, + ); + } + + /// Returns a future that resolves when this communication channel was closed, + /// either via a call to [close] from this isolate or from the other isolate. + Future get closed => _closeCompleter.future; + + /// Whether this channel is closed at the moment. 
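+  ///
+  /// For example (sketch), a caller can guard against double-closing with
+  /// `if (!comm.isClosed) comm.close();`, or await [closed] to observe a
+  /// shutdown initiated by either side.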
+ bool get isClosed => _closeCompleter.isCompleted; + + /// A stream of requests coming from the other peer. + Stream get incomingRequests => _incomingRequests.stream; + + /// Returns a new request id to be used for the next request. + int newRequestId() => _currentRequestId++; + + /// Closes the connection to the server. + void close() { + if (isClosed) return; + + _channel.sink.close(); + _closeLocally(); + } + + void _closeLocally() { + _inputSubscription?.cancel(); + + for (final pending in _pendingRequests.values) { + pending.completeError(const ConnectionClosedException()); + } + _pendingRequests.clear(); + } + + void _handleMessage(Object? msg) { + msg = _protocol.deserialize(msg!); + + if (_debugLog) { + driftRuntimeOptions.debugPrint('[IN]: $msg'); + } + + if (msg is SuccessResponse) { + final completer = _pendingRequests[msg.requestId]; + completer?.complete(msg.response); + _pendingRequests.remove(msg.requestId); + } else if (msg is ErrorResponse) { + final completer = _pendingRequests[msg.requestId]; + final trace = msg.stackTrace != null + ? StackTrace.fromString(msg.stackTrace!) + : null; + completer?.completeError(msg.error, trace); + _pendingRequests.remove(msg.requestId); + } else if (msg is Request) { + _incomingRequests.add(msg); + } else if (msg is CancelledResponse) { + final completer = _pendingRequests[msg.requestId]; + completer?.completeError(const CancellationException()); + } + } + + /// Sends a request and waits for the peer to reply with a value that is + /// assumed to be of type [T]. + /// + /// The [requestId] parameter can be used to set a fixed request id for the + /// request. + Future request(Object? request, {int? requestId}) { + final id = requestId ?? newRequestId(); + final completer = Completer(); + + _pendingRequests[id] = completer; + _send(Request(id, request)); + return completer.future; + } + + void _send(Message msg) { + if (isClosed) { + throw StateError('Tried to send $msg over isolate channel, but the ' + 'connection was closed!'); + } + + if (_debugLog) { + driftRuntimeOptions.debugPrint('[OUT]: $msg'); + } + _channel.sink.add(_protocol.serialize(msg)); + } + + /// Sends a response for a handled [Request]. + void respond(Request request, Object? response) { + _send(SuccessResponse(request.id, response)); + } + + /// Sends an erroneous response for a [Request]. + void respondError(Request request, dynamic error, [StackTrace? trace]) { + // sending a message while closed will throw, so don't even try. + if (isClosed) return; + + if (error is CancellationException) { + _send(CancelledResponse(request.id)); + } else { + _send(ErrorResponse(request.id, error.toString(), trace.toString())); + } + } + + /// Utility that listens to [incomingRequests] and invokes the [handler] on + /// each request, sending the result back to the originating client. If + /// [handler] throws, the error will be re-directed to the client. If + /// [handler] returns a [Future], it will be awaited. + void setRequestHandler(dynamic Function(Request) handler) { + incomingRequests.listen((request) { + try { + final result = handler(request); + + if (result is Future) { + result.then( + (value) => respond(request, value), + onError: (e, StackTrace s) { + respondError(request, e, s); + }, + ); + } else { + respond(request, result); + } + } catch (e, s) { + respondError(request, e, s); + } + }); + } +} + +/// Exception thrown when there are outstanding pending requests at the time the +/// isolate connection was cancelled. 
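+///
+/// A handling sketch (assuming `comm` is a [MoorCommunication] and `payload`
+/// is some request payload):
+///
+/// ```dart
+/// try {
+///   await comm.request(payload);
+/// } on ConnectionClosedException {
+///   // The channel went away before the peer answered; give up gracefully.
+/// }
+/// ```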
+class ConnectionClosedException implements Exception { + /// Constant constructor. + const ConnectionClosedException(); +} diff --git a/drift/lib/src/remote/protocol.dart b/drift/lib/src/remote/protocol.dart new file mode 100644 index 00000000..05a935e9 --- /dev/null +++ b/drift/lib/src/remote/protocol.dart @@ -0,0 +1,436 @@ +// This is a moor-internal file +// ignore_for_file: constant_identifier_names, public_member_api_docs + +import 'package:drift/drift.dart'; + +class MoorProtocol { + const MoorProtocol(); + + static const _tag_Request = 0; + static const _tag_Response_success = 1; + static const _tag_Response_error = 2; + static const _tag_Response_cancelled = 3; + + static const _tag_NoArgsRequest_getTypeSystem = 0; + static const _tag_NoArgsRequest_terminateAll = 1; + + static const _tag_ExecuteQuery = 3; + static const _tag_ExecuteBatchedStatement = 4; + static const _tag_RunTransactionAction = 5; + static const _tag_EnsureOpen = 6; + static const _tag_RunBeforeOpen = 7; + static const _tag_NotifyTablesUpdated = 8; + static const _tag_DefaultSqlTypeSystem = 9; + static const _tag_DirectValue = 10; + static const _tag_SelectResult = 11; + static const _tag_RequestCancellation = 12; + + Object? serialize(Message message) { + if (message is Request) { + return [ + _tag_Request, + message.id, + encodePayload(message.payload), + ]; + } else if (message is ErrorResponse) { + return [ + _tag_Response_error, + message.requestId, + message.error.toString(), + message.stackTrace, + ]; + } else if (message is SuccessResponse) { + return [ + _tag_Response_success, + message.requestId, + encodePayload(message.response), + ]; + } else if (message is CancelledResponse) { + return [_tag_Response_cancelled, message.requestId]; + } + } + + Message deserialize(Object message) { + if (message is! 
List) throw const FormatException('Cannot read message'); + + final tag = message[0]; + final id = message[1] as int; + + switch (tag) { + case _tag_Request: + return Request(id, decodePayload(message[2])); + case _tag_Response_error: + return ErrorResponse(id, message[2] as Object, message[3] as String); + case _tag_Response_success: + return SuccessResponse(id, decodePayload(message[2])); + case _tag_Response_cancelled: + return CancelledResponse(id); + } + + throw const FormatException('Unknown tag'); + } + + dynamic encodePayload(dynamic payload) { + if (payload == null || payload is bool) return payload; + + if (payload is NoArgsRequest) { + return payload.index; + } else if (payload is ExecuteQuery) { + return [ + _tag_ExecuteQuery, + payload.method.index, + payload.sql, + [for (final arg in payload.args) _encodeDbValue(arg)], + payload.executorId, + ]; + } else if (payload is ExecuteBatchedStatement) { + return [ + _tag_ExecuteBatchedStatement, + payload.stmts.statements, + for (final arg in payload.stmts.arguments) + [ + arg.statementIndex, + for (final value in arg.arguments) _encodeDbValue(value), + ], + payload.executorId, + ]; + } else if (payload is RunTransactionAction) { + return [ + _tag_RunTransactionAction, + payload.control.index, + payload.executorId, + ]; + } else if (payload is EnsureOpen) { + return [_tag_EnsureOpen, payload.schemaVersion, payload.executorId]; + } else if (payload is RunBeforeOpen) { + return [ + _tag_RunBeforeOpen, + payload.details.versionBefore, + payload.details.versionNow, + payload.createdExecutor, + ]; + } else if (payload is NotifyTablesUpdated) { + return [ + _tag_NotifyTablesUpdated, + for (final update in payload.updates) + [ + update.table, + update.kind?.index, + ] + ]; + } else if (payload is SqlTypeSystem) { + // assume connection uses SqlTypeSystem.defaultInstance, this can't + // possibly be encoded. + return _tag_DefaultSqlTypeSystem; + } else if (payload is SelectResult) { + // We can't necessary transport maps, so encode as list + final rows = payload.rows; + if (rows.isEmpty) { + return const [_tag_SelectResult]; + } else { + // Encode by first sending column names, followed by row data + final result = [_tag_SelectResult]; + + final columns = rows.first.keys.toList(); + result + ..add(columns.length) + ..addAll(columns); + + result.add(rows.length); + for (final row in rows) { + result.addAll(row.values); + } + return result; + } + } else if (payload is RequestCancellation) { + return [_tag_RequestCancellation, payload.originalRequestId]; + } else { + return [_tag_DirectValue, payload]; + } + } + + dynamic decodePayload(dynamic encoded) { + if (encoded == null || encoded is bool) return encoded; + + int tag; + List? fullMessage; + + if (encoded is int) { + tag = encoded; + } else { + fullMessage = encoded as List; + tag = fullMessage[0] as int; + } + + int readInt(int index) => fullMessage![index] as int; + int? 
readNullableInt(int index) => fullMessage![index] as int?; + + switch (tag) { + case _tag_NoArgsRequest_getTypeSystem: + return NoArgsRequest.getTypeSystem; + case _tag_NoArgsRequest_terminateAll: + return NoArgsRequest.terminateAll; + case _tag_ExecuteQuery: + final method = StatementMethod.values[readInt(1)]; + final sql = fullMessage![2] as String; + final args = fullMessage[3] as List; + final executorId = readNullableInt(4); + return ExecuteQuery(method, sql, args, executorId); + case _tag_ExecuteBatchedStatement: + final sql = (fullMessage![1] as List).cast(); + final args = []; + + for (var i = 2; i < fullMessage.length - 1; i++) { + final list = fullMessage[i] as List; + args.add(ArgumentsForBatchedStatement( + list[0] as int, list.skip(1).toList())); + } + + final executorId = fullMessage.last as int; + return ExecuteBatchedStatement( + BatchedStatements(sql, args), executorId); + case _tag_RunTransactionAction: + final control = TransactionControl.values[readInt(1)]; + return RunTransactionAction(control, readNullableInt(2)); + case _tag_EnsureOpen: + return EnsureOpen(readInt(1), readNullableInt(2)); + case _tag_RunBeforeOpen: + return RunBeforeOpen( + OpeningDetails(readNullableInt(1), readInt(2)), + readInt(3), + ); + case _tag_DefaultSqlTypeSystem: + return SqlTypeSystem.defaultInstance; + case _tag_NotifyTablesUpdated: + final updates = []; + for (var i = 1; i < fullMessage!.length; i++) { + final encodedUpdate = fullMessage[i] as List; + final kindIndex = encodedUpdate[1] as int?; + + updates.add( + TableUpdate(encodedUpdate[0] as String, + kind: kindIndex == null ? null : UpdateKind.values[kindIndex]), + ); + } + return NotifyTablesUpdated(updates); + case _tag_SelectResult: + if (fullMessage!.length == 1) { + // Empty result set, no data + return const SelectResult([]); + } + + final columnCount = readInt(1); + final columns = fullMessage.sublist(2, 2 + columnCount).cast(); + final rows = readInt(2 + columnCount); + + final result = >[]; + for (var i = 0; i < rows; i++) { + final rowOffset = 3 + columnCount + i * columnCount; + + result.add({ + for (var c = 0; c < columnCount; c++) + columns[c]: fullMessage[rowOffset + c] + }); + } + return SelectResult(result); + case _tag_RequestCancellation: + return RequestCancellation(readInt(1)); + case _tag_DirectValue: + return encoded[1]; + } + + throw ArgumentError.value(tag, 'tag', 'Tag was unknown'); + } + + dynamic _encodeDbValue(dynamic variable) { + if (variable is List && variable is! Uint8List) { + return Uint8List.fromList(variable); + } else { + return variable; + } + } +} + +abstract class Message {} + +/// A request sent over a communication channel. It is expected that the other +/// peer eventually answers with a matching response. +class Request extends Message { + /// The id of this request. + /// + /// Ids are generated by the sender, so they are only unique per direction + /// and channel. + final int id; + + /// The payload associated with this request. + final Object? payload; + + Request(this.id, this.payload); + + @override + String toString() { + return 'Request (id = $id): $payload'; + } +} + +class SuccessResponse extends Message { + final int requestId; + final Object? response; + + SuccessResponse(this.requestId, this.response); + + @override + String toString() { + return 'SuccessResponse (id = $requestId): $response'; + } +} + +class ErrorResponse extends Message { + final int requestId; + final Object error; + final String? 
stackTrace; + + ErrorResponse(this.requestId, this.error, [this.stackTrace]); + + @override + String toString() { + return 'ErrorResponse (id = $requestId): $error at $stackTrace'; + } +} + +class CancelledResponse extends Message { + final int requestId; + + CancelledResponse(this.requestId); + + @override + String toString() { + return 'Previous request $requestId was cancelled'; + } +} + +/// A request without further parameters +enum NoArgsRequest { + /// Sent from the client to the server. The server will reply with the + /// [SqlTypeSystem] of the connection it's managing. + getTypeSystem, + + /// Close the background isolate, disconnect all clients, release all + /// associated resources + terminateAll, +} + +enum StatementMethod { + custom, + deleteOrUpdate, + insert, + select, +} + +/// Sent from the client to run a sql query. The server replies with the +/// result. +class ExecuteQuery { + final StatementMethod method; + final String sql; + final List args; + final int? executorId; + + ExecuteQuery(this.method, this.sql, this.args, [this.executorId]); + + @override + String toString() { + if (executorId != null) { + return '$method: $sql with $args (@$executorId)'; + } + return '$method: $sql with $args'; + } +} + +/// Requests a previous request to be cancelled. +/// +/// Whether this is supported or not depends on the server and its internal +/// state. This request will be immediately be acknowledged with a null +/// response, which does not indicate whether a cancellation actually happened. +class RequestCancellation { + final int originalRequestId; + + RequestCancellation(this.originalRequestId); + + @override + String toString() { + return 'Cancel previous request $originalRequestId'; + } +} + +/// Sent from the client to run [BatchedStatements] +class ExecuteBatchedStatement { + final BatchedStatements stmts; + final int? executorId; + + ExecuteBatchedStatement(this.stmts, [this.executorId]); +} + +enum TransactionControl { + /// When using [begin], the [RunTransactionAction.executorId] refers to the + /// executor starting the transaction. The server must reply with an int + /// representing the created transaction executor. + begin, + commit, + rollback, +} + +/// Sent from the client to commit or rollback a transaction +class RunTransactionAction { + final TransactionControl control; + final int? executorId; + + RunTransactionAction(this.control, this.executorId); + + @override + String toString() { + return 'RunTransactionAction($control, $executorId)'; + } +} + +/// Sent from the client to the server. The server should open the underlying +/// database connection, using the [schemaVersion]. +class EnsureOpen { + final int schemaVersion; + final int? executorId; + + EnsureOpen(this.schemaVersion, this.executorId); + + @override + String toString() { + return 'EnsureOpen($schemaVersion, $executorId)'; + } +} + +/// Sent from the server to the client when it should run the before open +/// callback. +class RunBeforeOpen { + final OpeningDetails details; + final int createdExecutor; + + RunBeforeOpen(this.details, this.createdExecutor); + + @override + String toString() { + return 'RunBeforeOpen($details, $createdExecutor)'; + } +} + +/// Sent to notify that a previous query has updated some tables. When a server +/// receives this message, it replies with `null` but forwards a new request +/// with this payload to all connected clients. 
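+///
+/// For example (sketch), after an insert into a `todos` table a client would
+/// send
+///
+/// ```dart
+/// NotifyTablesUpdated([TableUpdate('todos', kind: UpdateKind.insert)])
+/// ```
+///
+/// and the server forwards it so that streams watching `todos` in other
+/// clients refresh.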
+class NotifyTablesUpdated {
+  final List updates;
+
+  NotifyTablesUpdated(this.updates);
+}
+
+class SelectResult {
+  final List> rows;
+
+  const SelectResult(this.rows);
+}
diff --git a/drift/lib/src/remote/server_impl.dart b/drift/lib/src/remote/server_impl.dart
new file mode 100644
index 00000000..8a7d2f99
--- /dev/null
+++ b/drift/lib/src/remote/server_impl.dart
@@ -0,0 +1,257 @@
+import 'dart:async';
+
+import 'package:drift/drift.dart';
+import 'package:drift/remote.dart';
+import 'package:stream_channel/stream_channel.dart';
+
+import '../runtime/cancellation_zone.dart';
+import 'communication.dart';
+import 'protocol.dart';
+
+/// The implementation of a moor server, managing remote channels to send
+/// database requests.
+class ServerImplementation implements MoorServer {
+  /// The underlying database connection that will be used.
+  final DatabaseConnection connection;
+
+  /// Whether clients are allowed to shut down this server for all clients.
+  final bool allowRemoteShutdown;
+
+  final Map _managedExecutors = {};
+  int _currentExecutorId = 0;
+
+  final Map _cancellableOperations = {};
+
+  /// When a transaction is active, all queries that don't operate on another
+  /// query executor have to wait!
+  ///
+  /// When this list is empty, the top-level executor is active. When not, the
+  /// first transaction id in the backlog is active at the moment. Whenever a
+  /// transaction completes, we emit an item on [_backlogUpdated]. This can be
+  /// used to implement a lock.
+  final List _executorBacklog = [];
+  final StreamController _backlogUpdated =
+      StreamController.broadcast(sync: true);
+
+  late final _ServerDbUser _dbUser = _ServerDbUser(this);
+
+  bool _isShuttingDown = false;
+  final Set _activeChannels = {};
+  final Completer _done = Completer();
+
+  /// Creates a server from the underlying connection and further options.
+  ServerImplementation(this.connection, this.allowRemoteShutdown);
+
+  @override
+  Future get done => _done.future;
+
+  @override
+  void serve(StreamChannel channel) {
+    if (_isShuttingDown) {
+      throw StateError('Cannot add new channels after shutdown() was called');
+    }
+
+    final comm = MoorCommunication(channel)..setRequestHandler(_handleRequest);
+    _activeChannels.add(comm);
+    comm.closed.then((_) => _activeChannels.remove(comm));
+  }
+
+  @override
+  Future shutdown() {
+    if (!_isShuttingDown) {
+      _done.complete();
+      _isShuttingDown = true;
+    }
+
+    return done;
+  }
+
+  MoorCommunication?
get _anyClient { + final iterator = _activeChannels.iterator; + if (iterator.moveNext()) { + return iterator.current; + } + + return null; + } + + dynamic _handleRequest(Request request) { + final payload = request.payload; + + if (payload is NoArgsRequest) { + switch (payload) { + case NoArgsRequest.getTypeSystem: + return connection.typeSystem; + case NoArgsRequest.terminateAll: + if (allowRemoteShutdown) { + _backlogUpdated.close(); + shutdown(); + } else { + throw StateError('Remote shutdowns not allowed'); + } + + break; + } + } else if (payload is EnsureOpen) { + return _handleEnsureOpen(payload); + } else if (payload is ExecuteQuery) { + final token = runCancellable(() => _runQuery( + payload.method, payload.sql, payload.args, payload.executorId)); + _cancellableOperations[request.id] = token; + return token.result + .whenComplete(() => _cancellableOperations.remove(request.id)); + } else if (payload is ExecuteBatchedStatement) { + return _runBatched(payload.stmts, payload.executorId); + } else if (payload is NotifyTablesUpdated) { + for (final connected in _activeChannels) { + connected.request(payload); + } + } else if (payload is RunTransactionAction) { + return _transactionControl(payload.control, payload.executorId); + } else if (payload is RequestCancellation) { + _cancellableOperations[payload.originalRequestId]?.cancel(); + return null; + } + } + + Future _handleEnsureOpen(EnsureOpen open) async { + _dbUser.schemaVersion = open.schemaVersion; + final executor = await _loadExecutor(open.executorId); + + return await executor.ensureOpen(_dbUser); + } + + Future _runQuery(StatementMethod method, String sql, + List args, int? transactionId) async { + final executor = await _loadExecutor(transactionId); + + // Give cancellations more time to come in + await Future.delayed(Duration.zero); + checkIfCancelled(); + + switch (method) { + case StatementMethod.custom: + return executor.runCustom(sql, args); + case StatementMethod.deleteOrUpdate: + return executor.runDelete(sql, args); + case StatementMethod.insert: + return executor.runInsert(sql, args); + case StatementMethod.select: + return SelectResult(await executor.runSelect(sql, args)); + } + } + + Future _runBatched(BatchedStatements stmts, int? transactionId) async { + final executor = await _loadExecutor(transactionId); + await executor.runBatched(stmts); + } + + Future _loadExecutor(int? transactionId) async { + await _waitForTurn(transactionId); + return transactionId != null + ? _managedExecutors[transactionId]! + : connection.executor; + } + + Future _spawnTransaction(int? executor) async { + final transaction = (await _loadExecutor(executor)).beginTransaction(); + final id = _putExecutor(transaction, beforeCurrent: true); + + await transaction.ensureOpen(_dbUser); + return id; + } + + int _putExecutor(QueryExecutor executor, {bool beforeCurrent = false}) { + final id = _currentExecutorId++; + _managedExecutors[id] = executor; + + if (beforeCurrent && _executorBacklog.isNotEmpty) { + _executorBacklog.insert(0, id); + } else { + _executorBacklog.add(id); + } + + return id; + } + + Future _transactionControl( + TransactionControl action, int? executorId) async { + if (action == TransactionControl.begin) { + return await _spawnTransaction(executorId); + } + + final executor = _managedExecutors[executorId]; + if (executor is! TransactionExecutor) { + throw ArgumentError.value( + executorId, + 'transactionId', + "Does not reference a transaction. 
This might happen if you don't " + 'await all operations made inside a transaction, in which case the ' + 'transaction might complete with pending operations.', + ); + } + + try { + switch (action) { + case TransactionControl.commit: + await executor.send(); + break; + case TransactionControl.rollback: + await executor.rollback(); + break; + default: + assert(false, 'Unknown TransactionControl'); + } + } finally { + _releaseExecutor(executorId!); + } + } + + void _releaseExecutor(int id) { + _managedExecutors.remove(id); + _executorBacklog.remove(id); + _notifyActiveExecutorUpdated(); + } + + Future _waitForTurn(int? transactionId) { + bool idIsActive() { + if (transactionId == null) { + return _executorBacklog.isEmpty; + } else { + return _executorBacklog.isNotEmpty && + _executorBacklog.first == transactionId; + } + } + + // Don't wait for a backlog update if the current transaction id is active + if (idIsActive()) return Future.value(null); + + return _backlogUpdated.stream.firstWhere((_) => idIsActive()); + } + + void _notifyActiveExecutorUpdated() { + if (!_backlogUpdated.isClosed) { + _backlogUpdated.add(null); + } + } +} + +class _ServerDbUser implements QueryExecutorUser { + final ServerImplementation _server; + + @override + int schemaVersion = 0; + + _ServerDbUser(this._server); // will be overridden by client requests + + @override + Future beforeOpen( + QueryExecutor executor, OpeningDetails details) async { + final id = _server._putExecutor(executor, beforeCurrent: true); + try { + await _server._anyClient!.request(RunBeforeOpen(details, id)); + } finally { + _server._releaseExecutor(id); + } + } +} diff --git a/drift/lib/src/runtime/api/batch.dart b/drift/lib/src/runtime/api/batch.dart new file mode 100644 index 00000000..b0df5466 --- /dev/null +++ b/drift/lib/src/runtime/api/batch.dart @@ -0,0 +1,194 @@ +part of 'runtime_api.dart'; + +/// Contains operations to run queries in a batched mode. This can be much more +/// efficient when running a lot of similar queries at the same time, making +/// this api suitable for bulk updates. +class Batch { + final List _createdSql = []; + final Map _sqlToIndex = {}; + final List _createdArguments = []; + + final DatabaseConnectionUser _user; + + /// Whether we should start a transaction when completing. + final bool _startTransaction; + + final Set _createdUpdates = {}; + + Batch._(this._user, this._startTransaction); + + void _addUpdate(TableInfo table, UpdateKind kind) { + _createdUpdates.add(TableUpdate.onTable(table, kind: kind)); + } + + /// Inserts a row constructed from the fields in [row]. + /// + /// All fields in the entity that don't have a default value or auto-increment + /// must be set and non-null. Otherwise, an [InvalidDataException] will be + /// thrown. + /// + /// By default, an exception will be thrown if another row with the same + /// primary key already exists. This behavior can be overridden with [mode], + /// for instance by using [InsertMode.replace] or [InsertMode.insertOrIgnore]. + /// + /// [onConflict] can be used to create an upsert clause for engines that + /// support it. For details and examples, see [InsertStatement.insert]. + /// + /// See also: + /// - [InsertStatement.insert], which would be used outside a [Batch]. + void insert(TableInfo table, Insertable row, + {InsertMode? mode, UpsertClause? onConflict}) { + _addUpdate(table, UpdateKind.insert); + final actualMode = mode ?? 
InsertMode.insert; + final context = InsertStatement(_user, table) + .createContext(row, actualMode, onConflict: onConflict); + _addContext(context); + } + + /// Inserts all [rows] into the [table]. + /// + /// All fields in a row that don't have a default value or auto-increment + /// must be set and non-null. Otherwise, an [InvalidDataException] will be + /// thrown. + /// By default, an exception will be thrown if another row with the same + /// primary key already exists. This behavior can be overridden with [mode], + /// for instance by using [InsertMode.replace] or [InsertMode.insertOrIgnore]. + /// Using [insertAll] will not disable primary keys or any column constraint + /// checks. + /// [onConflict] can be used to create an upsert clause for engines that + /// support it. For details and examples, see [InsertStatement.insert]. + void insertAll( + TableInfo table, List> rows, + {InsertMode? mode, UpsertClause? onConflict}) { + for (final row in rows) { + insert(table, row, mode: mode, onConflict: onConflict); + } + } + + /// Equivalent of [InsertStatement.insertOnConflictUpdate] for multiple rows + /// that will be inserted in this batch. + void insertAllOnConflictUpdate( + TableInfo table, List> rows) { + for (final row in rows) { + insert(table, row, onConflict: DoUpdate((_) => row)); + } + } + + /// Writes all present columns from the [row] into all rows in the [table] + /// that match the [where] clause. + /// + /// For more details on how updates work in moor, check out + /// [UpdateStatement.write] or the [documentation with examples](https://moor.simonbinder.eu/docs/getting-started/writing_queries/#updates-and-deletes) + void update(TableInfo table, Insertable row, + {Expression Function(T table)? where}) { + _addUpdate(table, UpdateKind.update); + final stmt = UpdateStatement(_user, table); + if (where != null) stmt.where(where); + + stmt.write(row, dontExecute: true); + final context = stmt.constructQuery(); + _addContext(context); + } + + /// Replaces the [row] from the [table] with the updated values. The row in + /// the table with the same primary key will be replaced. + /// + /// See also: + /// - [UpdateStatement.replace], which is what would be used outside of a + /// [Batch]. + void replace( + TableInfo table, + Insertable row, + ) { + _addUpdate(table, UpdateKind.update); + final stmt = UpdateStatement(_user, table)..replace(row, dontExecute: true); + _addContext(stmt.constructQuery()); + } + + /// Helper that calls [replace] for all [rows]. + void replaceAll( + TableInfo table, List> rows) { + for (final row in rows) { + replace(table, row); + } + } + + /// Deletes [row] from the [table] when this batch is executed. + /// + /// See also: + /// - [DatabaseConnectionUser.delete] + /// - [DeleteStatement.delete] + void delete(TableInfo table, Insertable row) { + _addUpdate(table, UpdateKind.delete); + final stmt = DeleteStatement(_user, table)..whereSamePrimaryKey(row); + _addContext(stmt.constructQuery()); + } + + /// Deletes all rows from [table] matching the provided [filter]. + /// + /// See also: + /// - [DatabaseConnectionUser.delete] + void deleteWhere( + TableInfo table, Expression Function(T tbl) filter) { + _addUpdate(table, UpdateKind.delete); + final stmt = DeleteStatement(_user, table)..where(filter); + _addContext(stmt.constructQuery()); + } + + /// Executes the custom [sql] statement with variables instantiated to [args]. + /// + /// The statement will be added to this batch and executed when the batch + /// completes. 
So, this method returns synchronously and it's not possible to + /// inspect the return value of individual statements. + /// + /// See also: + /// - [DatabaseConnectionUser.customStatement], the equivalent method outside + /// of batches. + void customStatement(String sql, [List? args]) { + _addSqlAndArguments(sql, args ?? const []); + } + + void _addContext(GenerationContext ctx) { + _addSqlAndArguments(ctx.sql, ctx.boundVariables); + } + + void _addSqlAndArguments(String sql, List arguments) { + final stmtIndex = _sqlToIndex.putIfAbsent(sql, () { + final newIndex = _createdSql.length; + _createdSql.add(sql); + + return newIndex; + }); + + _createdArguments.add(ArgumentsForBatchedStatement(stmtIndex, arguments)); + } + + Future _commit() async { + await _user.executor.ensureOpen(_user.attachedDatabase); + + if (_startTransaction) { + TransactionExecutor? transaction; + + try { + transaction = _user.executor.beginTransaction(); + await transaction.ensureOpen(_user.attachedDatabase); + + await _runWith(transaction); + + await transaction.send(); + } catch (e) { + await transaction?.rollback(); + rethrow; + } + } else { + await _runWith(_user.executor); + } + + _user.notifyUpdates(_createdUpdates); + } + + Future _runWith(QueryExecutor executor) { + return executor + .runBatched(BatchedStatements(_createdSql, _createdArguments)); + } +} diff --git a/drift/lib/src/runtime/api/connection.dart b/drift/lib/src/runtime/api/connection.dart new file mode 100644 index 00000000..28e44eb1 --- /dev/null +++ b/drift/lib/src/runtime/api/connection.dart @@ -0,0 +1,68 @@ +part of 'runtime_api.dart'; + +/// A database connection managed by moor. Contains three components: +/// - a [SqlTypeSystem], which is responsible to map between Dart types and +/// values understood by the database engine. +/// - a [QueryExecutor], which runs sql commands +/// - a [StreamQueryStore], which dispatches table changes to listening queries, +/// on which the auto-updating queries are based. +class DatabaseConnection { + /// The type system to use with this database. The type system is responsible + /// for mapping Dart objects into sql expressions and vice-versa. + @Deprecated('Only the default type system is supported') + final SqlTypeSystem typeSystem; + + /// The executor to use when queries are executed. + final QueryExecutor executor; + + /// Manages active streams from select statements. + final StreamQueryStore streamQueries; + + /// Constructs a raw database connection from the three components. + DatabaseConnection(this.typeSystem, this.executor, this.streamQueries); + + /// Constructs a [DatabaseConnection] from the [QueryExecutor] by using the + /// default type system and a new [StreamQueryStore]. + DatabaseConnection.fromExecutor(this.executor) + : typeSystem = SqlTypeSystem.defaultInstance, + streamQueries = StreamQueryStore(); + + /// Database connection that is instantly available, but delegates work to a + /// connection only available through a `Future`. + /// + /// This can be useful in scenarios where you need to obtain a database + /// instance synchronously, but need an async setup. A prime example here is + /// `MoorIsolate`: + /// + /// ```dart + /// @UseMoor(...) 
+ /// class MyDatabase extends _$MyDatabase { + /// MyDatabase._connect(DatabaseConnection c): super.connect(c); + /// + /// factory MyDatabase.fromIsolate(MoorIsolate isolate) { + /// return MyDatabase._connect( + /// // isolate.connect() returns a future, but we can still return a + /// // database synchronously thanks to DatabaseConnection.delayed! + /// DatabaseConnection.delayed(isolate.connect()), + /// ); + /// } + /// } + /// ``` + factory DatabaseConnection.delayed(FutureOr connection) { + if (connection is DatabaseConnection) { + return connection; + } + + return DatabaseConnection( + SqlTypeSystem.defaultInstance, + LazyDatabase(() async => (await connection).executor), + DelayedStreamQueryStore(connection.then((conn) => conn.streamQueries)), + ); + } + + /// Returns a database connection that is identical to this one, except that + /// it uses the provided [executor]. + DatabaseConnection withExecutor(QueryExecutor executor) { + return DatabaseConnection(typeSystem, executor, streamQueries); + } +} diff --git a/drift/lib/src/runtime/api/connection_user.dart b/drift/lib/src/runtime/api/connection_user.dart new file mode 100644 index 00000000..b3f41fc3 --- /dev/null +++ b/drift/lib/src/runtime/api/connection_user.dart @@ -0,0 +1,510 @@ +part of 'runtime_api.dart'; + +const _zoneRootUserKey = #DatabaseConnectionUser; + +typedef _CustomWriter = Future Function( + QueryExecutor e, String sql, List vars); + +typedef _BatchRunner = FutureOr Function(Batch batch); + +/// Manages a [DatabaseConnection] to send queries to the database. +abstract class DatabaseConnectionUser { + /// The database connection used by this [DatabaseConnectionUser]. + @protected + final DatabaseConnection connection; + + /// The database class that this user is attached to. + @visibleForOverriding + GeneratedDatabase get attachedDatabase; + + /// The type system to use with this database. The type system is responsible + /// for mapping Dart objects into sql expressions and vice-versa. + SqlTypeSystem get typeSystem => connection.typeSystem; + + /// The executor to use when queries are executed. + QueryExecutor get executor => connection.executor; + + /// Manages active streams from select statements. + @visibleForTesting + @protected + StreamQueryStore get streamQueries => connection.streamQueries; + + /// Constructs a database connection user, which is responsible to store query + /// streams, wrap the underlying executor and perform type mapping. + DatabaseConnectionUser(SqlTypeSystem typeSystem, QueryExecutor executor, + {StreamQueryStore? streamQueries}) + : connection = DatabaseConnection( + typeSystem, executor, streamQueries ?? StreamQueryStore()); + + /// Creates another [DatabaseConnectionUser] by referencing the implementation + /// from the [other] user. + DatabaseConnectionUser.delegate(DatabaseConnectionUser other, + {SqlTypeSystem? typeSystem, + QueryExecutor? executor, + StreamQueryStore? streamQueries}) + : connection = DatabaseConnection( + typeSystem ?? other.connection.typeSystem, + executor ?? other.connection.executor, + streamQueries ?? other.connection.streamQueries, + ); + + /// Constructs a [DatabaseConnectionUser] that will use the provided + /// [DatabaseConnection]. + DatabaseConnectionUser.fromConnection(this.connection); + + /// Creates and auto-updating stream from the given select statement. This + /// method should not be used directly. 
+ Stream>> createStream(QueryStreamFetcher stmt) => + streamQueries.registerStream(stmt); + + /// Creates a copy of the table with an alias so that it can be used in the + /// same query more than once. + /// + /// Example which uses the same table (here: points) more than once to + /// differentiate between the start and end point of a route: + /// ``` + /// var source = alias(points, 'source'); + /// var destination = alias(points, 'dest'); + /// + /// select(routes).join([ + /// innerJoin(source, routes.startPoint.equalsExp(source.id)), + /// innerJoin(destination, routes.startPoint.equalsExp(destination.id)), + /// ]); + /// ``` + T alias(TableInfo table, String alias) { + return table.createAlias(alias).asDslTable; + } + + /// A, potentially more specific, database engine based on the [Zone] context. + /// + /// Inside a [transaction] block, moor will replace this [resolvedEngine] with + /// an engine specific to the transaction. All other methods on this class + /// implicitly use the [resolvedEngine] to run their SQL statements. + /// This let's users call methods on their top-level database or dao class + /// but run them in a transaction-specific executor. + @internal + DatabaseConnectionUser get resolvedEngine { + return (Zone.current[_zoneRootUserKey] as DatabaseConnectionUser?) ?? this; + } + + /// Marks the [tables] as updated. + /// + /// In response to calling this method, all streams listening on any of the + /// [tables] will load their data again. + /// + /// Primarily, this method is meant to be used by moor-internal code. Higher- + /// level moor APIs will call this method to dispatch stream updates. + /// Of course, you can also call it yourself to manually dispatch table + /// updates. To obtain a [TableInfo], use the corresponding getter on the + /// database class. + void markTablesUpdated(Iterable tables) { + notifyUpdates( + {for (final table in tables) TableUpdate(table.actualTableName)}, + ); + } + + /// Dispatches the set of [updates] to the stream query manager. + /// + /// This method is more specific than [markTablesUpdated] in the presence of + /// triggers or foreign key constraints. Moor needs to support both when + /// calculating which streams to update. For instance, consider a simple + /// database with two tables (`a` and `b`) and a trigger inserting into `b` + /// after a delete on `a`). + /// Now, an insert on `a` should not update a stream listening on table `b`, + /// but a delete should! This additional information is not available with + /// [markTablesUpdated], so [notifyUpdates] can be used to more efficiently + /// calculate stream updates in some instances. + void notifyUpdates(Set updates) { + final withRulesApplied = attachedDatabase.streamUpdateRules.apply(updates); + resolvedEngine.streamQueries.handleTableUpdates(withRulesApplied); + } + + /// Listen for table updates reported through [notifyUpdates]. + /// + /// By default, this listens to every table update. Table updates are reported + /// as a set of individual updates that happened atomically. + /// An optional filter can be provided in the [query] parameter. When set, + /// only updates matching the query will be reported in the stream. + /// + /// When called inside a transaction, the stream will close when the + /// transaction completes or is rolled back. Otherwise, the stream will + /// complete as the database is closed. 
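+  ///
+  /// A listening sketch (assuming `db` is a [GeneratedDatabase]):
+  ///
+  /// ```dart
+  /// db.tableUpdates(const TableUpdateQuery.any()).listen((updates) {
+  ///   for (final update in updates) {
+  ///     print('table changed: ${update.table}');
+  ///   }
+  /// });
+  /// ```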
+ Stream> tableUpdates( + [TableUpdateQuery query = const TableUpdateQuery.any()]) { + // The stream should refer to the transaction active when tableUpdates was + // called, not the one when a listener attaches. + final engine = resolvedEngine; + + // We're wrapping updatesForSync in a stream controller to make it async. + return Stream.multi( + (controller) { + final source = engine.streamQueries.updatesForSync(query); + source.pipe(controller); + }, + isBroadcast: true, + ); + } + + /// Performs the async [fn] after this executor is ready, or directly if it's + /// already ready. + /// + /// Calling this method directly might circumvent the current transaction. For + /// that reason, it should only be called inside moor. + Future doWhenOpened(FutureOr Function(QueryExecutor e) fn) { + return executor.ensureOpen(attachedDatabase).then((_) => fn(executor)); + } + + /// Starts an [InsertStatement] for a given table. You can use that statement + /// to write data into the [table] by using [InsertStatement.insert]. + InsertStatement into(TableInfo table) { + return InsertStatement(resolvedEngine, table); + } + + /// Starts an [UpdateStatement] for the given table. You can use that + /// statement to update individual rows in that table by setting a where + /// clause on that table and then use [UpdateStatement.write]. + UpdateStatement update( + TableInfo table) => + UpdateStatement(resolvedEngine, table); + + /// Starts a query on the given table. + /// + /// In moor, queries are commonly used as a builder by chaining calls on them + /// using the `..` syntax from Dart. For instance, to load the 10 oldest users + /// with an 'S' in their name, you could use: + /// ```dart + /// Future> oldestUsers() { + /// return ( + /// select(users) + /// ..where((u) => u.name.like('%S%')) + /// ..orderBy([(u) => OrderingTerm( + /// expression: u.id, + /// mode: OrderingMode.asc + /// )]) + /// ..limit(10) + /// ).get(); + /// } + /// ``` + /// + /// The [distinct] parameter (defaults to false) can be used to remove + /// duplicate rows from the result set. + /// + /// For more information on queries, see the + /// [documentation](https://moor.simonbinder.eu/docs/getting-started/writing_queries/). + SimpleSelectStatement select( + ResultSetImplementation table, + {bool distinct = false}) { + return SimpleSelectStatement(resolvedEngine, table, + distinct: distinct); + } + + /// Starts a complex statement on [table] that doesn't necessarily use all of + /// [table]'s columns. + /// + /// Unlike [select], which automatically selects all columns of [table], this + /// method is suitable for more advanced queries that can use [table] without + /// using their column. As an example, assuming we have a table `comments` + /// with a `TextColumn content`, this query would report the average length of + /// a comment: + /// ```dart + /// Stream watchAverageCommentLength() { + /// final avgLength = comments.content.length.avg(); + /// final query = selectWithoutResults(comments) + /// ..addColumns([avgLength]); + /// + /// return query.map((row) => row.read(avgLength)).watchSingle(); + /// } + /// ``` + /// + /// While this query reads from `comments`, it doesn't use all of it's columns + /// (in fact, it uses none of them!). This makes it suitable for + /// [selectOnly] instead of [select]. + /// + /// The [distinct] parameter (defaults to false) can be used to remove + /// duplicate rows from the result set. + /// + /// For simple queries, use [select]. 
+ /// + /// See also: + /// - the documentation on [aggregate expressions](https://moor.simonbinder.eu/docs/getting-started/expressions/#aggregate) + /// - the documentation on [group by](https://moor.simonbinder.eu/docs/advanced-features/joins/#group-by) + JoinedSelectStatement selectOnly( + ResultSetImplementation table, { + bool distinct = false, + }) { + return JoinedSelectStatement( + resolvedEngine, table, [], distinct, false); + } + + /// Starts a [DeleteStatement] that can be used to delete rows from a table. + /// + /// See the [documentation](https://moor.simonbinder.eu/docs/getting-started/writing_queries/#updates-and-deletes) + /// for more details and example on how delete statements work. + DeleteStatement delete(TableInfo table) { + return DeleteStatement(resolvedEngine, table); + } + + /// Executes a custom delete or update statement and returns the amount of + /// rows that have been changed. + /// You can use the [updates] parameter so that moor knows which tables are + /// affected by your query. All select streams that depend on a table + /// specified there will then update their data. For more accurate results, + /// you can also set the [updateKind] parameter to [UpdateKind.delete] or + /// [UpdateKind.update]. This is optional, but can improve the accuracy of + /// query updates, especially when using triggers. + Future customUpdate( + String query, { + List variables = const [], + Set? updates, + UpdateKind? updateKind, + }) async { + return _customWrite( + query, + variables, + updates, + updateKind, + (executor, sql, vars) { + return executor.runUpdate(sql, vars); + }, + ); + } + + /// Executes a custom insert statement and returns the last inserted rowid. + /// + /// You can tell moor which tables your query is going to affect by using the + /// [updates] parameter. Query-streams running on any of these tables will + /// then be re-run. + Future customInsert(String query, + {List variables = const [], Set? updates}) { + return _customWrite( + query, + variables, + updates, + UpdateKind.insert, + (executor, sql, vars) { + return executor.runInsert(sql, vars); + }, + ); + } + + /// Runs a `INSERT`, `UPDATE` or `DELETE` statement returning rows. + /// + /// You can use the [updates] parameter so that moor knows which tables are + /// affected by your query. All select streams that depend on a table + /// specified there will then update their data. For more accurate results, + /// you can also set the [updateKind] parameter. + /// This is optional, but can improve the accuracy of query updates, + /// especially when using triggers. + Future> customWriteReturning( + String query, { + List variables = const [], + Set? updates, + UpdateKind? updateKind, + }) { + return _customWrite(query, variables, updates, updateKind, + (executor, sql, vars) async { + final rows = await executor.runSelect(sql, vars); + return [for (final row in rows) QueryRow(row, attachedDatabase)]; + }); + } + + /// Common logic for [customUpdate] and [customInsert] which takes care of + /// mapping the variables, running the query and optionally informing the + /// stream-queries. + Future _customWrite( + String query, + List variables, + Set? updates, + UpdateKind? 
updateKind, + _CustomWriter writer, + ) async { + final engine = resolvedEngine; + + final ctx = GenerationContext.fromDb(engine); + final mappedArgs = variables.map((v) => v.mapToSimpleValue(ctx)).toList(); + + final result = + await engine.doWhenOpened((e) => writer(e, query, mappedArgs)); + + if (updates != null) { + engine.notifyUpdates({ + for (final table in updates) + TableUpdate(table.actualTableName, kind: updateKind), + }); + } + + return result; + } + + /// Creates a custom select statement from the given sql [query]. To run the + /// query once, use [Selectable.get]. For auto-updating streams, set the + /// set of tables the query reads from in [readsFrom] and use [Selectable.watch]. If you + /// know the query will never emit more than one row, you can also use + /// `getSingle` and `SelectableUtils.watchSingle` which return the item + /// directly without wrapping it into a list. + /// + /// If you use variables in your query (for instance with "?"), they will be + /// bound to the [variables] you specify on this query. + Selectable customSelect(String query, + {List variables = const [], + Set readsFrom = const {}}) { + return CustomSelectStatement(query, variables, readsFrom, resolvedEngine); + } + + /// Creates a custom select statement from the given sql [query]. To run the + /// query once, use [Selectable.get]. For auto-updating streams, set the + /// set of tables the query reads from in [readsFrom] and use [Selectable.watch]. If you + /// know the query will never emit more than one row, you can also use + /// `getSingle` and `watchSingle` which return the item directly without + /// wrapping it into a list. + /// + /// If you use variables in your query (for instance with "?"), they will be + /// bound to the [variables] you specify on this query. + @Deprecated('Renamed to customSelect') + Selectable customSelectQuery(String query, + {List variables = const [], + Set readsFrom = const {}}) { + return customSelect(query, variables: variables, readsFrom: readsFrom); + } + + /// Executes the custom sql [statement] on the database. + Future customStatement(String statement, [List? args]) { + final engine = resolvedEngine; + + return engine.doWhenOpened((executor) { + return executor.runCustom(statement, args); + }); + } + + /// Executes [action] in a transaction, which means that all its queries and + /// updates will be called atomically. + /// + /// Returns the value of [action]. + /// When [action] throws an exception, the transaction will be reset and no + /// changes will be applied to the database. The exception will be rethrown + /// by [transaction]. + /// + /// The behavior of stream queries in transactions depends on where the stream + /// was created: + /// + /// - streams created outside of a [transaction] block: The stream will update + /// with the tables modified in the transaction after it completes + /// successfully. If the transaction fails, the stream will not update. + /// - streams created inside a [transaction] block: The stream will update for + /// each write in the transaction. When the transaction completes, + /// successful or not, streams created in it will close. Writes happening + /// outside of this transaction will not affect the stream. + /// + /// Please note that nested transactions are not supported. Creating another + /// transaction inside a transaction returns the parent transaction.
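A minimal usage sketch for `transaction`, assuming a generated `todos` table with its `TodosCompanion` and written as a method on the database class:

```dart
Future<void> replaceTodos(List<TodosCompanion> entries) {
  return transaction(() async {
    // Both steps run atomically: if any insert throws, the delete is rolled
    // back as well and the exception is rethrown by transaction().
    await delete(todos).go();
    for (final entry in entries) {
      await into(todos).insert(entry);
    }
  });
}
```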
+ /// + /// See also: + /// - the docs on [transactions](https://moor.simonbinder.eu/docs/transactions/) + Future transaction(Future Function() action) async { + final resolved = resolvedEngine; + if (resolved is Transaction) { + return action(); + } + + return await resolved.doWhenOpened((executor) { + final transactionExecutor = executor.beginTransaction(); + final transaction = Transaction(this, transactionExecutor); + + return _runConnectionZoned(transaction, () async { + var success = false; + try { + final result = await action(); + success = true; + return result; + } catch (e, s) { + try { + await transactionExecutor.rollback(); + } catch (rollBackException) { + throw CouldNotRollBackException(e, s, rollBackException); + } + + // pass the exception on to the one who called transaction() + rethrow; + } finally { + if (success) { + // complete() will also take care of committing the transaction + await transaction.complete(); + } + await transaction.disposeChildStreams(); + } + }); + }); + } + + /// Runs statements inside a batch. + /// + /// A batch can only run a subset of statements, and those statements must be + /// called on the [Batch] instance. The statements aren't executed with a call + /// to [Batch]. Instead, all generated queries are queued up and are then run + /// and executed atomically in a transaction. + /// If [batch] is called outside of a [transaction] call, it will implicitly + /// start a transaction. Otherwise, the batch will re-use the transaction, + /// and will have an effect when the transaction completes. + /// Typically, running bulk updates (so a lot of similar statements) over a + /// [Batch] is much faster than running them via the [GeneratedDatabase] + /// directly. + /// + /// An example that inserts users in a batch: + /// ```dart + /// await batch((b) { + /// b.insertAll( + /// todos, + /// [ + /// TodosCompanion.insert(content: 'Use batches'), + /// TodosCompanion.insert(content: 'Have fun'), + /// ], + /// ); + /// }); + /// ``` + Future batch(_BatchRunner runInBatch) { + final engine = resolvedEngine; + + final batch = Batch._(engine, engine is! Transaction); + final result = runInBatch(batch); + + if (result is Future) { + return result.then((_) => batch._commit()); + } else { + return batch._commit(); + } + } + + /// Runs [calculation] in a forked [Zone] that has its [resolvedEngine] set + /// to the [user]. + @protected + Future _runConnectionZoned( + DatabaseConnectionUser user, Future Function() calculation) { + return runZoned(calculation, zoneValues: {_zoneRootUserKey: user}); + } + + /// Will be used by generated code to resolve inline Dart components in sql. + @protected + GenerationContext $write(Component component, {bool? hasMultipleTables}) { + final context = GenerationContext.fromDb(this); + if (hasMultipleTables != null) { + context.hasMultipleTables = hasMultipleTables; + } + component.writeInto(context); + + return context; + } + + /// Writes column names and `VALUES` for an insert statement. + /// + /// Used by generated code. 
+ @protected + GenerationContext $writeInsertable(TableInfo table, Insertable insertable) { + final context = GenerationContext.fromDb(this); + + table.validateIntegrity(insertable, isInserting: true); + InsertStatement(this, table) + .writeInsertable(context, insertable.toColumns(true)); + + return context; + } +} diff --git a/drift/lib/src/runtime/api/dao_base.dart b/drift/lib/src/runtime/api/dao_base.dart new file mode 100644 index 00000000..6e62755e --- /dev/null +++ b/drift/lib/src/runtime/api/dao_base.dart @@ -0,0 +1,26 @@ +part of 'runtime_api.dart'; + +/// Class that runs queries to a subset of all available queries in a database. +/// +/// This comes in handy to structure large amounts of database code better: The +/// migration logic can live in the main [GeneratedDatabase] class, but code +/// can be extracted into [DatabaseAccessor]s outside of that database. +/// For details on how to write a dao, see [UseDao]. +/// [T] should be the associated database class you wrote. +abstract class DatabaseAccessor + extends DatabaseConnectionUser { + /// The main database instance for this dao + @override + final T attachedDatabase; + + /// Used internally by moor + DatabaseAccessor(this.attachedDatabase) : super.delegate(attachedDatabase); +} + +/// Extension for generated dao classes to keep the old [db] field that was +/// renamed to [DatabaseAccessor.attachedDatabase] in moor 3.0 +extension OldDbFieldInDatabaseAccessor + on DatabaseAccessor { + /// The generated database that this dao is attached to. + T get db => attachedDatabase; +} diff --git a/drift/lib/src/runtime/api/db_base.dart b/drift/lib/src/runtime/api/db_base.dart new file mode 100644 index 00000000..489f9a22 --- /dev/null +++ b/drift/lib/src/runtime/api/db_base.dart @@ -0,0 +1,136 @@ +part of 'runtime_api.dart'; + +/// Keep track of how many databases have been opened for a given database +/// type. +/// We get a number of error reports of "moor not generating tables" that have +/// their origin in users opening multiple instances of their database. This +/// can cause a race conditions when the second [GeneratedDatabase] is opening a +/// underlying [DatabaseConnection] that is already opened but doesn't have the +/// tables created. +Map _openedDbCount = {}; + +/// A base class for all generated databases. +abstract class GeneratedDatabase extends DatabaseConnectionUser + implements QueryExecutorUser { + @override + GeneratedDatabase get attachedDatabase => this; + + /// Specify the schema version of your database. Whenever you change or add + /// tables, you should bump this field and provide a [migration] strategy. + @override + int get schemaVersion; + + /// Defines the migration strategy that will determine how to deal with an + /// increasing [schemaVersion]. The default value only supports creating the + /// database by creating all tables known in this database. When you have + /// changes in your schema, you'll need a custom migration strategy to create + /// the new tables or change the columns. + MigrationStrategy get migration => MigrationStrategy(); + MigrationStrategy? _cachedMigration; + MigrationStrategy get _resolvedMigration => _cachedMigration ??= migration; + + /// The collection of update rules contains information on how updates on + /// tables result in other updates, for instance due to a trigger. + /// + /// There should be no need to overwrite this field, moor will generate an + /// appropriate implementation automatically. 
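To illustrate the [migration] override described above, here is a hedged sketch for a database whose hypothetical `todos` table gained a `dueDate` column in schema version 2; both overrides live in the generated database class:

```dart
@override
int get schemaVersion => 2;

@override
MigrationStrategy get migration => MigrationStrategy(
      // Creates all tables when the database file is first created.
      onCreate: (m) => m.createAll(),
      onUpgrade: (m, from, to) async {
        if (from < 2) {
          // Column assumed to have been added in schema version 2.
          await m.addColumn(todos, todos.dueDate);
        }
      },
    );
```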
+ StreamQueryUpdateRules get streamUpdateRules => + const StreamQueryUpdateRules.none(); + + /// A list of tables specified in this database. + Iterable get allTables; + + /// A list of all [DatabaseSchemaEntity] that are specified in this database. + /// + /// This contains [allTables], but also advanced entities like triggers. + // return allTables for backwards compatibility + Iterable get allSchemaEntities => allTables; + + /// A [Type] can't be sent across isolates. Instances of this class shouldn't + /// be sent over isolates either, so let's keep a reference to a [Type] that + /// definitely prohibits this. + // ignore: unused_field + final Type _$dontSendThisOverIsolates = Null; + + /// Used by generated code + GeneratedDatabase(SqlTypeSystem types, QueryExecutor executor, + {StreamQueryStore? streamStore}) + : super(types, executor, streamQueries: streamStore) { + assert(_handleInstantiated()); + } + + /// Used by generated code to connect to a database that is already open. + GeneratedDatabase.connect(DatabaseConnection connection) + : super.fromConnection(connection) { + assert(_handleInstantiated()); + } + + bool _handleInstantiated() { + if (!_openedDbCount.containsKey(runtimeType) || + driftRuntimeOptions.dontWarnAboutMultipleDatabases) { + _openedDbCount[runtimeType] = 1; + return true; + } + + final count = + _openedDbCount[runtimeType] = _openedDbCount[runtimeType]! + 1; + if (count > 1) { + driftRuntimeOptions.debugPrint( + 'WARNING (moor): It looks like you\'ve created the database class ' + '$runtimeType multiple times. When these two databases use the same ' + 'QueryExecutor, race conditions will occur and might corrupt the ' + 'database. \n' + 'Try to follow the advice at https://moor.simonbinder.eu/faq/#using-the-database ' + 'or, if you know what you\'re doing, set ' + 'moorRuntimeOptions.dontWarnAboutMultipleDatabases = true\n' + 'Here is the stacktrace from when the database was opened a second ' + 'time:\n${StackTrace.current}\n' + 'This warning will only appear on debug builds.', + ); + } + + return true; + } + + /// Creates a [Migrator] with the provided query executor. Migrators generate + /// sql statements to create or drop tables. + /// + /// This api is mainly used internally in moor, especially to implement the + /// [beforeOpen] callback from the database site. + /// However, it can also be used if you need to create tables manually and + /// outside of a [MigrationStrategy]. For almost all use cases, overriding + /// [migration] should suffice. + @protected + @visibleForTesting + Migrator createMigrator() => Migrator(this); + + @override + @nonVirtual + Future beforeOpen(QueryExecutor executor, OpeningDetails details) { + return _runConnectionZoned(BeforeOpenRunner(this, executor), () async { + if (details.wasCreated) { + final migrator = createMigrator(); + await _resolvedMigration.onCreate(migrator); + } else if (details.hadUpgrade) { + final migrator = createMigrator(); + await _resolvedMigration.onUpgrade( + migrator, details.versionBefore!, details.versionNow); + } + + await _resolvedMigration.beforeOpen?.call(details); + }); + } + + /// Closes this database and releases associated resources. + Future close() async { + await streamQueries.close(); + await executor.close(); + + assert(() { + if (_openedDbCount[runtimeType] != null) { + _openedDbCount[runtimeType] = _openedDbCount[runtimeType]! 
- 1; + } + return true; + }()); + } +} diff --git a/drift/lib/src/runtime/api/runtime_api.dart b/drift/lib/src/runtime/api/runtime_api.dart new file mode 100644 index 00000000..e5832bea --- /dev/null +++ b/drift/lib/src/runtime/api/runtime_api.dart @@ -0,0 +1,36 @@ +import 'dart:async'; + +import 'package:drift/drift.dart'; +import 'package:drift/src/runtime/executor/delayed_stream_queries.dart'; +import 'package:drift/src/runtime/executor/stream_queries.dart'; +import 'package:drift/src/runtime/executor/transactions.dart'; +import 'package:meta/meta.dart'; + +part 'batch.dart'; +part 'connection.dart'; +part 'connection_user.dart'; +part 'dao_base.dart'; +part 'db_base.dart'; +part 'stream_updates.dart'; + +/// Defines additional runtime behavior for moor. Changing the fields of this +/// class is rarely necessary. +class DriftRuntimeOptions { + /// Don't warn when a database class isn't used as singleton. + bool dontWarnAboutMultipleDatabases = false; + + /// The [ValueSerializer] that will be used by default in [DataClass.toJson]. + ValueSerializer defaultSerializer = const ValueSerializer.defaults(); + + /// The function used by moor to emit debug prints. + /// + /// This is the function used with `logStatements: true` on databases and + /// `debugLog` on isolates. + void Function(String) debugPrint = print; +} + +/// Stores the [DriftRuntimeOptions] describing global drift behavior across +/// databases. +/// +/// Note that is is adapting this behavior is rarely needed. +DriftRuntimeOptions driftRuntimeOptions = DriftRuntimeOptions(); diff --git a/drift/lib/src/runtime/api/stream_updates.dart b/drift/lib/src/runtime/api/stream_updates.dart new file mode 100644 index 00000000..cd9c4319 --- /dev/null +++ b/drift/lib/src/runtime/api/stream_updates.dart @@ -0,0 +1,157 @@ +part of 'runtime_api.dart'; + +/// Collects a set of [UpdateRule]s which can be used to express how a set of +/// direct updates to a table affects other updates. +/// +/// This is used to implement query streams in databases that have triggers. +class StreamQueryUpdateRules { + /// All rules active in a database. + final List rules; + + /// Creates a [StreamQueryUpdateRules] from the underlying [rules]. + const StreamQueryUpdateRules(this.rules); + + /// The default implementation, which doesn't have any rules. + const StreamQueryUpdateRules.none() : this(const []); + + /// Obtain a set of all tables that might be affected by direct updates in + /// [input]. + Set apply(Iterable input) { + // Most users don't have any update rules, and this check is much faster + // than crawling through all updates. + if (rules.isEmpty) return input.toSet(); + + final pending = List.of(input); + final seen = {}; + while (pending.isNotEmpty) { + final update = pending.removeLast(); + seen.add(update); + + for (final rule in rules) { + if (rule is WritePropagation && rule.on.matches(update)) { + pending.addAll(rule.result.where((u) => !seen.contains(u))); + } + } + } + + return seen; + } +} + +/// A common rule that describes how a [TableUpdate] has other [TableUpdate]s. +/// +/// Users should not extend or implement this class. +abstract class UpdateRule { + /// Common const constructor so that subclasses can be const. + const UpdateRule._(); +} + +/// An [UpdateRule] for triggers that exist in a database. +/// +/// An update on [on] implicitly triggers updates on [result]. +/// +/// This class is for use by generated or moor-internal code only. It does not +/// adhere to Semantic Versioning and should not be used manually. 
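To make the propagation in `apply` concrete, here is a hedged sketch for a hypothetical trigger that copies deleted `todos` rows into a `deleted_todos` table. Generated code normally builds these rules; this is for illustration only:

```dart
import 'package:drift/drift.dart';

Set<TableUpdate> affectedByTodoDelete() {
  const rules = StreamQueryUpdateRules([
    WritePropagation(
      on: TableUpdateQuery.onTableName('todos',
          limitUpdateKind: UpdateKind.delete),
      result: [TableUpdate('deleted_todos', kind: UpdateKind.insert)],
    ),
  ]);

  // apply() expands the direct update into everything the trigger also
  // touches: the returned set contains the `todos` delete and the derived
  // `deleted_todos` insert.
  return rules.apply([const TableUpdate('todos', kind: UpdateKind.delete)]);
}
```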
+class WritePropagation extends UpdateRule { + /// The updates that cause further writes in [result]. + final TableUpdateQuery on; + + /// All updates that will be performed by the trigger listening on [on]. + final List result; + + /// Default constructor. See [WritePropagation] for details. + const WritePropagation({required this.on, required this.result}) : super._(); +} + +/// Classifies a [TableUpdate] by what kind of write happened - an insert, an +/// update or a delete operation. +enum UpdateKind { + /// An insert statement ran on the affected table. + insert, + + /// An update statement ran on the affected table. + update, + + /// A delete statement ran on the affected table. + delete +} + +/// Contains information on how a table was updated, which can be used to find +/// queries that are affected by this. +class TableUpdate { + /// What kind of update was applied to the [table]. + /// + /// Can be null, which indicates that the update is not known. + final UpdateKind? kind; + + /// Name of the table that was updated. + final String table; + + /// Default constant constructor. + const TableUpdate(this.table, {this.kind}); + + /// Creates a [TableUpdate] instance based on a [TableInfo] instead of the raw + /// name. + factory TableUpdate.onTable(TableInfo table, {UpdateKind? kind}) { + return TableUpdate(table.actualTableName, kind: kind); + } + + @override + int get hashCode => Object.hash(kind, table); + + @override + bool operator ==(Object other) { + return other is TableUpdate && other.kind == kind && other.table == table; + } + + @override + String toString() { + return 'TableUpdate($table, kind: $kind)'; + } +} + +/// A table update query describes information to listen for [TableUpdate]s. +/// +/// Users should not extend implement this class. +abstract class TableUpdateQuery { + /// Default const constructor so that subclasses can have constant + /// constructors. + const TableUpdateQuery(); + + /// A query that listens for all table updates in a database. + const factory TableUpdateQuery.any() = AnyUpdateQuery; + + /// A query that listens for all updates that match any query in [queries]. + const factory TableUpdateQuery.allOf(List queries) = + MultipleUpdateQuery; + + /// A query that listens for all updates on a specific [table] by its name. + /// + /// The optional [limitUpdateKind] parameter can be used to limit the updates + /// to a certain kind. + const factory TableUpdateQuery.onTableName(String table, + {UpdateKind? limitUpdateKind}) = SpecificUpdateQuery; + + /// A query that listens for all updates on a specific [table]. + /// + /// The optional [limitUpdateKind] parameter can be used to limit the updates + /// to a certain kind. + factory TableUpdateQuery.onTable(ResultSetImplementation table, + {UpdateKind? limitUpdateKind}) { + return TableUpdateQuery.onTableName( + table.entityName, + limitUpdateKind: limitUpdateKind, + ); + } + + /// A query that listens for any change on any table in [tables]. + factory TableUpdateQuery.onAllTables( + Iterable tables) { + return TableUpdateQuery.allOf( + [for (final table in tables) TableUpdateQuery.onTable(table)], + ); + } + + /// Determines whether the [update] would be picked up by this query. 
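A short sketch of subscribing to such a query through `tableUpdates`, assuming `db` is an instance of a generated database that contains a `todos` table:

```dart
import 'dart:async';

import 'package:drift/drift.dart';

StreamSubscription<Set<TableUpdate>> watchTodoDeletes(GeneratedDatabase db) {
  return db
      .tableUpdates(TableUpdateQuery.onTableName('todos',
          limitUpdateKind: UpdateKind.delete))
      .listen((updates) {
    // Runs whenever a delete statement touches the todos table.
    print('todos changed: $updates');
  });
}
```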
+ bool matches(TableUpdate update); +} diff --git a/drift/lib/src/runtime/cancellation_zone.dart b/drift/lib/src/runtime/cancellation_zone.dart new file mode 100644 index 00000000..37172df9 --- /dev/null +++ b/drift/lib/src/runtime/cancellation_zone.dart @@ -0,0 +1,98 @@ +import 'dart:async'; + +import 'package:meta/meta.dart'; + +const _key = #moor.runtime.cancellation; + +/// Runs an asynchronous operation with support for cancellations. +/// +/// The [CancellationToken] can be used to cancel the operation and to get the +/// eventual result. +CancellationToken runCancellable( + Future Function() operation, +) { + final token = CancellationToken(); + runZonedGuarded( + () => operation().then(token._resultCompleter.complete), + token._resultCompleter.completeError, + zoneValues: {_key: token}, + ); + + return token; +} + +/// A token that can be used to cancel an asynchronous operation running in a +/// child zone. +@internal +class CancellationToken { + final Completer _resultCompleter = Completer(); + final List _cancellationCallbacks = []; + bool _cancellationRequested = false; + + /// Loads the result for the cancellable operation. + /// + /// When a cancellation has been requested and was honored, the future will + /// complete with a [CancellationException]. + Future get result => _resultCompleter.future; + + /// Requests the inner asynchronous operation to be cancelled. + void cancel() { + if (_cancellationRequested) return; + + for (final callback in _cancellationCallbacks) { + callback(); + } + _cancellationRequested = true; + } +} + +/// Extensions that can be used on cancellable operations if they return a non- +/// nullable value. +extension NonNullableCancellationExtension + on CancellationToken { + /// Wait for the result, or return `null` if the operation was cancelled. + /// + /// To avoid situations where `null` could be a valid result from an async + /// operation, this getter is only available on non-nullable operations. This + /// avoids ambiguity. + /// + /// The future will still complete with an error if anything but a + /// [CancellationException] is thrown in [result]. + Future get resultOrNullIfCancelled async { + try { + return await result; + } on CancellationException { + return null; + } + } +} + +/// Thrown inside a cancellation zone when it has been cancelled. +@internal +class CancellationException implements Exception { + /// Default const constructor + const CancellationException(); + + @override + String toString() { + return 'Operation was cancelled'; + } +} + +/// Checks whether the active zone is a cancellation zone that has been +/// cancelled. If it is, a [CancellationException] will be thrown. +void checkIfCancelled() { + final token = Zone.current[_key]; + if (token is CancellationToken && token._cancellationRequested) { + throw const CancellationException(); + } +} + +/// Requests the [callback] to be invoked when the enclosing asynchronous +/// operation is cancelled. +void doOnCancellation(void Function() callback) { + final token = Zone.current[_key]; + if (token is CancellationToken) { + token._cancellationCallbacks.add(callback); + } +} diff --git a/drift/lib/src/runtime/custom_result_set.dart b/drift/lib/src/runtime/custom_result_set.dart new file mode 100644 index 00000000..05e51b65 --- /dev/null +++ b/drift/lib/src/runtime/custom_result_set.dart @@ -0,0 +1,10 @@ +import 'package:drift/drift.dart'; + +/// Base class for classes generated by custom queries in `.drift` files. 
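The cancellation helpers above are internal to drift, but a minimal sketch may help to show the intended control flow. This assumes direct access to the private `cancellation_zone.dart` library, which is not part of the public API:

```dart
// Internal import, shown only to illustrate how the pieces fit together.
import 'package:drift/src/runtime/cancellation_zone.dart';

Future<void> cancellationExample() async {
  final token = runCancellable(() async {
    for (var i = 0; i < 100; i++) {
      // Throws a CancellationException once cancel() has been requested.
      checkIfCancelled();
      await Future<void>.delayed(const Duration(milliseconds: 10));
    }
    return 42;
  });

  // Request cancellation from the outside.
  token.cancel();

  // Prints null because the operation was cancelled before finishing.
  print(await token.resultOrNullIfCancelled);
}
```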
+abstract class CustomResultSet { + /// The raw [QueryRow] from where this result set was extracted. + final QueryRow row; + + /// Default constructor. + CustomResultSet(this.row); +} diff --git a/drift/lib/src/runtime/data_class.dart b/drift/lib/src/runtime/data_class.dart new file mode 100644 index 00000000..6ad76a5b --- /dev/null +++ b/drift/lib/src/runtime/data_class.dart @@ -0,0 +1,239 @@ +import 'dart:convert'; + +import 'package:collection/collection.dart'; +import 'package:drift/drift.dart'; +import 'package:meta/meta.dart'; + +/// Common interface for objects which can be inserted or updated into a +/// database. +/// [D] is the associated data class. +@optionalTypeArgs +abstract class Insertable { + /// Converts this object into a map of column names to expressions to insert + /// or update. + /// + /// Note that the keys in the map are the raw column names, they're not + /// escaped. + /// + /// The [nullToAbsent] can be used on [DataClass]es to control whether null + /// fields should be set to a null constant in sql or absent from the map. + /// Other implementations ignore that [nullToAbsent], it mainly exists for + /// legacy reasons. + Map toColumns(bool nullToAbsent); +} + +/// A common supertype for all data classes generated by moor. Data classes are +/// immutable structures that represent a single row in a database table. +abstract class DataClass { + /// Constant constructor so that generated data classes can be constant. + const DataClass(); + + /// Converts this object into a representation that can be encoded with + /// [json]. The [serializer] can be used to configure how individual values + /// will be encoded. By default, [DriftRuntimeOptions.defaultSerializer] will + /// be used. See [ValueSerializer.defaults] for details. + Map toJson({ValueSerializer? serializer}); + + /// Converts this object into a json representation. The [serializer] can be + /// used to configure how individual values will be encoded. By default, + /// [DriftRuntimeOptions.defaultSerializer] will be used. See + /// [ValueSerializer.defaults] for details. + String toJsonString({ValueSerializer? serializer}) { + return json.encode(toJson(serializer: serializer)); + } + + /// Used internally be generated code + @protected + static dynamic parseJson(String jsonString) { + return json.decode(jsonString); + } +} + +/// An update companion for a [DataClass] which is used to write data into a +/// database using [InsertStatement.insert] or [UpdateStatement.write]. +/// +/// [D] is the associated data class for this companion. +/// +/// See also: +/// - the explanation in the changelog for 1.5 +/// - https://github.com/simolus3/moor/issues/25 +abstract class UpdateCompanion implements Insertable { + /// Constant constructor so that generated companion classes can be constant. + const UpdateCompanion(); + + static const _mapEquality = MapEquality(); + + @override + int get hashCode { + return _mapEquality.hash(toColumns(false)); + } + + @override + bool operator ==(Object other) { + if (identical(this, other)) return true; + if (other is! UpdateCompanion) return false; + + return _mapEquality.equals(other.toColumns(false), toColumns(false)); + } +} + +/// An [Insertable] implementation based on raw column expressions. +/// +/// Mostly used in generated code. +class RawValuesInsertable implements Insertable { + /// A map from column names to a value that should be inserted or updated. 
+ /// + /// See also: + /// - [toColumns], which returns [data] in a [RawValuesInsertable] + final Map data; + + /// Creates a [RawValuesInsertable] based on the [data] to insert or update. + const RawValuesInsertable(this.data); + + @override + Map toColumns(bool nullToAbsent) => data; + + @override + String toString() { + return 'RawValuesInsertable($data)'; + } +} + +/// A wrapper around arbitrary data [T] to indicate presence or absence +/// explicitly. +/// +/// [Value]s are commonly used in companions to distringuish between `null` and +/// absent values. +/// For instance, consider a table with a nullable column with a non-nullable +/// default value: +/// +/// ```sql +/// CREATE TABLE orders ( +/// priority INT DEFAULT 1 -- may be null if there's no assigned priority +/// ); +/// +/// For inserts in Dart, there are three different scenarios for the `priority` +/// column: +/// +/// - It may be set to `null`, overriding the default value +/// - It may be absent, meaning that the default value should be used +/// - It may be set to an `int` to override the default value +/// ``` +/// +/// As you can see, a simple `int?` does not provide enough information to +/// distinguish between the three cases. A `null` value could mean that the +/// column is absent, or that it should explicitly be set to `null`. +/// For this reason, moor introduces the [Value] wrapper to make the distinction +/// explicit. +class Value { + /// Whether this [Value] wrapper contains a present [value] that should be + /// inserted or updated. + final bool present; + + final T? _value; + + /// If this value is [present], contains the value to update or insert. + T get value => _value as T; + + /// Create a (present) value by wrapping the [value] provided. + const Value(T value) + : _value = value, + present = true; + + /// Create an absent value that will not be written into the database, the + /// default value or null will be used instead. + const Value.absent() + : _value = null, + present = false; + + /// Create a value that is absent if [value] is `null` and [present] if it's + /// not. + /// + /// The functionality is equiavalent to the following: + /// `x != null ? Value(x) : Value.absent()`. + /// + /// This constructor should only be used when [T] is not nullable. If [T] were + /// nullable, there wouldn't be a clear interpretation for a `null` [value]. + /// See the overall documentation on [Value] for details. + const Value.ofNullable(T? value) + : assert( + value != null || null is! T, + "Value.absentIfNull(null) can't be used for a nullable T, since the " + 'null value could be both absent and present.', + ), + _value = value, + present = value != null; + + @override + String toString() => present ? 'Value($value)' : 'Value.absent()'; + + @override + bool operator ==(Object other) => + identical(this, other) || + other is Value && present == other.present && _value == other._value; + + @override + int get hashCode => present.hashCode ^ _value.hashCode; +} + +/// Serializer responsible for mapping atomic types from and to json. +abstract class ValueSerializer { + /// Constant super-constructor to allow constant child classes. + const ValueSerializer(); + + /// The builtin default serializer. + /// + /// This serializer won't transform numbers or strings. Date times will be + /// encoded as a unix-timestamp. + /// + /// To override the default serializer moor uses, you can change the + /// [DriftRuntimeOptions.defaultSerializer] field. 
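For the `orders` example above, the three insert scenarios map to companions as follows. This is a hedged sketch that assumes a generated `OrdersCompanion` and is written as a method on the database class:

```dart
Future<void> insertOrders() async {
  // Overrides the default with an explicit priority.
  await into(orders).insert(const OrdersCompanion(priority: Value(3)));

  // Explicitly stores NULL even though a default value exists.
  await into(orders).insert(const OrdersCompanion(priority: Value(null)));

  // Omits the column entirely, so the default of 1 is used.
  await into(orders).insert(const OrdersCompanion(priority: Value.absent()));
}
```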
+ const factory ValueSerializer.defaults() = _DefaultValueSerializer; + + /// Converts the [value] to something that can be passed to + /// [JsonCodec.encode]. + dynamic toJson(T value); + + /// Inverse of [toJson]: Converts a value obtained from [JsonCodec.decode] + /// into a value that can be hold by data classes. + T fromJson(dynamic json); +} + +class _DefaultValueSerializer extends ValueSerializer { + const _DefaultValueSerializer(); + + @override + T fromJson(dynamic json) { + if (json == null) { + return null as T; + } + + final _typeList = []; + + if (_typeList is List) { + return DateTime.fromMillisecondsSinceEpoch(json as int) as T; + } + + if (_typeList is List && json is int) { + return json.toDouble() as T; + } + + // blobs are encoded as a regular json array, so we manually convert that to + // a Uint8List + if (_typeList is List && json is! Uint8List) { + final asList = (json as List).cast(); + return Uint8List.fromList(asList) as T; + } + + return json as T; + } + + @override + dynamic toJson(T value) { + if (value is DateTime) { + return value.millisecondsSinceEpoch; + } + + return value; + } +} diff --git a/drift/lib/src/runtime/data_verification.dart b/drift/lib/src/runtime/data_verification.dart new file mode 100644 index 00000000..2e850022 --- /dev/null +++ b/drift/lib/src/runtime/data_verification.dart @@ -0,0 +1,76 @@ +import 'package:drift/drift.dart'; + +/// Additional information that is passed to [GeneratedColumn]s when verifying +/// data to provide more helpful error messages. +class VerificationMeta { + /// The dart getter name of the property being validated. + final String dartGetterName; + + /// Used internally by moor + const VerificationMeta(this.dartGetterName); +} + +/// Returned by [GeneratedColumn.isAcceptableValue] to provide a description +/// when a valid is invalid. +class VerificationResult { + /// Whether data for a column passed Dart-side integrity checks + final bool success; + + /// If not [success]-ful, contains a human readable description of what went + /// wrong. + final String? message; + + /// Used internally by moor + const VerificationResult(this.success, this.message); + + /// Used internally by moor + const VerificationResult.success() + : success = true, + message = null; + + /// Used internally by moor + const VerificationResult.failure(this.message) : success = false; +} + +/// Used internally by moor for integrity checks. +class VerificationContext { + final Map _errors; + + /// Used internally by moor + bool get dataValid => _errors.isEmpty; + + /// Creates a verification context, which stores the individual integrity + /// check results. Used by generated code. + VerificationContext() : _errors = {}; + + /// Constructs a verification context that can't be used to report errors. + /// This is used internally by moor if integrity checks have been disabled. 
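As a hedged sketch of replacing the default serializer, assuming the generic `toJson<T>`/`fromJson<T>` method signatures from the drift API, a serializer that stores `DateTime` values as ISO-8601 strings could look like this:

```dart
import 'package:drift/drift.dart';

class IsoDateTimeSerializer extends ValueSerializer {
  const IsoDateTimeSerializer();

  @override
  dynamic toJson<T>(T value) {
    // Store DateTime values as ISO-8601 strings instead of unix timestamps.
    if (value is DateTime) return value.toIso8601String();
    return const ValueSerializer.defaults().toJson(value);
  }

  @override
  T fromJson<T>(dynamic json) {
    // Sketch only: covers non-nullable DateTime columns.
    if (T == DateTime && json is String) {
      return DateTime.parse(json) as T;
    }
    return const ValueSerializer.defaults().fromJson(json);
  }
}

// Registering it globally:
// driftRuntimeOptions.defaultSerializer = const IsoDateTimeSerializer();
```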
+ const VerificationContext.notEnabled() : _errors = const {}; + + /// Used internally by moor when inserting + void handle(VerificationMeta meta, VerificationResult result) { + if (!result.success) { + _errors[meta] = result; + } + } + + /// Used internally by moor + void missing(VerificationMeta meta) { + _errors[meta] = const VerificationResult.failure( + "This value was required, but isn't present"); + } + + /// Used internally by moor + void throwIfInvalid(dynamic dataObject) { + if (dataValid) return; + + final messageBuilder = + StringBuffer('Sorry, $dataObject cannot be used for that because: \n'); + + _errors.forEach((meta, result) { + messageBuilder.write('• ${meta.dartGetterName}: ${result.message}\n'); + }); + + throw InvalidDataException(messageBuilder.toString(), _errors); + } +} diff --git a/drift/lib/src/runtime/exceptions.dart b/drift/lib/src/runtime/exceptions.dart new file mode 100644 index 00000000..6c6bfb59 --- /dev/null +++ b/drift/lib/src/runtime/exceptions.dart @@ -0,0 +1,75 @@ +import 'data_verification.dart'; + +/// Thrown when one attempts to insert or update invalid data into a table. +class InvalidDataException implements Exception { + /// A message explaining why the data couldn't be inserted into the database. + final String message; + + /// All errors that were found in this [InvalidDataException]. + final Map errors; + + /// Construct a new [InvalidDataException] from the [message]. + InvalidDataException(this.message, [this.errors = const {}]); + + @override + String toString() { + return 'InvalidDataException: $message'; + } +} + +/// A wrapper class for internal exceptions thrown by the underlying database +/// engine when moor can give additional context or help. +/// +/// For instance, when we know that an invalid statement has been constructed, +/// we catch the database exception and try to explain why that has happened. +class MoorWrappedException implements Exception { + /// Contains a possible description of why the underlying [cause] occurred, + /// for instance because a moor api was misused. + final String message; + + /// The underlying exception caught by moor + final Object? cause; + + /// The original stacktrace when caught by moor + final StackTrace? trace; + + /// Creates a new [MoorWrappedException] to provide additional details about + /// an underlying error from the database. + MoorWrappedException({required this.message, this.cause, this.trace}); + + @override + String toString() { + return '$cause at \n$trace\n' + 'Moor detected a possible cause for this: $message'; + } +} + +/// Exception thrown by moor when rolling back a transaction fails. +/// +/// When using a `transaction` block, transactions are automatically rolled back +/// when the inner block throws an exception. +/// If sending the `ROLLBACK TRANSACTION` command fails as well, moor reports +/// both that and the initial error with a [CouldNotRollBackException]. +class CouldNotRollBackException implements Exception { + /// The original exception that caused the transaction to be rolled back. + final Object cause; + + /// The [StackTrace] of the original [cause]. + final StackTrace originalStackTrace; + + /// The exception thrown by the database implementation when attempting to + /// issue the `ROLLBACK` command.s + final Object exception; + + /// Creates a [CouldNotRollBackException] from the [cause], its + /// [originalStackTrace] and the [exception]. 
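A short sketch of handling this exception around a transaction, assuming `db` is a generated database instance:

```dart
import 'package:drift/drift.dart';

Future<void> runSafely(GeneratedDatabase db) async {
  try {
    await db.transaction(() async {
      // ... statements that might throw
    });
  } on CouldNotRollBackException catch (e) {
    // Both the original failure and the failed ROLLBACK are preserved.
    print('rollback failed: ${e.exception}, original error: ${e.cause}');
  }
}
```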
+ CouldNotRollBackException( + this.cause, this.originalStackTrace, this.exception); + + @override + String toString() { + return 'CouldNotRollBackException: $exception. \n' + 'For context: The transaction was rolled back because of $cause, which ' + 'was thrown here: \n$originalStackTrace'; + } +} diff --git a/drift/lib/src/runtime/executor/connection_pool.dart b/drift/lib/src/runtime/executor/connection_pool.dart new file mode 100644 index 00000000..fd6477db --- /dev/null +++ b/drift/lib/src/runtime/executor/connection_pool.dart @@ -0,0 +1,91 @@ +import 'package:drift/backends.dart'; +import 'package:drift/drift.dart'; + +/// A query executor for moor that delegates work to multiple executors. +abstract class MultiExecutor extends QueryExecutor { + /// Creates a query executor that will delegate work to different executors. + /// + /// Updating statements, or statements that run in a transaction, will be run + /// with [write]. Select statements outside of a transaction are executed on + /// [read]. + factory MultiExecutor( + {required QueryExecutor read, required QueryExecutor write}) { + return _MultiExecutorImpl(read, write); + } + + MultiExecutor._(); +} + +class _MultiExecutorImpl extends MultiExecutor { + final QueryExecutor _reads; + final QueryExecutor _writes; + + _MultiExecutorImpl(this._reads, this._writes) : super._(); + + @override + Future ensureOpen(QueryExecutorUser user) async { + // note: It's crucial that we open the writes first. The reading connection + // doesn't run migrations, but has to set the user version. + await _writes.ensureOpen(user); + await _reads.ensureOpen(_NoMigrationsWrapper(user)); + + return true; + } + + @override + TransactionExecutor beginTransaction() { + return _writes.beginTransaction(); + } + + @override + Future runBatched(BatchedStatements statements) async { + await _writes.runBatched(statements); + } + + @override + Future runCustom(String statement, [List? args]) async { + await _writes.runCustom(statement, args); + } + + @override + Future runDelete(String statement, List args) async { + return await _writes.runDelete(statement, args); + } + + @override + Future runInsert(String statement, List args) async { + return await _writes.runInsert(statement, args); + } + + @override + Future>> runSelect( + String statement, List args) async { + return await _reads.runSelect(statement, args); + } + + @override + Future runUpdate(String statement, List args) async { + return await _writes.runUpdate(statement, args); + } + + @override + Future close() async { + await _writes.close(); + await _reads.close(); + } +} + +class _NoMigrationsWrapper extends QueryExecutorUser { + final QueryExecutorUser inner; + + _NoMigrationsWrapper(this.inner); + + @override + int get schemaVersion => inner.schemaVersion; + + @override + Future beforeOpen( + QueryExecutor executor, OpeningDetails details) async { + // don't run any migrations + } +} diff --git a/drift/lib/src/runtime/executor/delayed_stream_queries.dart b/drift/lib/src/runtime/executor/delayed_stream_queries.dart new file mode 100644 index 00000000..1aac364c --- /dev/null +++ b/drift/lib/src/runtime/executor/delayed_stream_queries.dart @@ -0,0 +1,53 @@ +import 'package:drift/src/runtime/api/runtime_api.dart'; +import 'package:meta/meta.dart'; + +import 'stream_queries.dart'; + +/// Version of [StreamQueryStore] that delegates work to an asynchronously- +/// available delegate. +/// This class is internal and should not be exposed to moor users. It's used +/// through a delayed database connection. 
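A hedged sketch of wiring up such a read/write split, assuming `NativeDatabase` from `package:drift/native.dart` and an sqlite3 file that supports concurrent access (for instance in WAL mode):

```dart
import 'dart:io';

import 'package:drift/drift.dart';
import 'package:drift/native.dart';

QueryExecutor openConnection() {
  final file = File('app.db');

  // Writes and transactions go to one connection, plain selects to the other.
  return MultiExecutor(
    read: NativeDatabase(file),
    write: NativeDatabase(file),
  );
}
```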
+@internal +class DelayedStreamQueryStore implements StreamQueryStore { + late Future _delegate; + StreamQueryStore? _resolved; + + /// Creates a [StreamQueryStore] that will work after [delegate] is + /// available. + DelayedStreamQueryStore(Future delegate) { + _delegate = delegate.then((value) => _resolved = value); + } + + @override + Future close() async => (await _delegate).close(); + + @override + void handleTableUpdates(Set updates) { + _resolved?.handleTableUpdates(updates); + } + + @override + void markAsClosed(QueryStream stream, Function() whenRemoved) { + throw UnimplementedError('The stream will call this on the delegate'); + } + + @override + void markAsOpened(QueryStream stream) { + throw UnimplementedError('The stream will call this on the delegate'); + } + + @override + Stream>> registerStream( + QueryStreamFetcher fetcher) { + return Stream.fromFuture(_delegate) + .asyncExpand((resolved) => resolved.registerStream(fetcher)) + .asBroadcastStream(); + } + + @override + Stream> updatesForSync(TableUpdateQuery query) { + return Stream.fromFuture(_delegate) + .asyncExpand((resolved) => resolved.updatesForSync(query)) + .asBroadcastStream(); + } +} diff --git a/drift/lib/src/runtime/executor/executor.dart b/drift/lib/src/runtime/executor/executor.dart new file mode 100644 index 00000000..03076be9 --- /dev/null +++ b/drift/lib/src/runtime/executor/executor.dart @@ -0,0 +1,161 @@ +import 'dart:async'; + +import 'package:collection/collection.dart'; +import 'package:drift/backends.dart'; +import 'package:drift/drift.dart' show OpeningDetails; + +/// A query executor is responsible for executing statements on a database and +/// return their results in a raw form. +/// +/// This is an internal api of moor, which can break often. If you want to +/// implement custom database backends, consider using the new `backends` API. +/// The [moor_flutter implementation](https://github.com/simolus3/moor/blob/develop/moor_flutter/lib/moor_flutter.dart) +/// might be useful as a reference. If you want to write your own database +/// engine to use with moor and run into issues, please consider creating an +/// issue. +abstract class QueryExecutor { + /// The [SqlDialect] to use for this database engine. + SqlDialect get dialect => SqlDialect.sqlite; + + /// Opens the executor, if it has not yet been opened. + Future ensureOpen(QueryExecutorUser user); + + /// Runs a select statement with the given variables and returns the raw + /// results. + Future>> runSelect( + String statement, List args); + + /// Runs an insert statement with the given variables. Returns the row id or + /// the auto_increment id of the inserted row. + Future runInsert(String statement, List args); + + /// Runs an update statement with the given variables and returns how many + /// rows where affected. + Future runUpdate(String statement, List args); + + /// Runs an delete statement and returns how many rows where affected. + Future runDelete(String statement, List args); + + /// Runs a custom SQL statement without any variables. The result of that + /// statement will be ignored. + Future runCustom(String statement, [List? args]); + + /// Prepares and runs [statements]. + /// + /// Running them doesn't need to happen in a transaction. When using moor's + /// batch api, moor will call this method from a transaction either way. This + /// method mainly exists to save duplicate parsing costs, allowing each + /// statement to be prepared only once. 
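To illustrate the shape of a batch, using the `BatchedStatements` and `ArgumentsForBatchedStatement` classes defined just below, here is a hedged sketch against a hypothetical `todos` table:

```dart
import 'package:drift/drift.dart';

Future<void> runExampleBatch(QueryExecutor executor) async {
  final statements = BatchedStatements(
    [
      'INSERT INTO todos (content) VALUES (?)',
      'UPDATE todos SET done = 1 WHERE id = ?',
    ],
    [
      // Both inserts re-use the prepared statement at index 0.
      ArgumentsForBatchedStatement(0, ['first entry']),
      ArgumentsForBatchedStatement(0, ['second entry']),
      ArgumentsForBatchedStatement(1, [3]),
    ],
  );

  await executor.runBatched(statements);
}
```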
+ Future runBatched(BatchedStatements statements); + + /// Starts a [TransactionExecutor]. + TransactionExecutor beginTransaction(); + + /// Closes this database connection and releases all resources associated with + /// it. Implementations should also handle [close] calls in a state where the + /// database isn't open. + Future close() async { + // no-op per default for backwards compatibility + } +} + +/// Callbacks passed to [QueryExecutor.ensureOpen] to run schema migrations when +/// the database is first opened. +abstract class QueryExecutorUser { + /// The schema version to set on the database when it's opened. + int get schemaVersion; + + /// A callbacks that runs after the database connection has been established, + /// but before any other query is sent. + /// + /// The query executor will wait for this future to complete before running + /// any other query. Queries running on the [executor] are an exception to + /// this, they can be used to run migrations. + /// No matter how often [QueryExecutor.ensureOpen] is called, this method will + /// not be called more than once. + Future beforeOpen(QueryExecutor executor, OpeningDetails details); +} + +const _equality = ListEquality(); + +/// Stores information needed to run batched statements in the order they were +/// issued without preparing statements multiple times. +class BatchedStatements { + /// All sql statements that need to be prepared. + /// + /// A statement might run multiple times with different arguments. + final List statements; + + /// Stores which sql statement should be run with what arguments. + final List arguments; + + /// Creates a collection of batched statements by splitting the sql and the + /// bound arguments. + BatchedStatements(this.statements, this.arguments); + + @override + int get hashCode { + return Object.hash(_equality.hash(statements), _equality.hash(arguments)); + } + + @override + bool operator ==(Object other) { + return other is BatchedStatements && + _equality.equals(other.statements, statements) && + _equality.equals(other.arguments, arguments); + } + + @override + String toString() { + return 'BatchedStatements($statements, $arguments)'; + } +} + +/// Instruction to run a batched sql statement with the arguments provided. +class ArgumentsForBatchedStatement { + /// Index of the sql statement in the [BatchedStatements.statements] of the + /// [BatchedStatements] containing this argument set. + final int statementIndex; + + /// Bound arguments for the referenced statement. + final List arguments; + + /// Used internally by moor. + ArgumentsForBatchedStatement(this.statementIndex, this.arguments); + + @override + int get hashCode { + return Object.hash(statementIndex, _equality); + } + + @override + bool operator ==(Object other) { + return other is ArgumentsForBatchedStatement && + other.statementIndex == statementIndex && + _equality.equals(other.arguments, arguments); + } + + @override + String toString() { + return 'ArgumentsForBatchedStatement($statementIndex, $arguments)'; + } +} + +/// A [QueryExecutor] that runs multiple queries atomically. +abstract class TransactionExecutor extends QueryExecutor { + /// Completes the transaction. No further queries may be sent to to this + /// [QueryExecutor] after this method was called. + /// + /// This may be called before [ensureOpen] was awaited, implementations must + /// support this. That state implies that no query was sent, so it should be + /// a no-op. + Future send(); + + /// Cancels this transaction. 
No further queries may be sent ot this + /// [QueryExecutor] after this method was called. + /// + /// This may be called before [ensureOpen] was awaited, implementations must + /// support this. That state implies that no query was sent, so it should be + /// a no-op. + Future rollback(); +} diff --git a/drift/lib/src/runtime/executor/helpers/delegates.dart b/drift/lib/src/runtime/executor/helpers/delegates.dart new file mode 100644 index 00000000..31421f43 --- /dev/null +++ b/drift/lib/src/runtime/executor/helpers/delegates.dart @@ -0,0 +1,196 @@ +import 'dart:async' show FutureOr; +import 'dart:typed_data' show Uint8List; + +import 'package:drift/drift.dart'; +import 'package:drift/src/runtime/executor/helpers/results.dart'; + +/// An interface that supports sending database queries. Used as a backend for +/// drift. +/// +/// Database implementations should support the following types both for +/// variables and result sets: +/// - [int] +/// - [double] +/// - [String] +/// - [Uint8List] +abstract class DatabaseDelegate extends QueryDelegate { + /// Whether the database managed by this delegate is in a transaction at the + /// moment. This field is only set when the [transactionDelegate] is a + /// [NoTransactionDelegate], because in that case transactions are run on + /// this delegate. + bool isInTransaction = false; + + /// Returns an appropriate class to resolve the current schema version in + /// this database. + /// + /// Common implementations will be: + /// - [NoVersionDelegate] for databases without a schema version (such as an + /// MySql server we connect to) + /// - [OnOpenVersionDelegate] for databases whose schema version can only be + /// set while opening it (such as sqflite) + /// - [DynamicVersionDelegate] for databases where moor can set the schema + /// version at any time (used for the web and VM implementation) + DbVersionDelegate get versionDelegate; + + /// The way this database engine starts transactions. + TransactionDelegate get transactionDelegate; + + /// A future that completes with `true` when this database is open and with + /// `false` when its not. The future may never complete with an error or with + /// null. It should return relatively quickly, as moor queries it before each + /// statement it sends to the database. + FutureOr get isOpen; + + /// Opens the database. Moor will only call this when [isOpen] has returned + /// false before. Further, moor will not attempt to open a database multiple + /// times, so you don't have to worry about a connection being created + /// multiple times. + /// + /// The [QueryExecutorUser] is the user-defined database annotated with + /// [UseMoor]. It might be useful to read the + /// [QueryExecutorUser.schemaVersion] if that information is required while + /// opening the database. + Future open(QueryExecutorUser db); + + /// Closes this database. When the future completes, all resources used + /// by this database should have been disposed. + Future close() async { + // default no-op implementation + } + + /// Callback from moor after the database has been fully opened and all + /// migrations ran. + void notifyDatabaseOpened(OpeningDetails details) { + // default no-op + } + + /// The [SqlDialect] understood by this database engine. + SqlDialect get dialect => SqlDialect.sqlite; +} + +/// An interface which can execute sql statements. +abstract class QueryDelegate { + /// Prepares and executes the [statement], binding the variables to [args]. 
+ /// Its safe to assume that the [statement] is a select statement, the + /// [QueryResult] that it returns should be returned from here. + /// + /// If the statement can't be executed, an exception should be thrown. See + /// the class documentation of [DatabaseDelegate] on what types are supported. + Future runSelect(String statement, List args); + + /// Prepares and executes the [statement] with the variables bound to [args]. + /// The statement will either be an `UPDATE` or `DELETE` statement. + /// + /// If the statement completes successfully, the amount of changed rows should + /// be returned, or `0` if no rows where updated. Should throw if the + /// statement can't be executed. + Future runUpdate(String statement, List args); + + /// Prepares and executes the [statement] with the variables bound to [args]. + /// The statement will be an `INSERT` statement. + /// + /// If the statement completes successfully, the insert id of the row can be + /// returned. If that information is not available, `null` can be returned. + /// The method should throw if the statement can't be executed. + Future runInsert(String statement, List args); + + /// Runs a custom [statement] with the given [args]. Ignores all results, but + /// throws when the statement can't be executed. + Future runCustom(String statement, List args); + + /// Runs multiple [statements] without having to prepare the same statement + /// multiple times. + /// + /// See also: + /// - [QueryExecutor.runBatched]. + Future runBatched(BatchedStatements statements) async { + // default, inefficient implementation + for (final application in statements.arguments) { + final sql = statements.statements[application.statementIndex]; + + await runCustom(sql, application.arguments); + } + } +} + +/// An interface to start and manage transactions. +/// +/// Clients may not extend, implement or mix-in this class directly. +abstract class TransactionDelegate { + /// Const constructor on superclass + const TransactionDelegate(); +} + +/// A [TransactionDelegate] for database APIs which don't already support +/// creating transactions. Moor will send a `BEGIN TRANSACTION` statement at the +/// beginning, then block the database, and finally send a `COMMIT` statement +/// at the end. +class NoTransactionDelegate extends TransactionDelegate { + /// The statement that starts a transaction on this database engine. + final String start; + + /// The statement that commits a transaction on this database engine. + final String commit; + + /// The statement that will perform a rollback of a transaction on this + /// database engine. + final String rollback; + + /// Construct a transaction delegate indicating that native transactions + /// aren't supported and need to be emulated by issuing statements and + /// locking the database. + const NoTransactionDelegate({ + this.start = 'BEGIN TRANSACTION', + this.commit = 'COMMIT TRANSACTION', + this.rollback = 'ROLLBACK TRANSACTION', + }); +} + +/// A [TransactionDelegate] for database APIs which do support creating and +/// managing transactions themselves. +abstract class SupportedTransactionDelegate extends TransactionDelegate { + /// Constant constructor on superclass + const SupportedTransactionDelegate(); + + /// Start a transaction, which we assume implements [QueryDelegate], and call + /// [run] with the transaction. + /// + /// If [run] completes with an error, rollback. Otherwise, commit. 
+ void startTransaction(Future Function(QueryDelegate) run); +} + +/// An interface that supports setting the database version. +/// +/// Clients may not extend, implement or mix-in this class directly. +abstract class DbVersionDelegate { + /// Constant constructor on superclass + const DbVersionDelegate(); +} + +/// A database that doesn't support setting schema versions. +class NoVersionDelegate extends DbVersionDelegate { + /// Delegate indicating that the underlying database does not support schema + /// versions. + const NoVersionDelegate(); +} + +/// A database that only support setting the schema version while being opened. +class OnOpenVersionDelegate extends DbVersionDelegate { + /// Function that returns with the current schema version. + final Future Function() loadSchemaVersion; + + /// See [OnOpenVersionDelegate]. + const OnOpenVersionDelegate(this.loadSchemaVersion); +} + +/// A database that supports setting the schema version at any time. +abstract class DynamicVersionDelegate extends DbVersionDelegate { + /// See [DynamicVersionDelegate] + const DynamicVersionDelegate(); + + /// Load the current schema version stored in this database. + Future get schemaVersion; + + /// Writes the schema [version] to the database. + Future setSchemaVersion(int version); +} diff --git a/drift/lib/src/runtime/executor/helpers/engines.dart b/drift/lib/src/runtime/executor/helpers/engines.dart new file mode 100644 index 00000000..7d2521a0 --- /dev/null +++ b/drift/lib/src/runtime/executor/helpers/engines.dart @@ -0,0 +1,380 @@ +import 'dart:async'; + +import 'package:drift/drift.dart'; +import 'package:pedantic/pedantic.dart'; + +import '../../../utils/synchronized.dart'; +import '../../cancellation_zone.dart'; +import '../executor.dart'; +import 'delegates.dart'; + +abstract class _BaseExecutor extends QueryExecutor { + final Lock _lock = Lock(); + + QueryDelegate get impl; + + bool get isSequential => false; + + bool get logStatements => false; + + /// Used to provide better error messages when calling operations without + /// calling [ensureOpen] before. + bool _ensureOpenCalled = false; + + /// Whether this executor has explicitly been closed. + bool _closed = false; + + bool _debugCheckIsOpen() { + if (!_ensureOpenCalled) { + throw StateError(''' +Tried to run an operation without first calling QueryExecutor.ensureOpen()! + +If you're seeing this exception from a moor database, it may indicate a bug in +moor itself. Please consider opening an issue with the stack trace and details +on how to reproduce this.'''); + } + + if (_closed) { + throw StateError(''' +This database or transaction runner has already been closed and may not be used +anymore. 
+ +If this is happening in a transaction, you might be using the transaction +without awaiting every statement in it.'''); + } + + return true; + } + + Future _synchronized(Future Function() action) { + if (isSequential) { + return _lock.synchronized(() { + checkIfCancelled(); + return action(); + }); + } else { + // support multiple operations in parallel, so just run right away + return action(); + } + } + + void _log(String sql, List args) { + if (logStatements) { + driftRuntimeOptions.debugPrint('Moor: Sent $sql with args $args'); + } + } + + @override + Future>> runSelect( + String statement, List args) async { + final result = await _synchronized(() { + assert(_debugCheckIsOpen()); + _log(statement, args); + return impl.runSelect(statement, args); + }); + return result.asMap.toList(); + } + + @override + Future runUpdate(String statement, List args) { + return _synchronized(() { + assert(_debugCheckIsOpen()); + _log(statement, args); + return impl.runUpdate(statement, args); + }); + } + + @override + Future runDelete(String statement, List args) { + return _synchronized(() { + assert(_debugCheckIsOpen()); + _log(statement, args); + return impl.runUpdate(statement, args); + }); + } + + @override + Future runInsert(String statement, List args) { + return _synchronized(() { + assert(_debugCheckIsOpen()); + _log(statement, args); + return impl.runInsert(statement, args); + }); + } + + @override + Future runCustom(String statement, [List? args]) { + return _synchronized(() { + assert(_debugCheckIsOpen()); + final resolvedArgs = args ?? const []; + _log(statement, resolvedArgs); + return impl.runCustom(statement, resolvedArgs); + }); + } + + @override + Future runBatched(BatchedStatements statements) { + return _synchronized(() { + assert(_debugCheckIsOpen()); + if (logStatements) { + driftRuntimeOptions + .debugPrint('Moor: Executing $statements in a batch'); + } + return impl.runBatched(statements); + }); + } +} + +class _TransactionExecutor extends _BaseExecutor + implements TransactionExecutor { + final DelegatedDatabase _db; + + @override + late QueryDelegate impl; + + @override + bool get isSequential => _db.isSequential; + + @override + bool get logStatements => _db.logStatements; + + final Completer _sendCalled = Completer(); + Completer? _openingCompleter; + + String? _sendOnCommit; + String? _sendOnRollback; + + Future get completed => _sendCalled.future; + bool _sendFakeErrorOnRollback = false; + + _TransactionExecutor(this._db); + + @override + TransactionExecutor beginTransaction() { + throw Exception("Nested transactions aren't supported"); + } + + @override + Future ensureOpen(_) async { + assert( + !_closed, + 'Transaction was used after it completed. Are you missing an await ' + 'somewhere?', + ); + + _ensureOpenCalled = true; + if (_openingCompleter != null) { + return await _openingCompleter!.future; + } + + _openingCompleter = Completer(); + + final transactionManager = _db.delegate.transactionDelegate; + final transactionStarted = Completer(); + + if (transactionManager is NoTransactionDelegate) { + assert( + _db.isSequential, + 'When using the default NoTransactionDelegate, the database must be ' + 'sequential.'); + // run all the commands on the main database, which we block while the + // transaction is running. 
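+      // In other words, while the lock is held the emulated transaction
+      // boils down to (sketch):
+      //   runCustom(transactionManager.start)  -> e.g. BEGIN TRANSACTION
+      //   ...statements issued through `impl` by the transaction user...
+      //   runCustom(_sendOnCommit / _sendOnRollback) from send()/rollback()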
+ unawaited(_db._synchronized(() async { + impl = _db.delegate; + await runCustom(transactionManager.start, const []); + _db.delegate.isInTransaction = true; + + _sendOnCommit = transactionManager.commit; + _sendOnRollback = transactionManager.rollback; + + transactionStarted.complete(); + + // release the database lock after the transaction completes + await _sendCalled.future; + })); + } else if (transactionManager is SupportedTransactionDelegate) { + transactionManager.startTransaction((transaction) async { + impl = transaction; + // specs say that the db implementation will perform a rollback when + // this future completes with an error. + _sendFakeErrorOnRollback = true; + transactionStarted.complete(); + + // this callback must be running as long as the transaction, so we do + // that until send() was called. + await _sendCalled.future; + }); + } else { + throw Exception('Invalid delegate: Has unknown transaction delegate'); + } + + await transactionStarted.future; + _openingCompleter!.complete(true); + return true; + } + + @override + Future send() async { + // don't do anything if the transaction completes before it was opened + if (_openingCompleter == null) return; + + if (_sendOnCommit != null) { + await runCustom(_sendOnCommit!, const []); + _db.delegate.isInTransaction = false; + } + + _sendCalled.complete(); + _closed = true; + } + + @override + Future rollback() async { + // don't do anything if the transaction completes before it was opened + if (_openingCompleter == null) return; + + if (_sendOnRollback != null) { + await runCustom(_sendOnRollback!, const []); + _db.delegate.isInTransaction = false; + } + + if (_sendFakeErrorOnRollback) { + _sendCalled.completeError( + Exception('artificial exception to rollback the transaction')); + } else { + _sendCalled.complete(); + } + _closed = true; + } +} + +/// A database engine (implements [QueryExecutor]) that delegates the relevant +/// work to a [DatabaseDelegate]. +class DelegatedDatabase extends _BaseExecutor { + /// The [DatabaseDelegate] to send queries to. + final DatabaseDelegate delegate; + + @override + bool logStatements; + @override + final bool isSequential; + + @override + QueryDelegate get impl => delegate; + + @override + SqlDialect get dialect => delegate.dialect; + + final Lock _openingLock = Lock(); + + /// Constructs a delegated database by providing the [delegate]. + DelegatedDatabase(this.delegate, + {bool? logStatements, this.isSequential = false}) + : logStatements = logStatements ?? false; + + @override + Future ensureOpen(QueryExecutorUser user) { + return _openingLock.synchronized(() async { + if (_closed) { + return Future.error(StateError( + "Can't re-open a database after closing it. Please create a new " + 'database connection and open that instead.')); + } + + final alreadyOpen = await delegate.isOpen; + if (alreadyOpen) { + _ensureOpenCalled = true; + return true; + } + + await delegate.open(user); + _ensureOpenCalled = true; + await _runMigrations(user); + return true; + }); + } + + Future _runMigrations(QueryExecutorUser user) async { + final versionDelegate = delegate.versionDelegate; + int? oldVersion; + final currentVersion = user.schemaVersion; + + if (versionDelegate is NoVersionDelegate) { + // this one is easy. There is no version mechanism, so we don't run any + // migrations. Assume database is on latest version. 
+ oldVersion = user.schemaVersion; + } else if (versionDelegate is OnOpenVersionDelegate) { + // version has already been set during open + oldVersion = await versionDelegate.loadSchemaVersion(); + } else if (versionDelegate is DynamicVersionDelegate) { + oldVersion = await versionDelegate.schemaVersion; + // Note: We only update the schema version after migrations ran + } else { + throw Exception('Invalid delegate: $delegate. The versionDelegate getter ' + 'must not subclass DBVersionDelegate directly'); + } + + if (oldVersion == 0) { + // some database implementations use version 0 to indicate that the + // database was just created. We normalize that to null. + oldVersion = null; + } + + final openingDetails = OpeningDetails(oldVersion, currentVersion); + await user.beforeOpen(_BeforeOpeningExecutor(this), openingDetails); + + if (versionDelegate is DynamicVersionDelegate) { + // set version now, after migrations ran successfully + await versionDelegate.setSchemaVersion(currentVersion); + } + + delegate.notifyDatabaseOpened(openingDetails); + } + + @override + TransactionExecutor beginTransaction() { + return _TransactionExecutor(this); + } + + @override + Future close() { + return _openingLock.synchronized(() { + if (_ensureOpenCalled && !_closed) { + _closed = true; + + // Make sure the other methods throw an exception when used after + // close() + _ensureOpenCalled = false; + return delegate.close(); + } else { + // User never attempted to open the database, so this is a no-op. + return Future.value(); + } + }); + } +} + +/// Inside a `beforeOpen` callback, all moor apis must be available. At the same +/// time, the `beforeOpen` callback must complete before any query sent outside +/// of a `beforeOpen` callback can run. We do this by introducing a special +/// executor that delegates all work to the original executor, but without +/// blocking on `ensureOpen` +class _BeforeOpeningExecutor extends _BaseExecutor { + final DelegatedDatabase _base; + + _BeforeOpeningExecutor(this._base); + + @override + TransactionExecutor beginTransaction() => _base.beginTransaction(); + + @override + Future ensureOpen(_) { + _ensureOpenCalled = true; + return Future.value(true); + } + + @override + QueryDelegate get impl => _base.impl; + + @override + bool get logStatements => _base.logStatements; +} diff --git a/drift/lib/src/runtime/executor/helpers/results.dart b/drift/lib/src/runtime/executor/helpers/results.dart new file mode 100644 index 00000000..3ded3ec9 --- /dev/null +++ b/drift/lib/src/runtime/executor/helpers/results.dart @@ -0,0 +1,45 @@ +/// A result from an select statement. +class QueryResult { + /// Names of the columns returned by the select statement. + final List columnNames; + + /// The data returned by the select statement. Each list represents a row, + /// which has the data in the same order as [columnNames]. + final List> rows; + + final Map _columnIndexes; + + /// Constructs a [QueryResult] by specifying the order of column names in + /// [columnNames] and the associated data in [rows]. + QueryResult(this.columnNames, this.rows) + : _columnIndexes = { + for (var column in columnNames) + column: columnNames.lastIndexOf(column) + }; + + /// Converts the [rows] into [columnNames] and raw data [QueryResult.rows]. + /// We assume that each map in [rows] has the same keys. 
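+  ///
+  /// For example (hypothetical values):
+  ///
+  /// ```dart
+  /// final result = QueryResult.fromRows([
+  ///   {'id': 1, 'name': 'first'},
+  ///   {'id': 2, 'name': 'second'},
+  /// ]);
+  /// // result.columnNames: ['id', 'name']
+  /// // result.rows: [[1, 'first'], [2, 'second']]
+  /// ```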
+ factory QueryResult.fromRows(List> rows) { + if (rows.isEmpty) { + return QueryResult(const [], const []); + } + + final keys = rows.first.keys.toList(); + final mappedRows = [ + for (var row in rows) [for (var key in keys) row[key]] + ]; + + return QueryResult(keys, mappedRows); + } + + /// Returns a "list of maps" representation of this result set. Each map has + /// the same keys - the [columnNames]. The values are the actual values in + /// the row. + Iterable> get asMap { + return rows.map((row) { + return { + for (var column in columnNames) column: row[_columnIndexes[column]!], + }; + }); + } +} diff --git a/drift/lib/src/runtime/executor/stream_queries.dart b/drift/lib/src/runtime/executor/stream_queries.dart new file mode 100644 index 00000000..c9f49bd1 --- /dev/null +++ b/drift/lib/src/runtime/executor/stream_queries.dart @@ -0,0 +1,319 @@ +import 'dart:async'; +import 'dart:collection'; + +import 'package:collection/collection.dart'; +import 'package:drift/drift.dart'; +import 'package:drift/src/utils/start_with_value_transformer.dart'; +import 'package:meta/meta.dart'; +import 'package:pedantic/pedantic.dart'; + +import '../cancellation_zone.dart'; + +const _listEquality = ListEquality(); + +// This is an internal moor library that's never exported to users. +// ignore_for_file: public_member_api_docs + +/// Representation of a select statement that knows from which tables the +/// statement is reading its data and how to execute the query. +@internal +class QueryStreamFetcher { + /// Table updates that will affect this stream. + /// + /// If any of these tables changes, the stream must fetch its data again. + final TableUpdateQuery readsFrom; + + /// Key that can be used to check whether two fetchers will yield the same + /// result when operating on the same data. + final StreamKey? key; + + /// Function that asynchronously fetches the latest set of data. + final Future>> Function() fetchData; + + QueryStreamFetcher( + {required this.readsFrom, this.key, required this.fetchData}); +} + +/// Key that uniquely identifies a select statement. If two keys created from +/// two select statements are equal, the statements are equal as well. +/// +/// As two equal statements always yield the same result when operating on the +/// same data, this can make streams more efficient as we can return the same +/// stream for two equivalent queries. +@internal +class StreamKey { + final String sql; + final List variables; + + StreamKey(this.sql, this.variables); + + @override + int get hashCode { + return Object.hash(sql, _listEquality.hash(variables)); + } + + @override + bool operator ==(Object other) { + return identical(this, other) || + (other is StreamKey && + other.sql == sql && + _listEquality.equals(other.variables, variables)); + } +} + +/// Keeps track of active streams created from [SimpleSelectStatement]s and +/// updates them when needed. +@internal +class StreamQueryStore { + final Map _activeKeyStreams = {}; + final HashSet _keysPendingRemoval = HashSet(); + + bool _isShuttingDown = false; + + // we track pending timers since Flutter throws an exception when timers + // remain after a test run. + final Set _pendingTimers = {}; + + // Why is this stream synchronous? We want to dispatch table updates before + // the future from the query completes. This allows streams to invalidate + // their cached data before the user can send another query. + // There shouldn't be a problem as this stream is not exposed in any user- + // facing api. 
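+  // (If this controller were asynchronous, a caller awaiting the update
+  // could already start a new query before listening streams had a chance
+  // to drop their now-stale cached rows.)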
+ final StreamController> _tableUpdates = + StreamController.broadcast(sync: true); + + StreamQueryStore(); + + /// Creates a new stream from the select statement. + Stream>> registerStream( + QueryStreamFetcher fetcher) { + final key = fetcher.key; + + if (key != null) { + final cached = _activeKeyStreams[key]; + if (cached != null) { + return cached.stream; + } + } + + // no cached instance found, create a new stream and register it so later + // requests with the same key can be cached. + final stream = QueryStream(fetcher, this); + // todo this adds the stream to a map, where it will only be removed when + // somebody listens to it and later calls .cancel(). Failing to do so will + // cause a memory leak. Is there any way we can work around it? Perhaps a + // weak reference with an Expando could help. + markAsOpened(stream); + + return stream.stream; + } + + Stream> updatesForSync(TableUpdateQuery query) { + return _tableUpdates.stream + .map((e) => e.where(query.matches).toSet()) + .where((e) => e.isNotEmpty); + } + + /// Handles updates on a given table by re-executing all queries that read + /// from that table. + void handleTableUpdates(Set updates) { + if (_isShuttingDown) return; + _tableUpdates.add(updates); + } + + void markAsClosed(QueryStream stream, Function() whenRemoved) { + if (_isShuttingDown) return; + + final key = stream._fetcher.key; + _keysPendingRemoval.add(key); + + // sync because it's only triggered after the timer + final completer = Completer.sync(); + _pendingTimers.add(completer); + + // Hey there! If you're sent here because your Flutter tests fail, please + // call and await Database.close() in your Flutter widget tests! + // Moor uses timers internally so that after you stopped listening to a + // stream, it can keep its cache just a bit longer. When you listen to + // streams a lot, this helps reduce duplicate statements, especially with + // Flutter's StreamBuilder. + Timer.run(() { + completer.complete(); + _pendingTimers.remove(completer); + + // if no other subscriber was found during this event iteration, remove + // the stream from the cache. + if (_keysPendingRemoval.contains(key)) { + _keysPendingRemoval.remove(key); + _activeKeyStreams.remove(key); + whenRemoved(); + } + }); + } + + void markAsOpened(QueryStream stream) { + final key = stream._fetcher.key; + + if (key != null) { + _keysPendingRemoval.remove(key); + _activeKeyStreams[key] = stream; + } + } + + Future close() async { + _isShuttingDown = true; + + for (final stream in _activeKeyStreams.values) { + // Note: StreamController.close waits until the done event has been + // received by a subscriber. If there is a paused StreamSubscription on + // a query stream, this would pause forever. In particular, this is + // causing deadlocks in tests. + // https://github.com/dart-lang/test/issues/1183#issuecomment-588357154 + unawaited(stream._controller.close()); + } + // awaiting this is fine - the stream is never exposed to users and we don't + // pause any subscriptions on it. + await _tableUpdates.close(); + + while (_pendingTimers.isNotEmpty) { + await _pendingTimers.first.future; + } + + _activeKeyStreams.clear(); + } +} + +class QueryStream { + final QueryStreamFetcher _fetcher; + final StreamQueryStore _store; + + late final StreamController>> _controller = + StreamController.broadcast( + onListen: _onListen, + onCancel: _onCancel, + ); + StreamSubscription? _tablesChangedSubscription; + + List>? 
_lastData; + final List _runningOperations = []; + + Stream>> get stream { + return _controller.stream.transform(StartWithValueTransformer(_cachedData)); + } + + bool get hasKey => _fetcher.key != null; + + QueryStream(this._fetcher, this._store); + + /// Called when we have a new listener, makes the stream query behave similar + /// to an `BehaviorSubject` from rxdart. + List>? _cachedData() => _lastData; + + void _onListen() { + _store.markAsOpened(this); + + // fetch new data whenever any table referenced in this stream updates. + // It could be that we have an outstanding subscription when the + // stream was closed but another listener attached quickly enough. In that + // case we don't have to re-send the query + if (_tablesChangedSubscription == null) { + // first listener added, fetch query + fetchAndEmitData(); + + _tablesChangedSubscription = + _store.updatesForSync(_fetcher.readsFrom).listen((_) { + // table has changed, invalidate cache + _lastData = null; + fetchAndEmitData(); + }); + } + } + + void _onCancel() { + _store.markAsClosed(this, () { + // last listener gone, dispose + _tablesChangedSubscription?.cancel(); + + // we don't listen for table updates anymore, and we're guaranteed to + // re-fetch data after a new listener comes in. We can't know if the table + // was updated in the meantime, but let's delete the cached data just in + // case + _lastData = null; + _tablesChangedSubscription = null; + + for (final op in _runningOperations) { + op.cancel(); + } + }); + } + + Future fetchAndEmitData() async { + final operation = runCancellable(_fetcher.fetchData); + _runningOperations.add(operation); + + try { + final data = await operation.resultOrNullIfCancelled; + if (data == null) return; + + _lastData = data; + if (!_controller.isClosed) { + _controller.add(data); + } + } catch (e, s) { + if (!_controller.isClosed) { + _controller.addError(e, s); + } + } finally { + _runningOperations.remove(operation); + } + } + + void close() { + _controller.close(); + } +} + +// Note: These classes are here because we want them to be public, but not +// exposed without an src import. + +class AnyUpdateQuery extends TableUpdateQuery { + const AnyUpdateQuery(); + + @override + bool matches(TableUpdate update) => true; +} + +class MultipleUpdateQuery extends TableUpdateQuery { + final List queries; + + const MultipleUpdateQuery(this.queries); + + @override + bool matches(TableUpdate update) => queries.any((q) => q.matches(update)); +} + +class SpecificUpdateQuery extends TableUpdateQuery { + final UpdateKind? limitUpdateKind; + final String table; + + const SpecificUpdateQuery(this.table, {this.limitUpdateKind}); + + @override + bool matches(TableUpdate update) { + if (update.table != table) return false; + + return update.kind == null || + limitUpdateKind == null || + update.kind == limitUpdateKind; + } + + @override + int get hashCode => Object.hash(limitUpdateKind, table); + + @override + bool operator ==(Object other) { + return other is SpecificUpdateQuery && + other.limitUpdateKind == limitUpdateKind && + other.table == table; + } +} diff --git a/drift/lib/src/runtime/executor/transactions.dart b/drift/lib/src/runtime/executor/transactions.dart new file mode 100644 index 00000000..5b204aa3 --- /dev/null +++ b/drift/lib/src/runtime/executor/transactions.dart @@ -0,0 +1,103 @@ +import 'package:drift/drift.dart'; +import 'package:drift/src/runtime/executor/stream_queries.dart'; +import 'package:meta/meta.dart'; + +/// Runs multiple statements transactionally. 
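+///
+/// Users don't construct this class themselves - it backs the `transaction`
+/// method on a generated database. A typical call looks like this (sketch,
+/// assuming a generated `todos` table and a prepared `entry` companion):
+///
+/// ```dart
+/// await database.transaction(() async {
+///   await database.delete(todos).go();
+///   await database.into(todos).insert(entry);
+/// });
+/// ```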
+@internal +class Transaction extends DatabaseConnectionUser { + final DatabaseConnectionUser _parent; + + @override + // ignore: invalid_use_of_visible_for_overriding_member + GeneratedDatabase get attachedDatabase => _parent.attachedDatabase; + + /// Constructs a transaction executor from the [_parent] engine and the + /// underlying [executor]. + Transaction(this._parent, TransactionExecutor executor) + : super.delegate( + _parent, + executor: executor, + streamQueries: _TransactionStreamStore(_parent.streamQueries), + ); + + /// Instructs the underlying executor to execute this instructions. Batched + /// table updates will also be send to the stream query store. + Future complete() async { + await (executor as TransactionExecutor).send(); + } + + /// Closes all streams created in this transactions and applies table updates + /// to the main stream store. + Future disposeChildStreams() async { + final streams = streamQueries as _TransactionStreamStore; + await streams._dispatchAndClose(); + } +} + +/// Stream query store that doesn't allow creating new streams and dispatches +/// updates to the outer stream query store when the transaction is completed. +class _TransactionStreamStore extends StreamQueryStore { + final StreamQueryStore parent; + + final Set affectedTables = {}; + final Set _queriesWithoutKey = {}; + + _TransactionStreamStore(this.parent); + + @override + void handleTableUpdates(Set updates) { + super.handleTableUpdates(updates); + affectedTables.addAll(updates); + } + + // Override lifecycle hooks for each stream. The regular StreamQueryStore + // keeps track of created streams if they have a key. It also takes care of + // closing the underlying stream controllers when calling close(), which we + // do. + // However, it doesn't keep track of keyless queries, as those can't be + // cached and keeping a reference would leak. A transaction is usually + // completed quickly, so we can keep a list and close that too. + + @override + void markAsOpened(QueryStream stream) { + super.markAsOpened(stream); + + if (!stream.hasKey) { + _queriesWithoutKey.add(stream); + } + } + + @override + void markAsClosed(QueryStream stream, Function() whenRemoved) { + super.markAsClosed(stream, whenRemoved); + + _queriesWithoutKey.add(stream); + } + + Future _dispatchAndClose() async { + parent.handleTableUpdates(affectedTables); + + await super.close(); + for (final query in _queriesWithoutKey) { + query.close(); + } + } +} + +/// Special query engine to run the [MigrationStrategy.beforeOpen] callback. +/// +/// To use this api, moor users should use the [MigrationStrategy.beforeOpen] +/// parameter inside the [GeneratedDatabase.migration] getter. +@internal +class BeforeOpenRunner extends DatabaseConnectionUser { + final DatabaseConnectionUser _parent; + + @override + // ignore: invalid_use_of_visible_for_overriding_member + GeneratedDatabase get attachedDatabase => _parent.attachedDatabase; + + /// Creates a [BeforeOpenRunner] from a [DatabaseConnectionUser] and the + /// special [executor] running the queries. + BeforeOpenRunner(this._parent, QueryExecutor executor) + : super.delegate(_parent, executor: executor); +} diff --git a/drift/lib/src/runtime/query_builder/components/group_by.dart b/drift/lib/src/runtime/query_builder/components/group_by.dart new file mode 100644 index 00000000..217bece7 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/components/group_by.dart @@ -0,0 +1,23 @@ +part of '../query_builder.dart'; + +/// A "group by" clause in sql. 
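+///
+/// Group-by clauses are usually created through the higher-level query
+/// builder rather than directly. A sketch, assuming a generated `todos`
+/// table with a `category` column:
+///
+/// ```dart
+/// final amountPerCategory = db.selectOnly(todos)
+///   ..addColumns([todos.category, todos.id.count()])
+///   ..groupBy(
+///     [todos.category],
+///     having: todos.id.count().isBiggerThanValue(10),
+///   );
+/// ```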
+class GroupBy extends Component { + /// The expressions to group by. + final List groupBy; + + /// Optional, a having clause to exclude some groups. + final Expression? having; + + GroupBy._(this.groupBy, this.having); + + @override + void writeInto(GenerationContext context) { + context.buffer.write('GROUP BY '); + _writeCommaSeparated(context, groupBy); + + if (having != null) { + context.buffer.write(' HAVING '); + having!.writeInto(context); + } + } +} diff --git a/drift/lib/src/runtime/query_builder/components/join.dart b/drift/lib/src/runtime/query_builder/components/join.dart new file mode 100644 index 00000000..d30df7cd --- /dev/null +++ b/drift/lib/src/runtime/query_builder/components/join.dart @@ -0,0 +1,104 @@ +part of '../query_builder.dart'; + +/// A type for a [Join] (e.g. inner, outer). +enum _JoinType { + /// Perform an inner join, see the [innerJoin] function for details. + inner, + + /// Perform a (left) outer join, see also [leftOuterJoin] + leftOuter, + + /// Perform a full cross join, see also [crossJoin]. + cross +} + +const Map<_JoinType, String> _joinKeywords = { + _JoinType.inner: 'INNER', + _JoinType.leftOuter: 'LEFT OUTER', + _JoinType.cross: 'CROSS', +}; + +/// Used internally by moor when calling [SimpleSelectStatement.join]. +/// +/// You should use [innerJoin], [leftOuterJoin] or [crossJoin] to obtain a +/// [Join] instance. +class Join extends Component { + /// The [_JoinType] of this join. + final _JoinType type; + + /// The [TableInfo] that will be added to the query + final ResultSetImplementation table; + + /// For joins that aren't [_JoinType.cross], contains an additional predicate + /// that must be matched for the join. + final Expression? on; + + /// Whether [table] should appear in the result set (defaults to true). + /// + /// It can be useful to exclude some tables. Sometimes, tables are used in a + /// join only to run aggregate functions on them. + final bool includeInResult; + + /// Constructs a [Join] by providing the relevant fields. [on] is optional for + /// [_JoinType.cross]. + Join._(this.type, this.table, this.on, {bool? includeInResult}) + : includeInResult = includeInResult ?? true; + + @override + void writeInto(GenerationContext context) { + context.buffer.write(_joinKeywords[type]); + context.buffer.write(' JOIN '); + + context.buffer.write(table.tableWithAlias); + context.watchedTables.add(table); + + if (type != _JoinType.cross) { + context.buffer.write(' ON '); + on!.writeInto(context); + } + } +} + +/// Creates a sql inner join that can be used in [SimpleSelectStatement.join]. +/// +/// {@template moor_join_include_results} +/// The optional [useColumns] parameter (defaults to true) can be used to +/// exclude the [other] table from the result set. When set to false, +/// [TypedResult.readTable] will return `null` for that table. +/// {@endtemplate} +/// +/// See also: +/// - https://moor.simonbinder.eu/docs/advanced-features/joins/#joins +/// - http://www.sqlitetutorial.net/sqlite-inner-join/ +Join innerJoin( + ResultSetImplementation other, Expression on, + {bool? useColumns}) { + return Join._(_JoinType.inner, other, on, includeInResult: useColumns); +} + +/// Creates a sql left outer join that can be used in +/// [SimpleSelectStatement.join]. +/// +/// {@macro moor_join_include_results} +/// +/// See also: +/// - https://moor.simonbinder.eu/docs/advanced-features/joins/#joins +/// - http://www.sqlitetutorial.net/sqlite-left-join/ +Join leftOuterJoin( + ResultSetImplementation other, Expression on, + {bool? 
useColumns}) { + return Join._(_JoinType.leftOuter, other, on, includeInResult: useColumns); +} + +/// Creates a sql cross join that can be used in +/// [SimpleSelectStatement.join]. +/// +/// {@macro moor_join_include_results} +/// +/// See also: +/// - https://moor.simonbinder.eu/docs/advanced-features/joins/#joins +/// - http://www.sqlitetutorial.net/sqlite-cross-join/ +Join crossJoin(ResultSetImplementation other, + {bool? useColumns}) { + return Join._(_JoinType.cross, other, null, includeInResult: useColumns); +} diff --git a/drift/lib/src/runtime/query_builder/components/limit.dart b/drift/lib/src/runtime/query_builder/components/limit.dart new file mode 100644 index 00000000..f287c0df --- /dev/null +++ b/drift/lib/src/runtime/query_builder/components/limit.dart @@ -0,0 +1,24 @@ +part of '../query_builder.dart'; + +/// A limit clause inside a select, update or delete statement. +class Limit extends Component { + /// The maximum amount of rows that should be returned by the query. + final int amount; + + /// When the offset is non null, the first offset rows will be skipped an not + /// included in the result. + final int? offset; + + /// Construct a limit clause from the [amount] of rows to include an a + /// nullable [offset]. + Limit(this.amount, this.offset); + + @override + void writeInto(GenerationContext context) { + if (offset != null) { + context.buffer.write('LIMIT $amount OFFSET $offset'); + } else { + context.buffer.write('LIMIT $amount'); + } + } +} diff --git a/drift/lib/src/runtime/query_builder/components/order_by.dart b/drift/lib/src/runtime/query_builder/components/order_by.dart new file mode 100644 index 00000000..6a1a8c94 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/components/order_by.dart @@ -0,0 +1,73 @@ +part of '../query_builder.dart'; + +/// Describes how to order rows +enum OrderingMode { + /// Ascending ordering mode (lowest items first) + asc, + + /// Descending ordering mode (highest items first) + desc +} + +const _modeToString = { + OrderingMode.asc: 'ASC', + OrderingMode.desc: 'DESC', +}; + +/// A single term in a [OrderBy] clause. The priority of this term is determined +/// by its position in [OrderBy.terms]. +class OrderingTerm extends Component { + /// The expression after which the ordering should happen + final Expression expression; + + /// The ordering mode (ascending or descending). + final OrderingMode mode; + + /// Creates an ordering term by the [expression] and the [mode] (defaults to + /// ascending). + OrderingTerm({required this.expression, this.mode = OrderingMode.asc}); + + /// Creates an ordering term that sorts for ascending values of [expression]. + factory OrderingTerm.asc(Expression expression) { + return OrderingTerm(expression: expression, mode: OrderingMode.asc); + } + + /// Creates an ordering term that sorts for descending values of [expression]. + factory OrderingTerm.desc(Expression expression) { + return OrderingTerm(expression: expression, mode: OrderingMode.desc); + } + + @override + void writeInto(GenerationContext context) { + expression.writeInto(context); + context.writeWhitespace(); + context.buffer.write(_modeToString[mode]); + } +} + +/// An order-by clause as part of a select statement. The clause can consist +/// of multiple [OrderingTerm]s, with the first terms being more important and +/// the later terms only being considered if the first term considers two rows +/// equal. +class OrderBy extends Component { + /// The list of ordering terms to respect. 
Terms appearing earlier in this + /// list are more important, the others will only considered when two rows + /// are equal by the first [OrderingTerm]. + final List terms; + + /// Constructs an order by clause by the [terms]. + const OrderBy(this.terms); + + /// Orders by nothing. + /// + /// In this case, the ordering of result rows is undefined. + const OrderBy.nothing() : this(const []); + + @override + void writeInto(GenerationContext context) { + if (terms.isEmpty) return; + + context.buffer.write('ORDER BY '); + _writeCommaSeparated(context, terms); + } +} diff --git a/drift/lib/src/runtime/query_builder/components/where.dart b/drift/lib/src/runtime/query_builder/components/where.dart new file mode 100644 index 00000000..5ff4b418 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/components/where.dart @@ -0,0 +1,26 @@ +part of '../query_builder.dart'; + +/// A where clause in a select, update or delete statement. +class Where extends Component { + /// The expression that determines whether a given row should be included in + /// the result. + final Expression predicate; + + /// Construct a [Where] clause from its [predicate]. + Where(this.predicate); + + @override + void writeInto(GenerationContext context) { + context.buffer.write('WHERE '); + predicate.writeInto(context); + } + + @override + int get hashCode => predicate.hashCode * 7; + + @override + bool operator ==(Object other) { + return identical(this, other) || + other is Where && other.predicate == predicate; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/aggregate.dart b/drift/lib/src/runtime/query_builder/expressions/aggregate.dart new file mode 100644 index 00000000..3e05f70c --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/aggregate.dart @@ -0,0 +1,179 @@ +part of '../query_builder.dart'; + +/// Returns the amount of rows in the current group matching the optional +/// [filter]. +/// +/// {@templace moor_aggregate_filter} +/// To only consider rows matching a predicate, you can set the optional +/// [filter]. Note that [filter] is only available from sqlite 3.30, released on +/// 2019-10-04. Most devices will use an older sqlite version. +/// {@endtemplate} +/// +/// This is equivalent to the `COUNT(*) FILTER (WHERE filter)` sql function. The +/// filter will be omitted if null. +Expression countAll({Expression? filter}) { + return _AggregateExpression('COUNT', const _StarFunctionParameter(), + filter: filter); +} + +/// Provides aggregate functions that are available for each expression. +extension BaseAggregate
<DT> on Expression<DT>
{ + /// Returns how often this expression is non-null in the current group. + /// + /// For `COUNT(*)`, which would count all rows, see [countAll]. + /// + /// If [distinct] is set (defaults to false), duplicate values will not be + /// counted twice. + /// {@macro moor_aggregate_filter} + Expression count({bool? distinct, Expression? filter}) { + return _AggregateExpression('COUNT', this, + filter: filter, distinct: distinct); + } + + /// Returns the concatenation of all non-null values in the current group, + /// joined by the [separator]. + /// + /// The order of the concatenated elements is arbitrary. + /// + /// See also: + /// - the sqlite documentation: https://www.sqlite.org/lang_aggfunc.html#groupconcat + /// - the conceptually similar [Iterable.join] + Expression groupConcat({String separator = ','}) { + const sqliteDefaultSeparator = ','; + if (separator == sqliteDefaultSeparator) { + return _AggregateExpression('GROUP_CONCAT', this); + } else { + return FunctionCallExpression( + 'GROUP_CONCAT', [this, Variable.withString(separator)]); + } + } +} + +/// Provides aggregate functions that are available for numeric expressions. +extension ArithmeticAggregates
on Expression { + /// Return the average of all non-null values in this group. + /// + /// {@macro moor_aggregate_filter} + Expression avg({Expression? filter}) => + _AggregateExpression('AVG', this, filter: filter); + + /// Return the maximum of all non-null values in this group. + /// + /// If there are no non-null values in the group, returns null. + /// {@macro moor_aggregate_filter} + Expression max({Expression? filter}) => + _AggregateExpression('MAX', this, filter: filter); + + /// Return the minimum of all non-null values in this group. + /// + /// If there are no non-null values in the group, returns null. + /// {@macro moor_aggregate_filter} + Expression min({Expression? filter}) => + _AggregateExpression('MIN', this, filter: filter); + + /// Calculate the sum of all non-null values in the group. + /// + /// If all values are null, evaluates to null as well. If an overflow occurs + /// during calculation, sqlite will terminate the query with an "integer + /// overflow" exception. + /// + /// See also [total], which behaves similarly but returns a floating point + /// value and doesn't throw an overflow exception. + /// {@macro moor_aggregate_filter} + Expression sum({Expression? filter}) => + _AggregateExpression('SUM', this, filter: filter); + + /// Calculate the sum of all non-null values in the group. + /// + /// If all values in the group are null, [total] returns `0.0`. This function + /// uses floating-point values internally. + /// {@macro moor_aggregate_filter} + Expression total({Expression? filter}) => + _AggregateExpression('TOTAL', this, filter: filter); +} + +/// Provides aggregate functions that are available on date time expressions. +extension DateTimeAggregate on Expression { + /// Return the average of all non-null values in this group. + /// {@macro moor_aggregate_filter} + Expression avg({Expression? filter}) => + secondsSinceEpoch.avg(filter: filter).roundToInt().dartCast(); + + /// Return the maximum of all non-null values in this group. + /// + /// If there are no non-null values in the group, returns null. + /// {@macro moor_aggregate_filter} + Expression max({Expression? filter}) => + _AggregateExpression('MAX', this, filter: filter); + + /// Return the minimum of all non-null values in this group. + /// + /// If there are no non-null values in the group, returns null. + /// {@macro moor_aggregate_filter} + Expression min({Expression? filter}) => + _AggregateExpression('MIN', this, filter: filter); +} + +class _AggregateExpression extends Expression { + final String functionName; + final bool distinct; + final FunctionParameter parameter; + + final Where? filter; + + _AggregateExpression(this.functionName, this.parameter, + {Expression? filter, bool? distinct}) + : filter = filter != null ? Where(filter) : null, + distinct = distinct ?? 
false; + + @override + final Precedence precedence = Precedence.primary; + + @override + void writeInto(GenerationContext context) { + context.buffer + ..write(functionName) + ..write('('); + + if (distinct) { + context.buffer.write('DISTINCT '); + } + + parameter.writeInto(context); + context.buffer.write(')'); + + if (filter != null) { + context.buffer.write(' FILTER ('); + filter!.writeInto(context); + context.buffer.write(')'); + } + } + + @override + int get hashCode { + return Object.hash(functionName, distinct, parameter, filter); + } + + @override + bool operator ==(Object other) { + if (!identical(this, other) && other.runtimeType != runtimeType) { + return false; + } + + // ignore: test_types_in_equals + final typedOther = other as _AggregateExpression; + return typedOther.functionName == functionName && + typedOther.distinct == distinct && + typedOther.parameter == parameter && + typedOther.filter == filter; + } +} + +class _StarFunctionParameter implements FunctionParameter { + const _StarFunctionParameter(); + + @override + void writeInto(GenerationContext context) { + context.buffer.write('*'); + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/algebra.dart b/drift/lib/src/runtime/query_builder/expressions/algebra.dart new file mode 100644 index 00000000..fe4198c9 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/algebra.dart @@ -0,0 +1,43 @@ +part of '../query_builder.dart'; + +/// Defines the `-`, `*` and `/` operators on sql expressions that support it. +extension ArithmeticExpr
<DT extends num> on Expression<DT> {
+  /// Performs an addition (`this` + [other]) in sql.
+  Expression<DT> operator +(Expression<DT> other) {
+    return _BaseInfixOperator(this, '+', other,
+        precedence: Precedence.plusMinus);
+  }
+
+  /// Performs a subtraction (`this` - [other]) in sql.
+  Expression<DT> operator -(Expression<DT> other) {
+    return _BaseInfixOperator(this, '-', other,
+        precedence: Precedence.plusMinus);
+  }
+
+  /// Returns the negation of this value.
+  Expression<DT> operator -() {
+    return _UnaryMinus(this);
+  }
+
+  /// Performs a multiplication (`this` * [other]) in sql.
+  Expression<DT> operator *(Expression<DT> other) {
+    return _BaseInfixOperator(this, '*', other,
+        precedence: Precedence.mulDivide);
+  }
+
+  /// Performs a division (`this` / [other]) in sql.
+  Expression<DT> operator /(Expression<DT> other) {
+    return _BaseInfixOperator(this, '/', other,
+        precedence: Precedence.mulDivide);
+  }
+
+  /// Calculates the absolute value of this number.
+  Expression<DT>
abs() { + return FunctionCallExpression('abs', [this]); + } + + /// Rounds this expression to the nearest integer. + Expression roundToInt() { + return FunctionCallExpression('round', [this]).cast(); + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/bools.dart b/drift/lib/src/runtime/query_builder/expressions/bools.dart new file mode 100644 index 00000000..3eb4ad6a --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/bools.dart @@ -0,0 +1,41 @@ +part of '../query_builder.dart'; + +/// Defines operations on boolean values. +extension BooleanExpressionOperators on Expression { + /// Negates this boolean expression. The returned expression is true if + /// `this` is false, and vice versa. + Expression not() => _NotExpression(this); + + /// Returns an expression that is true iff both `this` and [other] are true. + Expression operator &(Expression other) { + return _BaseInfixOperator(this, 'AND', other, precedence: Precedence.and); + } + + /// Returns an expression that is true if `this` or [other] are true. + Expression operator |(Expression other) { + return _BaseInfixOperator(this, 'OR', other, precedence: Precedence.or); + } +} + +class _NotExpression extends Expression { + final Expression inner; + + _NotExpression(this.inner); + + @override + Precedence get precedence => Precedence.unary; + + @override + void writeInto(GenerationContext context) { + context.buffer.write('NOT '); + writeInner(context, inner); + } + + @override + int get hashCode => inner.hashCode << 1; + + @override + bool operator ==(Object other) { + return other is _NotExpression && other.inner == inner; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/case_when.dart b/drift/lib/src/runtime/query_builder/expressions/case_when.dart new file mode 100644 index 00000000..2668b798 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/case_when.dart @@ -0,0 +1,44 @@ +import 'package:meta/meta.dart'; + +import '../query_builder.dart'; + +/// A `CASE WHEN` expression in sqlite. +/// +/// This class supports when expressions with or without a base expression. +@internal +class CaseWhenExpression extends Expression { + /// The optional base expression. If it's set, the keys in [whenThen] will be + /// compared to this expression. + final Expression? base; + + /// The when entries for this expression. This expression will evaluate to the + /// value of the entry with a matching key. + final List> whenThen; + + /// The expression to use if no entry in [whenThen] matched. + final Expression? orElse; + + /// Creates a `CASE WHEN` expression from the independent components. 
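+  ///
+  /// A sketch (`category` stands in for any integer expression already in
+  /// scope; type arguments are omitted here):
+  ///
+  /// ```dart
+  /// CaseWhenExpression(
+  ///   category,
+  ///   [
+  ///     MapEntry(Constant(0), Constant('none')),
+  ///     MapEntry(Constant(1), Constant('some')),
+  ///   ],
+  ///   Constant('other'),
+  /// );
+  /// // writes roughly: CASE category WHEN 0 THEN 'none' WHEN 1 THEN 'some'
+  /// //                 ELSE 'other' END
+  /// ```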
+ CaseWhenExpression(this.base, this.whenThen, this.orElse); + + @override + void writeInto(GenerationContext context) { + context.buffer.write('CASE '); + base?.writeInto(context); + + for (final entry in whenThen) { + context.buffer.write(' WHEN '); + entry.key.writeInto(context); + context.buffer.write(' THEN '); + entry.value.writeInto(context); + } + + final orElse = this.orElse; + if (orElse != null) { + context.buffer.write(' ELSE '); + orElse.writeInto(context); + } + + context.buffer.write(' END'); + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/comparable.dart b/drift/lib/src/runtime/query_builder/expressions/comparable.dart new file mode 100644 index 00000000..a102a620 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/comparable.dart @@ -0,0 +1,119 @@ +part of '../query_builder.dart'; + +/// Defines extension functions to express comparisons in sql +extension ComparableExpr
<DT extends Comparable<dynamic>?> on Expression<DT>
 {
+  /// Returns an expression that is true if this expression is strictly bigger
+  /// than the other expression.
+  Expression<bool> isBiggerThan(Expression<DT>
 other) {
+    return _Comparison(this, _ComparisonOperator.more, other);
+  }
+
+  /// Returns an expression that is true if this expression is strictly bigger
+  /// than the other value.
+  Expression<bool> isBiggerThanValue(DT other) {
+    return isBiggerThan(Variable(other));
+  }
+
+  /// Returns an expression that is true if this expression is bigger than or
+  /// equal to the other expression.
+  Expression<bool> isBiggerOrEqual(Expression<DT> other) {
+    return _Comparison(this, _ComparisonOperator.moreOrEqual, other);
+  }
+
+  /// Returns an expression that is true if this expression is bigger than or
+  /// equal to the other value.
+  Expression<bool> isBiggerOrEqualValue(DT other) {
+    return isBiggerOrEqual(Variable(other));
+  }
+
+  /// Returns an expression that is true if this expression is strictly smaller
+  /// than the other expression.
+  Expression<bool> isSmallerThan(Expression<DT> other) {
+    return _Comparison(this, _ComparisonOperator.less, other);
+  }
+
+  /// Returns an expression that is true if this expression is strictly smaller
+  /// than the other value.
+  Expression<bool> isSmallerThanValue(DT other) =>
+      isSmallerThan(Variable(other));
+
+  /// Returns an expression that is true if this expression is smaller than or
+  /// equal to the other expression.
+  Expression<bool> isSmallerOrEqual(Expression<DT> other) {
+    return _Comparison(this, _ComparisonOperator.lessOrEqual, other);
+  }
+
+  /// Returns an expression that is true if this expression is smaller than or
+  /// equal to the other value.
+  Expression<bool> isSmallerOrEqualValue(DT other) {
+    return isSmallerOrEqual(Variable(other));
+  }
+
+  /// Returns an expression evaluating to true if this expression is between
+  /// [lower] and [higher] (both inclusive).
+  ///
+  /// If [not] is set, the expression will be negated. To compare this
+  /// expression against two values, see [isBetweenValues].
+  Expression<bool> isBetween(Expression<DT> lower, Expression<DT>
 higher,
+      {bool not = false}) {
+    return _BetweenExpression(
+        target: this, lower: lower, higher: higher, not: not);
+  }
+
+  /// Returns an expression evaluating to true if this expression is between
+  /// [lower] and [higher] (both inclusive).
+  ///
+  /// If [not] is set, the expression will be negated.
+  Expression<bool> isBetweenValues(DT lower, DT higher, {bool not = false}) {
+    return _BetweenExpression(
+      target: this,
+      lower: Variable<DT>(lower),
+      higher: Variable<DT>
(higher), + not: not, + ); + } +} + +class _BetweenExpression extends Expression { + final Expression target; + + // https://www.sqlite.org/lang_expr.html#between + @override + final Precedence precedence = Precedence.comparisonEq; + + /// Whether to negate this between expression + final bool not; + + final Expression lower; + final Expression higher; + + _BetweenExpression( + {required this.target, + required this.lower, + required this.higher, + this.not = false}); + + @override + void writeInto(GenerationContext context) { + writeInner(context, target); + + if (not) context.buffer.write(' NOT'); + context.buffer.write(' BETWEEN '); + + writeInner(context, lower); + context.buffer.write(' AND '); + writeInner(context, higher); + } + + @override + int get hashCode => Object.hash(target, lower, higher, not); + + @override + bool operator ==(Object other) { + return other is _BetweenExpression && + other.target == target && + other.not == not && + other.lower == lower && + other.higher == higher; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/custom.dart b/drift/lib/src/runtime/query_builder/expressions/custom.dart new file mode 100644 index 00000000..d9baf1c5 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/custom.dart @@ -0,0 +1,44 @@ +part of '../query_builder.dart'; + +/// A custom expression that can appear in a sql statement. +/// The [CustomExpression.content] will be written into the query without any +/// modification. +/// +/// See also: +/// - [currentDate] and [currentDateAndTime], which use a [CustomExpression] +/// internally. +class CustomExpression extends Expression { + /// The SQL of this expression + final String content; + + /// Additional tables that this expression is watching. + /// + /// When this expression is used in a stream query, the stream will update + /// when any table in [watchedTables] changes. + /// Usually, expressions don't introduce new tables to watch. This field is + /// mainly used for subqueries used as expressions. + final Iterable watchedTables; + + @override + final Precedence precedence; + + /// Constructs a custom expression by providing the raw sql [content]. + const CustomExpression(this.content, + {this.watchedTables = const [], this.precedence = Precedence.unknown}); + + @override + void writeInto(GenerationContext context) { + context.buffer.write(content); + context.watchedTables.addAll(watchedTables); + } + + @override + int get hashCode => content.hashCode * 3; + + @override + bool operator ==(Object other) { + return other.runtimeType == runtimeType && + // ignore: test_types_in_equals + (other as CustomExpression).content == content; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/datetimes.dart b/drift/lib/src/runtime/query_builder/expressions/datetimes.dart new file mode 100644 index 00000000..7b4e58be --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/datetimes.dart @@ -0,0 +1,93 @@ +part of '../query_builder.dart'; + +/// A sql expression that evaluates to the current date represented as a unix +/// timestamp. The hour, minute and second fields will be set to 0. +const Expression currentDate = + _CustomDateTimeExpression("strftime('%s', CURRENT_DATE)"); + +/// A sql expression that evaluates to the current date and time, similar to +/// [DateTime.now]. Timestamps are stored with a second accuracy. 
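+///
+/// For example (sketch, assuming a generated `users` table with a
+/// `createdAt` datetime column):
+///
+/// ```dart
+/// // users created within the last 24 hours
+/// final recent = select(users)
+///   ..where((u) =>
+///       u.createdAt.isBiggerThan(currentDateAndTime - const Duration(days: 1)));
+/// ```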
+const Expression currentDateAndTime = + _CustomDateTimeExpression("strftime('%s', CURRENT_TIMESTAMP)"); + +class _CustomDateTimeExpression extends CustomExpression { + @override + Precedence get precedence => Precedence.primary; + + const _CustomDateTimeExpression(String content) : super(content); +} + +/// Provides expressions to extract information from date time values, or to +/// calculate the difference between datetimes. +extension DateTimeExpressions on Expression { + /// Extracts the (UTC) year from `this` datetime expression. + Expression get year => _StrftimeSingleFieldExpression('%Y', this); + + /// Extracts the (UTC) month from `this` datetime expression. + Expression get month => _StrftimeSingleFieldExpression('%m', this); + + /// Extracts the (UTC) day from `this` datetime expression. + Expression get day => _StrftimeSingleFieldExpression('%d', this); + + /// Extracts the (UTC) hour from `this` datetime expression. + Expression get hour => _StrftimeSingleFieldExpression('%H', this); + + /// Extracts the (UTC) minute from `this` datetime expression. + Expression get minute => _StrftimeSingleFieldExpression('%M', this); + + /// Extracts the (UTC) second from `this` datetime expression. + Expression get second => _StrftimeSingleFieldExpression('%S', this); + + /// Formats this datetime in the format `year-month-day`. + Expression get date { + return FunctionCallExpression( + 'DATE', + [this, const Constant('unixepoch')], + ); + } + + /// Returns an expression containing the amount of seconds from the unix + /// epoch (January 1st, 1970) to `this` datetime expression. The datetime is + /// assumed to be in utc. + // for moor, date times are just unix timestamps, so we don't need to rewrite + // anything when converting + Expression get secondsSinceEpoch => dartCast(); + + /// Adds [duration] from this date. + Expression operator +(Duration duration) { + return _BaseInfixOperator(this, '+', Variable(duration.inSeconds), + precedence: Precedence.plusMinus); + } + + /// Subtracts [duration] from this date. + Expression operator -(Duration duration) { + return _BaseInfixOperator(this, '-', Variable(duration.inSeconds), + precedence: Precedence.plusMinus); + } +} + +/// Expression that extracts components out of a date time by using the builtin +/// sqlite function "strftime" and casting the result to an integer. +class _StrftimeSingleFieldExpression extends Expression { + final String format; + final Expression date; + + _StrftimeSingleFieldExpression(this.format, this.date); + + @override + void writeInto(GenerationContext context) { + context.buffer.write("CAST(strftime('$format', "); + date.writeInto(context); + context.buffer.write(", 'unixepoch') AS INTEGER)"); + } + + @override + int get hashCode => Object.hash(format, date); + + @override + bool operator ==(Object other) { + return other is _StrftimeSingleFieldExpression && + other.format == format && + other.date == date; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/exists.dart b/drift/lib/src/runtime/query_builder/expressions/exists.dart new file mode 100644 index 00000000..853ebfed --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/exists.dart @@ -0,0 +1,44 @@ +part of '../query_builder.dart'; + +/// The `EXISTS` operator checks whether the [select] subquery returns any rows. +Expression existsQuery(BaseSelectStatement select) { + return _ExistsExpression(select, false); +} + +/// The `NOT EXISTS` operator evaluates to `true` if the [select] subquery does +/// not return any rows. 
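+///
+/// For example (sketch, assuming generated `categories` and `todos` tables
+/// where `todos.category` references `categories.id`):
+///
+/// ```dart
+/// // categories that don't contain a single todo entry
+/// final emptyCategories = select(categories)
+///   ..where((row) => notExistsQuery(
+///       select(todos)..where((todo) => todo.category.equalsExp(row.id))));
+/// ```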
+Expression notExistsQuery(BaseSelectStatement select) { + return _ExistsExpression(select, true); +} + +class _ExistsExpression extends Expression { + final BaseSelectStatement _select; + final bool _not; + + @override + Precedence get precedence => Precedence.comparisonEq; + + _ExistsExpression(this._select, this._not); + + @override + void writeInto(GenerationContext context) { + if (_not) { + context.buffer.write('NOT '); + } + context.buffer.write('EXISTS '); + + context.buffer.write('('); + _select.writeInto(context); + context.buffer.write(')'); + } + + @override + int get hashCode => Object.hash(_select, _not); + + @override + bool operator ==(Object other) { + return other is _ExistsExpression && + other._select == _select && + other._not == _not; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/expression.dart b/drift/lib/src/runtime/query_builder/expressions/expression.dart new file mode 100644 index 00000000..4b5b3bac --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/expression.dart @@ -0,0 +1,482 @@ +part of '../query_builder.dart'; + +const _equality = ListEquality(); + +/// Base class for everything that can be used as a function parameter in sql. +/// +/// Most prominently, this includes [Expression]s. +/// +/// Used internally by moor. +abstract class FunctionParameter implements Component {} + +/// Any sql expression that evaluates to some generic value. This does not +/// include queries (which might evaluate to multiple values) but individual +/// columns, functions and operators. +/// +/// It's important that all subclasses properly implement [hashCode] and +/// [==]. +abstract class Expression implements FunctionParameter { + /// Constant constructor so that subclasses can be constant. + const Expression(); + + /// The precedence of this expression. This can be used to automatically put + /// parentheses around expressions as needed. + Precedence get precedence => Precedence.unknown; + + /// Whether this expression is a literal. Some use-sites need to put + /// parentheses around non-literals. + bool get isLiteral => false; + + /// Whether this expression is equal to the given expression. + Expression equalsExp(Expression compare) => + _Comparison.equal(this, compare); + + /// Whether this column is equal to the given value, which must have a fitting + /// type. The [compare] value will be written + /// as a variable using prepared statements, so there is no risk of + /// an SQL-injection. + Expression equals(D compare) => + _Comparison.equal(this, Variable(compare)); + + /// Casts this expression to an expression of [D]. + /// + /// Calling [dartCast] will not affect the generated sql. In particular, it + /// will __NOT__ generate a `CAST` expression in sql. To generate a `CAST` + /// in sql, use [cast]. + /// + /// This method is used internally by moor. + Expression dartCast() { + return _DartCastExpression(this); + } + + /// Generates a `CAST(expression AS TYPE)` expression. + /// + /// Note that this does not do a meaningful conversion for moor-only types + /// like `bool` or `DateTime`. Both would simply generate a `CAST AS INT` + /// expression. + Expression cast() => _CastInSqlExpression(this); + + /// An expression that is true if `this` resolves to any of the values in + /// [values]. + Expression isIn(Iterable values) { + return _InExpression(this, values.toList(), false); + } + + /// An expression that is true if `this` does not resolve to any of the values + /// in [values]. 
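+  ///
+  /// For example (sketch, assuming a generated `todos` table with an integer
+  /// `category` column):
+  ///
+  /// ```dart
+  /// // todo entries that are neither in category 1 nor category 2
+  /// select(todos)..where((t) => t.category.isNotIn([1, 2]));
+  /// ```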
+ Expression isNotIn(Iterable values) { + return _InExpression(this, values.toList(), true); + } + + /// An expression checking whether `this` is included in any row of the + /// provided [select] statement. + /// + /// The [select] statement may only have one column. + Expression isInQuery(BaseSelectStatement select) { + _checkSubquery(select); + return _InSelectExpression(select, this, false); + } + + /// An expression checking whether `this` is _not_ included in any row of the + /// provided [select] statement. + /// + /// The [select] statement may only have one column. + Expression isNotInQuery(BaseSelectStatement select) { + _checkSubquery(select); + return _InSelectExpression(select, this, true); + } + + /// A `CASE WHEN` construct using the current expression as a base. + /// + /// The expression on which [caseMatch] is invoked will be used as a base and + /// compared against the keys in [when]. If an equal key is found in the map, + /// the expression returned evaluates to the respective value. + /// If no matching keys are found in [when], the [orElse] expression is + /// evaluated and returned. If no [orElse] expression is provided, `NULL` will + /// be returned instead. + /// + /// For example, consider this expression mapping numerical weekdays to their + /// name: + /// + /// ```dart + /// final weekday = myTable.createdOnWeekDay; + /// weekday.caseMatch( + /// when: { + /// Constant(1): Constant('Monday'), + /// Constant(2): Constant('Tuesday'), + /// Constant(3): Constant('Wednesday'), + /// Constant(4): Constant('Thursday'), + /// Constant(5): Constant('Friday'), + /// Constant(6): Constant('Saturday'), + /// Constant(7): Constant('Sunday'), + /// }, + /// orElse: Constant('(unknown)'), + /// ); + /// ``` + Expression caseMatch({ + required Map, Expression> when, + Expression? orElse, + }) { + if (when.isEmpty) { + throw ArgumentError.value(when, 'when', 'Must not be empty'); + } + + return CaseWhenExpression(this, when.entries.toList(), orElse); + } + + /// Writes this expression into the [GenerationContext], assuming that there's + /// an outer expression with [precedence]. If the [Expression.precedence] of + /// `this` expression is lower, it will be wrap}ped in + /// + /// See also: + /// - [Component.writeInto], which doesn't take any precedence relation into + /// account. + void writeAroundPrecedence(GenerationContext context, Precedence precedence) { + if (this.precedence < precedence) { + context.buffer.write('('); + writeInto(context); + context.buffer.write(')'); + } else { + writeInto(context); + } + } + + /// If this [Expression] wraps an [inner] expression, this utility method can + /// be used inside [writeInto] to write that inner expression while wrapping + /// it in parentheses if necessary. + @protected + void writeInner(GenerationContext ctx, Expression inner) { + assert(precedence != Precedence.unknown, + "Expressions with unknown precedence shouldn't have inner expressions"); + inner.writeAroundPrecedence(ctx, precedence); + } + + /// Finds the runtime implementation of [D] in the provided [types]. + SqlType findType(SqlTypeSystem types) { + return types.forDartType(); + } +} + +/// Used to order the precedence of sql expressions so that we can avoid +/// unnecessary parens when generating sql statements. +class Precedence implements Comparable { + /// Higher means higher precedence. 
+ final int _value; + + const Precedence._(this._value); + + @override + int compareTo(Precedence other) { + return _value.compareTo(other._value); + } + + @override + int get hashCode => _value; + + @override + bool operator ==(Object other) { + // runtimeType comparison isn't necessary, the private constructor prevents + // subclasses + return other is Precedence && other._value == _value; + } + + /// Returns true if this [Precedence] is lower than [other]. + bool operator <(Precedence other) => compareTo(other) < 0; + + /// Returns true if this [Precedence] is lower or equal to [other]. + bool operator <=(Precedence other) => compareTo(other) <= 0; + + /// Returns true if this [Precedence] is higher than [other]. + bool operator >(Precedence other) => compareTo(other) > 0; + + /// Returns true if this [Precedence] is higher or equal to [other]. + bool operator >=(Precedence other) => compareTo(other) >= 0; + + /// Precedence is unknown, assume lowest. This can be used for a + /// [CustomExpression] to always put parens around it. + static const Precedence unknown = Precedence._(-1); + + /// Precedence for the `OR` operator in sql + static const Precedence or = Precedence._(10); + + /// Precedence for the `AND` operator in sql + static const Precedence and = Precedence._(11); + + /// Precedence for most of the comparisons operators in sql, including + /// equality, is (not) checks, in, like, glob, match, regexp. + static const Precedence comparisonEq = Precedence._(12); + + /// Precedence for the <, <=, >, >= operators in sql + static const Precedence comparison = Precedence._(13); + + /// Precedence for bitwise operators in sql + static const Precedence bitwise = Precedence._(14); + + /// Precedence for the (binary) plus and minus operators in sql + static const Precedence plusMinus = Precedence._(15); + + /// Precedence for the *, / and % operators in sql + static const Precedence mulDivide = Precedence._(16); + + /// Precedence for the || operator in sql + static const Precedence stringConcatenation = Precedence._(17); + + /// Precedence for unary operators in sql + static const Precedence unary = Precedence._(20); + + /// Precedence for postfix operators (like collate) in sql + static const Precedence postfix = Precedence._(21); + + /// Highest precedence in sql, used for variables and literals. + static const Precedence primary = Precedence._(100); +} + +/// An expression that looks like "$a operator $b", where $a and $b itself +/// are expressions and the operator is any string. 
+abstract class _InfixOperator extends Expression { + /// The left-hand side of this expression + Expression get left; + + /// The right-hand side of this expresion + Expression get right; + + /// The sql operator to write + String get operator; + + @override + void writeInto(GenerationContext context) { + writeInner(context, left); + context.writeWhitespace(); + context.buffer.write(operator); + context.writeWhitespace(); + writeInner(context, right); + } + + @override + int get hashCode => Object.hash(left, right, operator); + + @override + bool operator ==(Object other) { + return other is _InfixOperator && + other.left == left && + other.right == right && + other.operator == operator; + } +} + +class _BaseInfixOperator extends _InfixOperator { + @override + final Expression left; + + @override + final String operator; + + @override + final Expression right; + + @override + final Precedence precedence; + + _BaseInfixOperator(this.left, this.operator, this.right, + {this.precedence = Precedence.unknown}); +} + +/// Defines the possible comparison operators that can appear in a +/// [_Comparison]. +enum _ComparisonOperator { + /// '<' in sql + less, + + /// '<=' in sql + lessOrEqual, + + /// '=' in sql + equal, + + /// '>=' in sql + moreOrEqual, + + /// '>' in sql + more +} + +/// An expression that compares two child expressions. +class _Comparison extends _InfixOperator { + static const Map<_ComparisonOperator, String> _operatorNames = { + _ComparisonOperator.less: '<', + _ComparisonOperator.lessOrEqual: '<=', + _ComparisonOperator.equal: '=', + _ComparisonOperator.moreOrEqual: '>=', + _ComparisonOperator.more: '>' + }; + + @override + final Expression left; + @override + final Expression right; + + /// The operator to use for this comparison + final _ComparisonOperator op; + + @override + String get operator => _operatorNames[op]!; + + @override + Precedence get precedence { + if (op == _ComparisonOperator.equal) { + return Precedence.comparisonEq; + } else { + return Precedence.comparison; + } + } + + /// Constructs a comparison from the [left] and [right] expressions to compare + /// and the [ComparisonOperator] [op]. + _Comparison(this.left, this.op, this.right); + + /// Like [Comparison(left, op, right)], but uses [_ComparisonOperator.equal]. + _Comparison.equal(this.left, this.right) : op = _ComparisonOperator.equal; +} + +class _UnaryMinus
extends Expression
{ + final Expression
inner; + + _UnaryMinus(this.inner); + + @override + Precedence get precedence => Precedence.unary; + + @override + void writeInto(GenerationContext context) { + context.buffer.write('-'); + inner.writeInto(context); + } + + @override + int get hashCode => inner.hashCode * 5; + + @override + bool operator ==(Object other) { + return other is _UnaryMinus && other.inner == inner; + } +} + +class _DartCastExpression extends Expression { + final Expression inner; + + _DartCastExpression(this.inner); + + @override + Precedence get precedence => inner.precedence; + + @override + bool get isLiteral => inner.isLiteral; + + @override + void writeInto(GenerationContext context) { + return inner.writeInto(context); + } + + @override + int get hashCode => inner.hashCode * 7; + + @override + bool operator ==(Object other) { + return other is _DartCastExpression && other.inner == inner; + } +} + +class _CastInSqlExpression extends Expression { + final Expression inner; + + @override + final Precedence precedence = Precedence.primary; + + _CastInSqlExpression(this.inner); + + @override + void writeInto(GenerationContext context) { + final type = context.typeSystem.forDartType(); + + context.buffer.write('CAST('); + inner.writeInto(context); + context.buffer.write(' AS ${type.sqlName})'); + } +} + +/// A sql expression that calls a function. +/// +/// This class is mainly used by moor internally. If you find yourself using +/// this class, consider [creating an issue](https://github.com/simolus3/moor/issues/new) +/// to request native support in moor. +class FunctionCallExpression extends Expression { + /// The name of the function to call + final String functionName; + + /// The arguments passed to the function, as expressions. + final List arguments; + + @override + final Precedence precedence = Precedence.primary; + + /// Constructs a function call expression in sql from the [functionName] and + /// the target [arguments]. + FunctionCallExpression(this.functionName, this.arguments); + + @override + void writeInto(GenerationContext context) { + context.buffer + ..write(functionName) + ..write('('); + _writeCommaSeparated(context, arguments); + context.buffer.write(')'); + } + + @override + int get hashCode => Object.hash(functionName, _equality); + + @override + bool operator ==(Object other) { + return other is FunctionCallExpression && + other.functionName == functionName && + _equality.equals(other.arguments, arguments); + } +} + +void _checkSubquery(BaseSelectStatement statement) { + final columns = statement._returnedColumnCount; + if (columns != 1) { + throw ArgumentError.value(statement, 'statement', + 'Must return exactly one column (actually returns $columns)'); + } +} + +/// Creates a subquery expression from the given [statement]. +/// +/// The statement, which can be created via [DatabaseConnectionUser.select] in +/// a database class, must return exactly one row with exactly one column. +Expression subqueryExpression(BaseSelectStatement statement) { + _checkSubquery(statement); + return _SubqueryExpression(statement); +} + +class _SubqueryExpression extends Expression { + final BaseSelectStatement statement; + + _SubqueryExpression(this.statement); + + @override + void writeInto(GenerationContext context) { + context.buffer.write('('); + statement.writeInto(context); + context.buffer.write(')'); + } + + @override + int get hashCode => statement.hashCode; + + @override + bool operator ==(Object? 
other) { + return other is _SubqueryExpression && other.statement == statement; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/in.dart b/drift/lib/src/runtime/query_builder/expressions/in.dart new file mode 100644 index 00000000..4715a64f --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/in.dart @@ -0,0 +1,83 @@ +part of '../query_builder.dart'; + +abstract class _BaseInExpression extends Expression { + final Expression _expression; + final bool _not; + + _BaseInExpression(this._expression, this._not); + + @override + Precedence get precedence => Precedence.comparisonEq; + + @override + void writeInto(GenerationContext context) { + writeInner(context, _expression); + + if (_not) { + context.buffer.write(' NOT'); + } + context.buffer.write(' IN ('); + + _writeValues(context); + context.buffer.write(')'); + } + + void _writeValues(GenerationContext context); +} + +class _InExpression extends _BaseInExpression { + final List _values; + + _InExpression(Expression expression, this._values, bool not) + : super(expression, not); + + @override + void _writeValues(GenerationContext context) { + var first = true; + for (final value in _values) { + final variable = Variable(value); + + if (first) { + first = false; + } else { + context.buffer.write(', '); + } + + variable.writeInto(context); + } + } + + @override + int get hashCode => Object.hash(_expression, _equality, _not); + + @override + bool operator ==(Object other) { + return other is _InExpression && + other._expression == _expression && + _equality.equals(other._values, _values) && + other._not == _not; + } +} + +class _InSelectExpression extends _BaseInExpression { + final BaseSelectStatement _select; + + _InSelectExpression(this._select, Expression expression, bool not) + : super(expression, not); + + @override + void _writeValues(GenerationContext context) { + _select.writeInto(context); + } + + @override + int get hashCode => Object.hash(_expression, _select, _not); + + @override + bool operator ==(Object other) { + return other is _InSelectExpression && + other._expression == _expression && + other._select == _select && + other._not == _not; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/null_check.dart b/drift/lib/src/runtime/query_builder/expressions/null_check.dart new file mode 100644 index 00000000..15bd2438 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/null_check.dart @@ -0,0 +1,61 @@ +part of '../query_builder.dart'; + +/// Expression that is true if the inner expression resolves to a null value. +@Deprecated('Use isNull through the SqlIsNull extension') +Expression isNull(Expression inner) => _NullCheck(inner, true); + +/// Expression that is true if the inner expression resolves to a non-null +/// value. +@Deprecated('Use isNotNull through the SqlIsNull extension') +Expression isNotNull(Expression inner) => _NullCheck(inner, false); + +/// Extension defines the `isNull` and `isNotNull` members to check whether the +/// expression evaluates to null or not. +extension SqlIsNull on Expression { + /// Expression that is true if the inner expression resolves to a null value. + Expression isNull() => _NullCheck(this, true); + + /// Expression that is true if the inner expression resolves to a non-null + /// value. + Expression isNotNull() => _NullCheck(this, false); +} + +/// Evaluates to the first expression in [expressions] that's not null, or +/// null if all [expressions] evaluate to null. 
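+///
+/// A minimal usage sketch (the `todos` table and its nullable `category`
+/// column are assumed for illustration and are not part of this library):
+///
+/// ```dart
+/// // use 0 whenever no category is set on a row
+/// coalesce([todos.category, const Constant(0)])
+/// ```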
+Expression coalesce(List> expressions) { + assert(expressions.length >= 2, + 'coalesce must have at least 2 arguments, got ${expressions.length}'); + + return FunctionCallExpression('COALESCE', expressions); +} + +class _NullCheck extends Expression { + final Expression _inner; + final bool _isNull; + + @override + final Precedence precedence = Precedence.comparisonEq; + + _NullCheck(this._inner, this._isNull); + + @override + void writeInto(GenerationContext context) { + writeInner(context, _inner); + + context.buffer.write(' IS '); + if (!_isNull) { + context.buffer.write('NOT '); + } + context.buffer.write('NULL'); + } + + @override + int get hashCode => Object.hash(_inner, _isNull); + + @override + bool operator ==(Object other) { + return other is _NullCheck && + other._inner == _inner && + other._isNull == _isNull; + } +} diff --git a/drift/lib/src/runtime/query_builder/expressions/text.dart b/drift/lib/src/runtime/query_builder/expressions/text.dart new file mode 100644 index 00000000..af717ef6 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/text.dart @@ -0,0 +1,223 @@ +part of '../query_builder.dart'; + +/// Defines methods that operate on a column storing [String] values. +extension StringExpressionOperators on Expression { + /// Whether this column matches the given pattern. For details on what patters + /// are valid and how they are interpreted, check out + /// [this tutorial](http://www.sqlitetutorial.net/sqlite-like/). + Expression like(String regex) { + return _LikeOperator(this, Variable.withString(regex)); + } + + /// Matches this string against the regular expression in [regex]. + /// + /// The [multiLine], [caseSensitive], [unicode] and [dotAll] parameters + /// correspond to the parameters on [RegExp]. + /// + /// Note that this function is only available when using `moor_ffi`. If you + /// need to support the web or `moor_flutter`, consider using [like] instead. + Expression regexp( + String regex, { + bool multiLine = false, + bool caseSensitive = true, + bool unicode = false, + bool dotAll = false, + }) { + // moor_ffi has a special regexp sql function that takes a third parameter + // to encode flags. If the least significant bit is set, multiLine is + // enabled. The next three bits enable case INSENSITIVITY (it's sensitive + // by default), unicode and dotAll. + var flags = 0; + + if (multiLine) { + flags |= 1; + } + if (!caseSensitive) { + flags |= 2; + } + if (unicode) { + flags |= 4; + } + if (dotAll) { + flags |= 8; + } + + if (flags != 0) { + return FunctionCallExpression( + 'regexp_moor_ffi', + [ + Variable.withString(regex), + this, + Variable.withInt(flags), + ], + ); + } + + // No special flags enabled, use the regular REGEXP operator + return _LikeOperator(this, Variable.withString(regex), operator: 'REGEXP'); + } + + /// Whether this expression contains [substring]. + /// + /// Note that this is case-insensitive for the English alphabet only. + /// + /// This is equivalent to calling [like] with `%%`. + Expression contains(String substring) { + return like('%$substring%'); + } + + /// Uses the given [collate] sequence when comparing this column to other + /// values. + Expression collate(Collate collate) { + return _CollateOperator(this, collate); + } + + /// Performs a string concatenation in sql by appending [other] to `this`. + Expression operator +(Expression other) { + return _BaseInfixOperator(this, '||', other, + precedence: Precedence.stringConcatenation); + } + + /// Calls the sqlite function `UPPER` on `this` string. 
Please note that, in + /// most sqlite installations, this only affects ascii chars. + /// + /// See also: + /// - https://www.w3resource.com/sqlite/core-functions-upper.php + Expression upper() { + return FunctionCallExpression('UPPER', [this]); + } + + /// Calls the sqlite function `LOWER` on `this` string. Please note that, in + /// most sqlite installations, this only affects ascii chars. + /// + /// See also: + /// - https://www.w3resource.com/sqlite/core-functions-lower.php + Expression lower() { + return FunctionCallExpression('LOWER', [this]); + } + + /// Calls the sqlite function `LENGTH` on `this` string, which counts the + /// number of characters in this string. Note that, in most sqlite + /// installations, [length] may not support all unicode rules. + /// + /// See also: + /// - https://www.w3resource.com/sqlite/core-functions-length.php + Expression get length { + return FunctionCallExpression('LENGTH', [this]); + } + + /// Removes spaces from both ends of this string. + Expression trim() { + return FunctionCallExpression('TRIM', [this]); + } + + /// Removes spaces from the beginning of this string. + Expression trimLeft() { + return FunctionCallExpression('LTRIM', [this]); + } + + /// Removes spaces from the end of this string. + Expression trimRight() { + return FunctionCallExpression('RTRIM', [this]); + } +} + +/// A `text LIKE pattern` expression that will be true if the first expression +/// matches the pattern given by the second expression. +class _LikeOperator extends Expression { + /// The target expression that will be tested + final Expression target; + + /// The regex-like expression to test the [target] against. + final Expression regex; + + /// The operator to use when matching. Defaults to `LIKE`. + final String operator; + + @override + final Precedence precedence = Precedence.comparisonEq; + + /// Perform a like operator with the target and the regex. + _LikeOperator(this.target, this.regex, {this.operator = 'LIKE'}); + + @override + void writeInto(GenerationContext context) { + writeInner(context, target); + context.writeWhitespace(); + context.buffer.write(operator); + context.writeWhitespace(); + writeInner(context, regex); + } + + @override + int get hashCode => Object.hash(target, regex, operator); + + @override + bool operator ==(Object other) { + return other is _LikeOperator && + other.target == target && + other.regex == regex && + other.operator == operator; + } +} + +/// Builtin collating functions from sqlite. +/// +/// See also: +/// - https://www.sqlite.org/datatype3.html#collation +enum Collate { + /// Instruct sqlite to compare string data using memcmp(), regardless of text + /// encoding. + binary, + + /// The same as [Collate.binary], except the 26 upper case characters of ASCII + /// are folded to their lower case equivalents before the comparison is + /// performed. Note that only ASCII characters are case folded. SQLite does + /// not attempt to do full UTF case folding due to the size of the tables + /// required. + noCase, + + /// The same as [Collate.binary], except that trailing space characters are + /// ignored. + rTrim, +} + +/// A `text COLLATE collate` expression in sqlite. +class _CollateOperator extends Expression { + /// The expression on which the collate function will be run + final Expression inner; + + /// The [Collate] to use. + final Collate collate; + + @override + final Precedence precedence = Precedence.postfix; + + /// Constructs a collate expression on the [inner] expression and the + /// [Collate]. 
+ _CollateOperator(this.inner, this.collate); + + @override + void writeInto(GenerationContext context) { + writeInner(context, inner); + context.buffer + ..write(' COLLATE ') + ..write(_operatorNames[collate]); + } + + @override + int get hashCode => Object.hash(inner, collate); + + @override + bool operator ==(Object other) { + return other is _CollateOperator && + other.inner == inner && + other.collate == collate; + } + + static const Map _operatorNames = { + Collate.binary: 'BINARY', + Collate.noCase: 'NOCASE', + Collate.rTrim: 'RTRIM', + }; +} diff --git a/drift/lib/src/runtime/query_builder/expressions/variables.dart b/drift/lib/src/runtime/query_builder/expressions/variables.dart new file mode 100644 index 00000000..5cad3eb4 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/expressions/variables.dart @@ -0,0 +1,113 @@ +part of '../query_builder.dart'; + +// ignoring the lint because we can't have parameterized factories +// ignore_for_file: prefer_constructors_over_static_methods + +/// An expression that represents the value of a dart object encoded to sql +/// using prepared statements. +class Variable extends Expression { + /// The Dart value that will be sent to the database + final T value; + + // note that we keep the identity hash/equals here because each variable would + // get its own index in sqlite and is thus different. + + @override + Precedence get precedence => Precedence.primary; + + @override + int get hashCode => value.hashCode; + + /// Constructs a new variable from the [value]. + const Variable(this.value); + + /// Creates a variable that holds the specified boolean. + static Variable withBool(bool value) { + return Variable(value); + } + + /// Creates a variable that holds the specified int. + static Variable withInt(int value) { + return Variable(value); + } + + /// Creates a variable that holds the specified string. + static Variable withString(String value) { + return Variable(value); + } + + /// Creates a variable that holds the specified date. + static Variable withDateTime(DateTime value) { + return Variable(value); + } + + /// Creates a variable that holds the specified data blob. + static Variable withBlob(Uint8List value) { + return Variable(value); + } + + /// Creates a variable that holds the specified floating point value. + static Variable withReal(double value) { + return Variable(value); + } + + /// Maps [value] to something that should be understood by the underlying + /// database engine. For instance, a [DateTime] will me mapped to its unix + /// timestamp. + dynamic mapToSimpleValue(GenerationContext context) { + return context.typeSystem.mapToVariable(value); + } + + @override + void writeInto(GenerationContext context) { + if (value != null) { + context.buffer.write('?'); + context.introduceVariable(this, mapToSimpleValue(context)); + } else { + context.buffer.write('NULL'); + } + } + + @override + String toString() => 'Variable($value)'; + + @override + bool operator ==(Object other) { + return other is Variable && other.value == value; + } +} + +/// An expression that represents the value of a dart object encoded to sql +/// by writing them into the sql statements. For most cases, consider using +/// [Variable] instead. +class Constant extends Expression { + /// Constructs a new constant (sql literal) holding the [value]. + const Constant(this.value); + + @override + Precedence get precedence => Precedence.primary; + + /// The value that will be converted to an sql literal. 
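+  ///
+  /// As a short, illustrative comparison with [Variable] (not a complete
+  /// statement on its own):
+  ///
+  /// ```dart
+  /// final asLiteral = Constant(2);  // written into the sql as the literal 2
+  /// final asVariable = Variable(2); // written as ? and bound at runtime
+  /// ```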
+ final T value; + + @override + bool get isLiteral => true; + + @override + void writeInto(GenerationContext context) { + context.buffer.write(SqlTypeSystem.mapToSqlConstant(value)); + } + + @override + int get hashCode => value.hashCode; + + @override + bool operator ==(Object other) { + return other.runtimeType == runtimeType && + // ignore: test_types_in_equals + (other as Constant).value == value; + } + + @override + String toString() => 'Constant($value)'; +} diff --git a/drift/lib/src/runtime/query_builder/generation_context.dart b/drift/lib/src/runtime/query_builder/generation_context.dart new file mode 100644 index 00000000..8d60e046 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/generation_context.dart @@ -0,0 +1,68 @@ +part of 'query_builder.dart'; + +/// Contains information about a query while it's being constructed. +class GenerationContext { + /// Whether the query obtained by this context operates on multiple tables. + /// + /// If it does, columns should prefix their table name to avoid ambiguous + /// queries. + bool hasMultipleTables = false; + + /// All tables that the generated query reads from. + final List watchedTables = []; + + /// The [SqlTypeSystem] to use when mapping variables to values that the + /// underlying database understands. + final SqlTypeSystem typeSystem; + + /// The [SqlDialect] that should be respected when generating the query. + final SqlDialect dialect; + + /// The actual [DatabaseConnectionUser] that's going to execute the generated + /// query. + final DatabaseConnectionUser? executor; + + final List _boundVariables = []; + + /// The values of [introducedVariables] that will be sent to the underlying + /// engine. + List get boundVariables => _boundVariables; + + /// All variables ("?" in sql) that were added to this context. + final List introducedVariables = []; + + /// Returns the amount of variables that have been introduced when writing + /// this query. + int get amountOfVariables => boundVariables.length; + + /// The string buffer contains the sql query as it's being constructed. + final StringBuffer buffer = StringBuffer(); + + /// Gets the generated sql statement + String get sql => buffer.toString(); + + /// Constructs a [GenerationContext] by copying the relevant fields from the + /// database. + GenerationContext.fromDb(this.executor) + : typeSystem = executor?.typeSystem ?? SqlTypeSystem.defaultInstance, + // ignore: invalid_null_aware_operator, (doesn't seem to actually work) + dialect = executor?.executor?.dialect ?? SqlDialect.sqlite; + + /// Constructs a custom [GenerationContext] by setting the fields manually. + /// See [GenerationContext.fromDb] for a more convenient factory. + GenerationContext(this.typeSystem, this.executor, + {this.dialect = SqlDialect.sqlite}); + + /// Introduces a variable that will be sent to the database engine. Whenever + /// this method is called, a question mark should be added to the [buffer] so + /// that the prepared statement can be executed with the variable. The value + /// must be a type that is supported by the sqflite library. A list of + /// supported types can be found [here](https://github.com/tekartik/sqflite#supported-sqlite-types). + void introduceVariable(Variable v, dynamic value) { + introducedVariables.add(v); + _boundVariables.add(value); + } + + /// Shortcut to add a single space to the buffer because it's used very often. 
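+  ///
+  /// For context, a hedged sketch of how a custom [Component] might write
+  /// into this buffer and bind a variable (the `LimitTo` class is made up
+  /// purely for illustration):
+  ///
+  /// ```dart
+  /// class LimitTo extends Component {
+  ///   final int count;
+  ///   const LimitTo(this.count);
+  ///
+  ///   @override
+  ///   void writeInto(GenerationContext context) {
+  ///     context.buffer.write('LIMIT ');
+  ///     // Variable.writeInto emits `?` and calls introduceVariable for us.
+  ///     Variable(count).writeInto(context);
+  ///   }
+  /// }
+  /// ```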
+ void writeWhitespace() => buffer.write(' '); +} diff --git a/drift/lib/src/runtime/query_builder/migration.dart b/drift/lib/src/runtime/query_builder/migration.dart new file mode 100644 index 00000000..180296a2 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/migration.dart @@ -0,0 +1,512 @@ +part of 'query_builder.dart'; + +/// Signature of a function that will be invoked when a database is created. +typedef OnCreate = Future Function(Migrator m); + +/// Signature of a function that will be invoked when a database is upgraded +/// or downgraded. +/// In version upgrades: from < to +/// In version downgrades: from > to +typedef OnUpgrade = Future Function(Migrator m, int from, int to); + +/// Signature of a function that's called before a database is marked opened by +/// moor, but after migrations took place. This is a suitable callback to to +/// populate initial data or issue `PRAGMA` statements that you want to use. +typedef OnBeforeOpen = Future Function(OpeningDetails details); + +Future _defaultOnCreate(Migrator m) => m.createAll(); +Future _defaultOnUpdate(Migrator m, int from, int to) async => + throw Exception("You've bumped the schema version for your moor database " + "but didn't provide a strategy for schema updates. Please do that by " + 'adapting the migrations getter in your database class.'); + +/// Handles database migrations by delegating work to [OnCreate] and [OnUpgrade] +/// methods. +class MigrationStrategy { + /// Executes when the database is opened for the first time. + final OnCreate onCreate; + + /// Executes when the database has been opened previously, but the last access + /// happened at a different [GeneratedDatabase.schemaVersion]. + /// Schema version upgrades and downgrades will both be run here. + final OnUpgrade onUpgrade; + + /// Executes after the database is ready to be used (ie. it has been opened + /// and all migrations ran), but before any other queries will be sent. This + /// makes it a suitable place to populate data after the database has been + /// created or set sqlite `PRAGMAS` that you need. + final OnBeforeOpen? beforeOpen; + + /// Construct a migration strategy from the provided [onCreate] and + /// [onUpgrade] methods. + MigrationStrategy({ + this.onCreate = _defaultOnCreate, + this.onUpgrade = _defaultOnUpdate, + this.beforeOpen, + }); +} + +/// Runs migrations declared by a [MigrationStrategy]. +class Migrator { + final GeneratedDatabase _db; + + /// Used internally by moor when opening the database. + Migrator(this._db); + + /// Creates all tables specified for the database, if they don't exist + @Deprecated('Use createAll() instead') + Future createAllTables() async { + for (final table in _db.allTables) { + await createTable(table); + } + } + + /// Creates all tables, triggers, views, indexes and everything else defined + /// in the database, if they don't exist. 
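+  ///
+  /// As a usage sketch, this is typically invoked from the `onCreate`
+  /// callback of a [MigrationStrategy] (the surrounding database class is
+  /// assumed):
+  ///
+  /// ```dart
+  /// @override
+  /// MigrationStrategy get migration => MigrationStrategy(
+  ///   onCreate: (m) => m.createAll(),
+  /// );
+  /// ```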
+ Future createAll() async { + for (final entity in _db.allSchemaEntities) { + if (entity is TableInfo) { + await createTable(entity); + } else if (entity is Trigger) { + await createTrigger(entity); + } else if (entity is Index) { + await createIndex(entity); + } else if (entity is OnCreateQuery) { + await _issueCustomQuery(entity.sql, const []); + } else if (entity is View) { + await createView(entity); + } else { + throw AssertionError('Unknown entity: $entity'); + } + } + } + + GenerationContext _createContext() { + return GenerationContext.fromDb(_db); + } + + /// Creates the given table if it doesn't exist + Future createTable(TableInfo table) async { + final context = _createContext(); + + if (table is VirtualTableInfo) { + _writeCreateVirtual(table, context); + } else { + _writeCreateTable(table, context); + } + + return _issueCustomQuery(context.sql, context.boundVariables); + } + + /// Experimental utility method to alter columns of an existing table. + /// + /// Since sqlite does not provide a way to alter the type or constraint of an + /// individual column, one needs to write a fairly complex migration procedure + /// for this. + /// [alterTable] will run the [12 step procedure][other alter] recommended by + /// sqlite. + /// + /// The [migration] to run describes the transformation to apply to the table. + /// The individual fields of the [TableMigration] class contain more + /// information on the transformations supported at the moment. Moor's + /// [documentation][moor docs] also contains more details and examples for + /// common migrations that can be run with [alterTable]. + /// + /// When deleting columns from a table, make sure to migrate tables that have + /// a foreign key constraint on those columns first. + /// + /// While this function will re-create affected indexes and triggers, it does + /// not reliably handle views at the moment. + /// + /// [other alter]: https://www.sqlite.org/lang_altertable.html#otheralter + /// [moor docs]: https://moor.simonbinder.eu/docs/advanced-features/migrations/#complex-migrations + @experimental + Future alterTable(TableMigration migration) async { + final foreignKeysEnabled = + (await _db.customSelect('PRAGMA foreign_keys').getSingle()) + .readBool('foreign_keys'); + + if (foreignKeysEnabled) { + await _db.customStatement('PRAGMA foreign_keys = OFF;'); + } + + final table = migration.affectedTable; + final tableName = table.actualTableName; + + await _db.transaction(() async { + // We will drop the original table later, which will also delete + // associated triggers, indices and and views. We query sqlite_schema to + // re-create those later. + // We use the legacy sqlite_master table since the _schema rename happened + // in a very recent version (3.33.0) + final schemaQuery = await _db.customSelect( + 'SELECT type, name, sql FROM sqlite_master WHERE tbl_name = ?;', + variables: [Variable(tableName)], + ).get(); + + final createAffected = []; + + for (final row in schemaQuery) { + final type = row.readString('type'); + final sql = row.read('sql'); + final name = row.readString('name'); + + if (sql == null) { + // These indexes are created by sqlite to enforce different kinds of + // special constraints. + // They do not have any SQL create statement as they are created + // automatically by the constraints on the table. + // They can not be re-created and need to be skipped. 
+ assert(name.startsWith('sqlite_autoindex')); + continue; + } + + switch (type) { + case 'trigger': + case 'view': + case 'index': + createAffected.add(sql); + break; + } + } + + // Step 4: Create the new table in the desired format + final temporaryName = 'tmp_for_copy_$tableName'; + final temporaryTable = table.createAlias(temporaryName); + await createTable(temporaryTable); + + // Step 5: Transfer old content into the new table + final context = _createContext(); + final expressionsForSelect = []; + + context.buffer.write('INSERT INTO $temporaryName ('); + var first = true; + for (final column in table.$columns) { + final transformer = migration.columnTransformer[column]; + + if (transformer != null || !migration.newColumns.contains(column)) { + // New columns without a transformer have a default value, so we don't + // include them in the column list of the insert. + // Otherwise, we prefer to use the column transformer if set. If there + // isn't a transformer, just copy the column from the old table, + // without any transformation. + final expression = migration.columnTransformer[column] ?? column; + expressionsForSelect.add(expression); + + if (!first) context.buffer.write(', '); + context.buffer.write(column.escapedName); + first = false; + } + } + + context.buffer.write(') SELECT '); + first = true; + for (final expr in expressionsForSelect) { + if (!first) context.buffer.write(', '); + expr.writeInto(context); + first = false; + } + context.buffer.write(' FROM ${escapeIfNeeded(tableName)};'); + await _issueCustomQuery(context.sql, context.introducedVariables); + + // Step 6: Drop the old table + await _issueCustomQuery('DROP TABLE ${escapeIfNeeded(tableName)}'); + + // Step 7: Rename the new table to the old name + await _issueCustomQuery('ALTER TABLE ${escapeIfNeeded(temporaryName)} ' + 'RENAME TO ${escapeIfNeeded(tableName)}'); + + // Step 8: Re-create associated indexes, triggers and views + for (final stmt in createAffected) { + await _issueCustomQuery(stmt); + } + + // We don't currently check step 9 and 10, step 11 happens implicitly. + }); + + // Finally, re-enable foreign keys if they were enabled originally. 
+ if (foreignKeysEnabled) { + await _db.customStatement('PRAGMA foreign_keys = ON;'); + } + } + + void _writeCreateTable(TableInfo table, GenerationContext context) { + context.buffer.write('CREATE TABLE IF NOT EXISTS ' + '${escapeIfNeeded(table.$tableName)} ('); + + var hasAutoIncrement = false; + for (var i = 0; i < table.$columns.length; i++) { + final column = table.$columns[i]; + if (column.hasAutoIncrement) { + hasAutoIncrement = true; + } + + column.writeColumnDefinition(context); + + if (i < table.$columns.length - 1) context.buffer.write(', '); + } + + final dslTable = table.asDslTable; + + // we're in a bit of a hacky situation where we don't write the primary + // as table constraint if it has already been written on a primary key + // column, even though that column appears in table.$primaryKey because we + // need to know all primary keys for the update(table).replace(row) API + final hasPrimaryKey = table.$primaryKey.isNotEmpty; + final dontWritePk = dslTable.dontWriteConstraints || hasAutoIncrement; + if (hasPrimaryKey && !dontWritePk) { + context.buffer.write(', PRIMARY KEY ('); + final pkList = table.$primaryKey.toList(growable: false); + for (var i = 0; i < pkList.length; i++) { + final column = pkList[i]; + + context.buffer.write(escapeIfNeeded(column.$name)); + + if (i != pkList.length - 1) context.buffer.write(', '); + } + context.buffer.write(')'); + } + + final constraints = dslTable.customConstraints; + + for (var i = 0; i < constraints.length; i++) { + context.buffer + ..write(', ') + ..write(constraints[i]); + } + + context.buffer.write(')'); + + // == true because of nullability + if (dslTable.withoutRowId == true) { + context.buffer.write(' WITHOUT ROWID'); + } + + context.buffer.write(';'); + } + + void _writeCreateVirtual(VirtualTableInfo table, GenerationContext context) { + context.buffer + ..write('CREATE VIRTUAL TABLE IF NOT EXISTS ') + ..write(escapeIfNeeded(table.$tableName)) + ..write(' USING ') + ..write(table.moduleAndArgs) + ..write(';'); + } + + /// Executes the `CREATE TRIGGER` statement that created the [trigger]. + Future createTrigger(Trigger trigger) { + return _issueCustomQuery(trigger.createTriggerStmt, const []); + } + + /// Executes a `CREATE INDEX` statement to create the [index]. + Future createIndex(Index index) { + return _issueCustomQuery(index.createIndexStmt, const []); + } + + /// Executes a `CREATE VIEW` statement to create the [view]. + Future createView(View view) { + return _issueCustomQuery(view.createViewStmt, const []); + } + + /// Drops a table, trigger or index. + Future drop(DatabaseSchemaEntity entity) async { + final escapedName = escapeIfNeeded(entity.entityName); + + String kind; + + if (entity is TableInfo) { + kind = 'TABLE'; + } else if (entity is Trigger) { + kind = 'TRIGGER'; + } else if (entity is Index) { + kind = 'INDEX'; + } else { + // Entity that can't be dropped. + return; + } + + await _issueCustomQuery('DROP $kind IF EXISTS $escapedName;'); + } + + /// Deletes the table with the given name. Note that this function does not + /// escape the [name] parameter. + Future deleteTable(String name) async { + return _issueCustomQuery('DROP TABLE IF EXISTS $name;'); + } + + /// Adds the given column to the specified table. 
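+  ///
+  /// A minimal migration sketch (the `todos` table and its `dueDate` column
+  /// are assumed for illustration):
+  ///
+  /// ```dart
+  /// onUpgrade: (m, from, to) async {
+  ///   if (from == 1) {
+  ///     await m.addColumn(todos, todos.dueDate);
+  ///   }
+  /// },
+  /// ```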
+ Future addColumn(TableInfo table, GeneratedColumn column) async { + final context = _createContext(); + + context.buffer + .write('ALTER TABLE ${escapeIfNeeded(table.$tableName)} ADD COLUMN '); + column.writeColumnDefinition(context); + context.buffer.write(';'); + + return _issueCustomQuery(context.sql); + } + + /// Changes the name of a column in a [table]. + /// + /// After renaming a column in a Dart table or a moor file and re-running the + /// generator, you can use [renameColumn] in a migration step to rename the + /// column for existing databases. + /// + /// The [table] argument must be set to the table enclosing the changed + /// column. The [oldName] must be set to the old name of the [column] in SQL. + /// For Dart tables, note that moor will transform `camelCase` column names in + /// Dart to `snake_case` column names in SQL. + /// + /// __Important compatibility information__: [renameColumn] uses an + /// `ALTER TABLE RENAME COLUMN` internally. Support for that syntax was added + /// in sqlite version 3.25.0, released on 2018-09-15. When you're using + /// Flutter and depend on `sqlite3_flutter_libs`, you're guaranteed to have + /// that version. Otherwise, please ensure that you only use [renameColumn] if + /// you know you'll run on sqlite 3.20.0 or later. + Future renameColumn( + TableInfo table, String oldName, GeneratedColumn column) async { + final context = _createContext(); + context.buffer + ..write('ALTER TABLE ${escapeIfNeeded(table.$tableName)} ') + ..write('RENAME COLUMN ${escapeIfNeeded(oldName)} ') + ..write('TO ${column.escapedName};'); + + return _issueCustomQuery(context.sql); + } + + /// Changes the [table] name from [oldName] to the current + /// [TableInfo.actualTableName]. + /// + /// After renaming a table in moor or Dart and re-running the generator, you + /// can use [renameTable] in a migration step to rename the table in existing + /// databases. + Future renameTable(TableInfo table, String oldName) async { + final context = _createContext(); + context.buffer.write('ALTER TABLE ${escapeIfNeeded(oldName)} ' + 'RENAME TO ${escapeIfNeeded(table.actualTableName)};'); + return _issueCustomQuery(context.sql); + } + + /// Executes the custom query. + @Deprecated('Use customStatement in the database class') + Future issueCustomQuery(String sql, [List? args]) { + return _issueCustomQuery(sql, args); + } + + Future _issueCustomQuery(String sql, [List? args]) { + return _db.customStatement(sql, args); + } +} + +/// Provides information about whether migrations ran before opening the +/// database. +class OpeningDetails { + /// The schema version before the database has been opened, or `null` if the + /// database has just been created. + final int? versionBefore; + + /// The schema version after running migrations. + final int versionNow; + + /// Whether the database has been created during this session. + bool get wasCreated => versionBefore == null; + + /// Whether a schema upgrade was performed while opening the database. + bool get hadUpgrade => !wasCreated && versionBefore != versionNow; + + /// Used internally by moor when opening a database. + const OpeningDetails(this.versionBefore, this.versionNow) + // Should use null instead of 0 for consistency + : assert(versionBefore != 0); +} + +/// Extension providing the [destructiveFallback] strategy. +extension DestructiveMigrationExtension on GeneratedDatabase { + /// Provides a destructive [MigrationStrategy] that will delete and then + /// re-create all tables, triggers and indices. 
+ /// + /// To use this behavior, override the `migration` getter in your database: + /// + /// ```dart + /// @UseMoor(...) + /// class MyDatabase extends _$MyDatabase { + /// @override + /// MigrationStrategy get migration => destructiveFallback; + /// } + /// ``` + MigrationStrategy get destructiveFallback { + return MigrationStrategy( + onCreate: _defaultOnCreate, + onUpgrade: (m, from, to) async { + // allSchemaEntities are sorted topologically references between them. + // Reverse order for deletion in order to not break anything. + final reversedEntities = m._db.allSchemaEntities.toList().reversed; + + for (final entity in reversedEntities) { + await m.drop(entity); + } + + // Re-create them now + await m.createAll(); + }, + ); + } +} + +/// Contains instructions needed to run a complex migration on a table, using +/// the steps described in [Making other kinds of table schema changes][https://www.sqlite.org/lang_altertable.html#otheralter]. +/// +/// For examples and more details, see [the documentation](https://moor.simonbinder.eu/docs/advanced-features/migrations/#complex-migrations). +@experimental +class TableMigration { + /// The table to migrate. It is assumed that this table already exists at the + /// time the migration is running. If you need to create a new table, use + /// [Migrator.createTable] instead of the more complex [TableMigration]. + final TableInfo affectedTable; + + /// A list of new columns that are known to _not_ exist in the database yet. + /// + /// If these columns aren't set through the [columnTransformer], they must + /// have a default value. + final List newColumns; + + /// A map describing how to transform columns of the [affectedTable]. + /// + /// A key in the map refers to the new column in the table. If you're running + /// a [TableMigration] to add new columns, those columns doesn't have to exist + /// in the database yet. + /// The value associated with a column is the expression to use when + /// transforming the new table. + final Map columnTransformer; + + /// Creates migration description on the [affectedTable]. + TableMigration( + this.affectedTable, { + this.columnTransformer = const {}, + this.newColumns = const [], + }) { + // All new columns must either have a transformation or a default value of + // some kind + final problematicNewColumns = []; + for (final column in newColumns) { + // isRequired returns false if the column has a client default value that + // would be used for inserts. We can't apply the client default here + // though, so it doesn't count as a default value. + final isRequired = + column.requiredDuringInsert || column.clientDefault != null; + if (isRequired && !columnTransformer.containsKey(column)) { + problematicNewColumns.add(column.$name); + } + } + + if (problematicNewColumns.isNotEmpty) { + throw ArgumentError( + "Some of the newColumns don't have a default value and aren't included " + 'in columnTransformer: ${problematicNewColumns.join(', ')}. \n' + 'To add columns, make sure that they have a default value or write an ' + 'expression to use in the columnTransformer map.', + ); + } + } +} diff --git a/drift/lib/src/runtime/query_builder/query_builder.dart b/drift/lib/src/runtime/query_builder/query_builder.dart new file mode 100644 index 00000000..6bb1a9d4 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/query_builder.dart @@ -0,0 +1,84 @@ +// Mega compilation unit that includes all Dart apis related to generating SQL +// at runtime. 
+ +import 'package:collection/collection.dart'; +import 'package:drift/drift.dart'; +import 'package:drift/sqlite_keywords.dart'; +import 'package:drift/src/runtime/executor/stream_queries.dart'; +import 'package:drift/src/utils/single_transformer.dart'; +import 'package:meta/meta.dart'; + +// New files should not be part of this mega library, which we're trying to +// split up. +import 'expressions/case_when.dart'; + +part 'components/group_by.dart'; +part 'components/join.dart'; +part 'components/limit.dart'; +part 'components/order_by.dart'; +part 'components/where.dart'; +part 'expressions/aggregate.dart'; +part 'expressions/algebra.dart'; +part 'expressions/bools.dart'; +part 'expressions/comparable.dart'; +part 'expressions/custom.dart'; +part 'expressions/datetimes.dart'; +part 'expressions/exists.dart'; +part 'expressions/expression.dart'; +part 'expressions/in.dart'; +part 'expressions/null_check.dart'; +part 'expressions/text.dart'; +part 'expressions/variables.dart'; + +part 'schema/column_impl.dart'; +part 'schema/entities.dart'; +part 'schema/table_info.dart'; + +part 'statements/select/custom_select.dart'; +part 'statements/select/select.dart'; +part 'statements/select/select_with_join.dart'; +part 'statements/delete.dart'; +part 'statements/insert.dart'; +part 'statements/query.dart'; +part 'statements/update.dart'; + +part 'generation_context.dart'; +part 'migration.dart'; + +/// A component is anything that can appear in a sql query. +abstract class Component { + /// Default, constant constructor. + const Component(); + + /// Writes this component into the [context] by writing to its + /// [GenerationContext.buffer] or by introducing bound variables. When writing + /// into the buffer, no whitespace around the this component should be + /// introduced. When a component consists of multiple composed component, it's + /// responsible for introducing whitespace between its child components. + void writeInto(GenerationContext context); +} + +/// Writes all [components] into the [context], separated by commas. +void _writeCommaSeparated( + GenerationContext context, Iterable components) { + var first = true; + for (final element in components) { + if (!first) { + context.buffer.write(', '); + } + element.writeInto(context); + first = false; + } +} + +/// An enumeration of database systems supported by moor. Only +/// [SqlDialect.sqlite] is officially supported, all others are in an +/// experimental state at the moment. +enum SqlDialect { + /// Use sqlite's sql dialect. This is the default option and the only + /// officially supported dialect at the moment. + sqlite, + + /// (currently unsupported) + mysql +} diff --git a/drift/lib/src/runtime/query_builder/schema/column_impl.dart b/drift/lib/src/runtime/query_builder/schema/column_impl.dart new file mode 100644 index 00000000..41433059 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/schema/column_impl.dart @@ -0,0 +1,249 @@ +part of '../query_builder.dart'; + +const VerificationResult _invalidNull = VerificationResult.failure( + "This column is not nullable and doesn't have a default value. " + "Null fields thus can't be inserted."); + +/// Implementation for a [Column] declared on a table. +class GeneratedColumn extends Column { + /// The sql name of this column. + final String $name; // todo: Remove, replace with `name` + + /// The name of the table that contains this column + final String tableName; + + /// Whether null values are allowed for this column. 
+ final bool $nullable; + + /// Default constraints generated by moor. + final String? _defaultConstraints; + + /// Custom constraints that have been specified for this column. + /// + /// Some constraints, like `NOT NULL` or checks for booleans, are generated by + /// moor by default. + /// Constraints can also be overridden with [BuildColumn.customConstraint], + /// in which case the moor constraints will not be applied. + final String? $customConstraints; + + /// The default expression to be used during inserts when no value has been + /// specified. Can be null if no default value is set. + final Expression? defaultValue; + + /// A function that yields a default column for inserts if no value has been + /// set. This is different to [defaultValue] since the function is written in + /// Dart, not SQL. It's a compile-time error to declare columns where both + /// [defaultValue] and [clientDefault] are non-null. + /// + /// See also: [BuildColumn.clientDefault]. + final T Function()? clientDefault; + + /// Additional checks performed on values before inserts or updates. + final VerificationResult Function(T, VerificationMeta)? additionalChecks; + + /// The sql type name, such as TEXT for texts. + final String typeName; + + /// Whether a value is required for this column when inserting a new row. + final bool requiredDuringInsert; + + /// Whether this column has an `AUTOINCREMENT` primary key constraint that was + /// created by moor. + bool get hasAutoIncrement => + _defaultConstraints?.contains('AUTOINCREMENT') == true; + + @override + String get name => $name; + + /// Used by generated code. + GeneratedColumn( + this.$name, + this.tableName, + this.$nullable, { + this.clientDefault, + required this.typeName, + String? defaultConstraints, + this.$customConstraints, + this.defaultValue, + this.additionalChecks, + this.requiredDuringInsert = false, + }) : _defaultConstraints = defaultConstraints; + + /// Applies a type converter to this column. + /// + /// This is mainly used by the generator. + GeneratedColumnWithTypeConverter withConverter( + TypeConverter converter) { + return GeneratedColumnWithTypeConverter._( + converter, + $name, + tableName, + $nullable, + clientDefault, + typeName, + _defaultConstraints, + $customConstraints, + defaultValue, + additionalChecks, + requiredDuringInsert, + ); + } + + /// Writes the definition of this column, as defined + /// [here](https://www.sqlite.org/syntax/column-def.html), into the given + /// buffer. + void writeColumnDefinition(GenerationContext into) { + into.buffer.write('$escapedName $typeName'); + + if ($customConstraints == null) { + into.buffer.write($nullable ? ' NULL' : ' NOT NULL'); + + final defaultValue = this.defaultValue; + if (defaultValue != null) { + into.buffer.write(' DEFAULT '); + + // we need to write brackets if the default value is not a literal. 
+ // see https://www.sqlite.org/syntax/column-constraint.html + final writeBrackets = !defaultValue.isLiteral; + + if (writeBrackets) into.buffer.write('('); + defaultValue.writeInto(into); + if (writeBrackets) into.buffer.write(')'); + } + + // these custom constraints refer to builtin constraints from moor + if (_defaultConstraints != null) { + into.buffer + ..write(' ') + ..write(_defaultConstraints); + } + } else if ($customConstraints?.isNotEmpty == true) { + into.buffer + ..write(' ') + ..write($customConstraints); + } + } + + @override + void writeInto(GenerationContext context, {bool ignoreEscape = false}) { + if (context.hasMultipleTables) { + context.buffer + ..write(tableName) + ..write('.'); + } + context.buffer.write(ignoreEscape ? $name : escapedName); + } + + /// Checks whether the given value fits into this column. The default + /// implementation only checks for nullability, but subclasses might enforce + /// additional checks. For instance, a text column might verify that a text + /// has a certain length. + /// + /// Note: The behavior of this method was changed in moor 1.5. Before, null + /// values were interpreted as an absent value during updates or if the + /// [defaultValue] is set. Verification was skipped for absent values. + /// This is no longer the case, all null values are assumed to be an sql + /// `NULL`. + VerificationResult isAcceptableValue(T value, VerificationMeta meta) { + final nullOk = $nullable; + if (!nullOk && value == null) { + return _invalidNull; + } else { + return additionalChecks?.call(value, meta) ?? + const VerificationResult.success(); + } + } + + /// A more general version of [isAcceptableValue] that supports any sql + /// expression. + /// + /// The default implementation will not perform any check if [value] is not + /// a [Variable]. + VerificationResult isAcceptableOrUnknown( + Expression value, VerificationMeta meta) { + if (value is Variable) { + return isAcceptableValue(value.value as T, meta); + } else { + return const VerificationResult.success(); + } + } + + @override + int get hashCode => Object.hash(tableName, $name); + + @override + bool operator ==(Object other) { + if (other.runtimeType != runtimeType) return false; + + // ignore: test_types_in_equals + final typedOther = other as GeneratedColumn; + return typedOther.tableName == tableName && typedOther.$name == $name; + } + + Variable _evaluateClientDefault() { + return Variable(clientDefault!()); + } + + /// A value for [additionalChecks] validating allowed text lengths. + /// + /// Used by generated code. + static VerificationResult Function(String?, VerificationMeta) checkTextLength( + {int? minTextLength, int? maxTextLength}) { + return (value, meta) { + if (value == null) return const VerificationResult.success(); + + final length = value.length; + if (minTextLength != null && minTextLength > length) { + return VerificationResult.failure( + 'Must at least be $minTextLength characters long.'); + } + if (maxTextLength != null && maxTextLength < length) { + return VerificationResult.failure( + 'Must at most be $maxTextLength characters long.'); + } + + return const VerificationResult.success(); + }; + } +} + +/// A [GeneratedColumn] with a type converter attached to it. +/// +/// This provides the [equalsValue] method, which can be used to compare this +/// column against a value mapped through a type converter. +class GeneratedColumnWithTypeConverter extends GeneratedColumn { + /// The type converted used on this column. 
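+  ///
+  /// As a hedged usage sketch for [equalsValue] (the `categories` table, its
+  /// converted `priority` column and the `Priority` enum are made up for
+  /// illustration):
+  ///
+  /// ```dart
+  /// select(categories)..where((c) => c.priority.equalsValue(Priority.high));
+  /// ```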
+ final TypeConverter converter; + + GeneratedColumnWithTypeConverter._( + this.converter, + String name, + String tableName, + bool nullable, + S Function()? clientDefault, + String typeName, + String? defaultConstraints, + String? customConstraints, + Expression? defaultValue, + VerificationResult Function(S, VerificationMeta)? additionalChecks, + bool requiredDuringInsert, + ) : super( + name, + tableName, + nullable, + clientDefault: clientDefault, + typeName: typeName, + defaultConstraints: defaultConstraints, + $customConstraints: customConstraints, + defaultValue: defaultValue, + additionalChecks: additionalChecks, + requiredDuringInsert: requiredDuringInsert, + ); + + /// Compares this column against the mapped [dartValue]. + /// + /// The value will be mapped using the [converter] applied to this column. + Expression equalsValue(D? dartValue) { + return equals(converter.mapToSql(dartValue) as S); + } +} diff --git a/drift/lib/src/runtime/query_builder/schema/entities.dart b/drift/lib/src/runtime/query_builder/schema/entities.dart new file mode 100644 index 00000000..9d84b0dc --- /dev/null +++ b/drift/lib/src/runtime/query_builder/schema/entities.dart @@ -0,0 +1,163 @@ +part of '../query_builder.dart'; + +/// Some abstract schema entity that can be stored in a database. This includes +/// tables, triggers, views, indexes, etc. +abstract class DatabaseSchemaEntity { + /// The (unalised) name of this entity in the database. + String get entityName; +} + +/// A sqlite trigger that's executed before, after or instead of a subset of +/// writes on a specific tables. +/// In moor, triggers can only be declared in `.moor` files. +/// +/// For more information on triggers, see the [CREATE TRIGGER][sqlite-docs] +/// documentation from sqlite, or the [entry on sqlitetutorial.net][sql-tut]. +/// +/// [sqlite-docs]: https://sqlite.org/lang_createtrigger.html +/// [sql-tut]: https://www.sqlitetutorial.net/sqlite-trigger/ +class Trigger extends DatabaseSchemaEntity { + /// The `CREATE TRIGGER` sql statement that can be used to create this + /// trigger. + final String createTriggerStmt; + @override + final String entityName; + + /// Creates a trigger representation by the [createTriggerStmt] and its + /// [entityName]. Mainly used by generated code. + Trigger(this.createTriggerStmt, this.entityName); +} + +/// A sqlite index on columns or expressions. +/// +/// For more information on triggers, see the [CREATE TRIGGER][sqlite-docs] +/// documentation from sqlite, or the [entry on sqlitetutorial.net][sql-tut]. +/// +/// [sqlite-docs]: https://www.sqlite.org/lang_createindex.html +/// [sql-tut]: https://www.sqlitetutorial.net/sqlite-index/ +class Index extends DatabaseSchemaEntity { + @override + final String entityName; + + /// The `CREATE INDEX` sql statement that can be used to create this index. + final String createIndexStmt; + + /// Creates an index model by the [createIndexStmt] and its [entityName]. + /// Mainly used by generated code. + Index(this.entityName, this.createIndexStmt); +} + +/// A sqlite view. +/// +/// In moor, views can only be declared in `.moor` files. +/// +/// For more information on views, see the [CREATE VIEW][sqlite-docs] +/// documentation from sqlite, or the [entry on sqlitetutorial.net][sql-tut]. 
+/// +/// [sqlite-docs]: https://www.sqlite.org/lang_createview.html +/// [sql-tut]: https://www.sqlitetutorial.net/sqlite-create-view/ +abstract class View extends ResultSetImplementation + implements HasResultSet { + @override + final String entityName; + + /// The `CREATE VIEW` sql statement that can be used to create this view. + final String createViewStmt; + + /// Creates an view model by the [createViewStmt] and its [entityName]. + /// Mainly used by generated code. + View(this.entityName, this.createViewStmt); +} + +/// An internal schema entity to run an sql statement when the database is +/// created. +/// +/// The generator uses this entity to implement `@create` statements in moor +/// files: +/// ```sql +/// CREATE TABLE users (name TEXT); +/// +/// @create: INSERT INTO users VALUES ('Bob'); +/// ``` +/// A [OnCreateQuery] is emitted for each `@create` statement in an included +/// moor file. +class OnCreateQuery extends DatabaseSchemaEntity { + /// The sql statement that should be run in the default `onCreate` clause. + final String sql; + + /// Create a query that will be run in the default `onCreate` migration. + OnCreateQuery(this.sql); + + @override + String get entityName => r'$internal$'; +} + +/// Interface for schema entities that have a result set. +/// +/// [Tbl] is the generated Dart class which implements [ResultSetImplementation] +/// and the user-defined [Table] class. [Row] is the class used to hold a result +/// row. +abstract class ResultSetImplementation extends DatabaseSchemaEntity { + /// The (potentially aliased) name of this table or view. + /// + /// If no alias is active, this is the same as [entityName]. + String get aliasedName => entityName; + + /// Type system sugar. Implementations are likely to inherit from both + /// [TableInfo] and [Tbl] and can thus just return their instance. + Tbl get asDslTable; + + /// All columns from this table or view. + List get $columns; + + /// Maps the given row returned by the database into the fitting data class. + Row map(Map data, {String? tablePrefix}); + + /// Creates an alias of this table or view that will write the name [alias] + /// when used in a query. + ResultSetImplementation createAlias(String alias) => + _AliasResultSet(alias, this); +} + +class _AliasResultSet extends ResultSetImplementation { + final String _alias; + final ResultSetImplementation _inner; + + _AliasResultSet(this._alias, this._inner); + + @override + List get $columns => _inner.$columns; + + @override + String get aliasedName => _alias; + + @override + ResultSetImplementation createAlias(String alias) { + return _AliasResultSet(alias, _inner); + } + + @override + String get entityName => _inner.entityName; + + @override + Row map(Map data, {String? tablePrefix}) { + return _inner.map(data, tablePrefix: tablePrefix); + } + + @override + Tbl get asDslTable => _inner.asDslTable; +} + +/// Extension to generate an alias for a table or a view. +extension NameWithAlias on ResultSetImplementation { + /// The table name, optionally suffixed with the alias if one exists. This + /// can be used in select statements, as it returns something like "users u" + /// for a table called users that has been aliased as "u". 
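+  ///
+  /// A rough sketch of what this getter evaluates to (assuming a database
+  /// `db` with a generated `users` table; both names are hypothetical):
+  /// ```dart
+  /// final u = db.alias(db.users, 'u');
+  /// // db.users.tableWithAlias == 'users'
+  /// // u.tableWithAlias == 'users u'
+  /// ```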
+ String get tableWithAlias { + if (aliasedName == entityName) { + return entityName; + } else { + return '$entityName $aliasedName'; + } + } +} diff --git a/drift/lib/src/runtime/query_builder/schema/table_info.dart b/drift/lib/src/runtime/query_builder/schema/table_info.dart new file mode 100644 index 00000000..44252d13 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/schema/table_info.dart @@ -0,0 +1,172 @@ +part of '../query_builder.dart'; + +/// Base class for generated table classes. +/// +/// Moor generates a subclass of [TableInfo] for each table used in a database. +/// This classes contains information about the table's schema (e.g. its +/// [primaryKey] or [$columns]). +/// +/// [TableDsl] is the original table class written by the user. For tables +/// defined in moor files, this is the table implementation class itself. +/// [D] is the type of the data class generated from the table. +/// +/// To obtain an instance of this class, use a table getter from the database. +mixin TableInfo on Table + implements DatabaseSchemaEntity, ResultSetImplementation { + @override + TableDsl get asDslTable => this as TableDsl; + + /// The primary key of this table. Can be empty if no custom primary key has + /// been specified. + /// + /// Additional to the [Table.primaryKey] columns declared by an user, this + /// also contains auto-increment integers, which are primary key by default. + Set get $primaryKey => const {}; + + // ensure the primaryKey getter is consistent with $primaryKey, which can + // contain additional columns. + @override + Set get primaryKey => $primaryKey; + + /// The table name in the sql table. This can be an alias for the actual table + /// name. See [actualTableName] for a table name that is not aliased. + @Deprecated('Use aliasedName instead') + String get $tableName => aliasedName; + + @override + String get aliasedName => entityName; + + /// The name of the table in the database. Unless [$tableName], this can not + /// be aliased. + String get actualTableName; + + @override + String get entityName => actualTableName; + + Map? _columnsByName; + + /// Gets all [$columns] in this table, indexed by their (non-escaped) name. + Map get columnsByName { + return _columnsByName ??= { + for (final column in $columns) column.$name: column + }; + } + + /// Validates that the given entity can be inserted into this table, meaning + /// that it respects all constraints (nullability, text length, etc.). + VerificationContext validateIntegrity(Insertable instance, + {bool isInserting = false}) { + // default behavior when users chose to not verify the integrity (build time + // option) + return const VerificationContext.notEnabled(); + } + + /// Converts a [companion] to the real model class, [D]. + /// + /// Values that are [Value.absent] in the companion will be set to `null`. + D mapFromCompanion(Insertable companion) { + final asColumnMap = companion.toColumns(false); + + if (asColumnMap.values.any((e) => e is! 
Variable)) { + throw ArgumentError('The companion $companion cannot be transformed ' + 'into a dataclass as it contains expressions that need to be ' + 'evaluated by a database engine.'); + } + + final context = GenerationContext(SqlTypeSystem.defaultInstance, null); + final rawValues = asColumnMap + .cast() + .map((key, value) => MapEntry(key, value.mapToSimpleValue(context))); + + return map(rawValues); + } + + @override + TableInfo createAlias(String alias); + + @override + bool operator ==(Object other) { + // tables are singleton instances except for aliases + if (other is TableInfo) { + return other.runtimeType == runtimeType && other.$tableName == $tableName; + } + return false; + } + + @override + int get hashCode => Object.hash(aliasedName, actualTableName); +} + +/// Additional interface for tables in a moor file that have been created with +/// an `CREATE VIRTUAL TABLE STATEMENT`. +mixin VirtualTableInfo on TableInfo { + /// Returns the module name and the arguments that were used in the statement + /// that created this table. In that sense, `CREATE VIRTUAL TABLE + /// USING ;` can be used to create this table in sql. + String get moduleAndArgs; +} + +/// Static extension members for generated table classes. +/// +/// Most of these are accessed internally by moor or by generated code. +extension TableInfoUtils on ResultSetImplementation { + /// Like [map], but from a [row] instead of the low-level map. + D mapFromRow(QueryRow row, {String? tablePrefix}) { + return map(row.data, tablePrefix: tablePrefix); + } + + /// Like [mapFromRow], but returns null if a non-nullable column of this table + /// is null in [row]. + D? mapFromRowOrNull(QueryRow row, {String? tablePrefix}) { + final resolvedPrefix = tablePrefix == null ? '' : '$tablePrefix.'; + + final notInRow = $columns + .where((c) => !c.$nullable) + .any((e) => row.data['$resolvedPrefix${e.$name}'] == null); + + if (notInRow) return null; + + return mapFromRow(row, tablePrefix: tablePrefix); + } + + /// Like [mapFromRow], but maps columns from the result through [alias]. + /// + /// This is used internally by moor to support mapping to a table from a + /// select statement with different column names. For instance, for: + /// + /// ```sql + /// CREATE TABLE tbl (foo, bar); + /// + /// query: SELECT foo AS c1, bar AS c2 FROM tbl; + /// ``` + /// + /// Moor would generate code to call this method with `'c1': 'foo'` and + /// `'c2': 'bar'` in [alias]. + D mapFromRowWithAlias(QueryRow row, Map alias) { + return map({ + for (final entry in row.data.entries) alias[entry.key]!: entry.value, + }); + } +} + +/// Extension to use the `rowid` of a table in Dart queries. + +extension RowIdExtension on TableInfo { + /// In sqlite, each table that isn't virtual and hasn't been created with the + /// `WITHOUT ROWID` modified has a [row id](https://www.sqlite.org/rowidtable.html). + /// When the table has a single primary key column which is an integer, that + /// column is an _alias_ to the row id in sqlite3. + /// + /// If the row id has not explicitly been declared as a column aliasing it, + /// the [rowId] will not be part of a moor-generated data class. In this + /// case, the [rowId] getter can be used to refer to a table's row id in a + /// query. 
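+  ///
+  /// A minimal sketch of selecting the row id (assuming a hypothetical
+  /// generated `users` table and a database class providing `select`):
+  /// ```dart
+  /// final query = select(users).addColumns([users.rowId]);
+  /// final row = (await query.get()).first;
+  /// final id = row.read(users.rowId);
+  /// ```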
+ Expression get rowId { + if (withoutRowId || this is VirtualTableInfo) { + throw ArgumentError('Cannot use rowId on a table without a rowid!'); + } + + return GeneratedColumn('_rowid_', aliasedName, false, + typeName: 'INTEGER'); + } +} diff --git a/drift/lib/src/runtime/query_builder/statements/delete.dart b/drift/lib/src/runtime/query_builder/statements/delete.dart new file mode 100644 index 00000000..563d3570 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/delete.dart @@ -0,0 +1,50 @@ +part of '../query_builder.dart'; + +/// A `DELETE` statement in sql +class DeleteStatement extends Query + with SingleTableQueryMixin { + /// This constructor should be called by [DatabaseConnectionUser.delete] for + /// you. + DeleteStatement(DatabaseConnectionUser database, TableInfo table) + : super(database, table); + + @override + void writeStartPart(GenerationContext ctx) { + ctx.buffer.write('DELETE FROM ${table.tableWithAlias}'); + } + + /// Deletes just this entity. May not be used together with [where]. + /// + /// Returns the amount of rows that were deleted by this statement directly + /// (not including additional rows that might be affected through triggers or + /// foreign key constraints). + Future delete(Insertable entity) { + assert( + whereExpr == null, + 'When deleting an entity, you may not use where(...)' + 'as well. The where clause will be determined automatically'); + + whereSamePrimaryKey(entity); + return go(); + } + + /// Deletes all rows matched by the set [where] clause and the optional + /// limit. + /// + /// Returns the amount of rows that were deleted by this statement directly + /// (not including additional rows that might be affected through triggers or + /// foreign key constraints). + Future go() async { + final ctx = constructQuery(); + + return ctx.executor!.doWhenOpened((e) async { + final rows = await e.runDelete(ctx.sql, ctx.boundVariables); + + if (rows > 0) { + database.notifyUpdates( + {TableUpdate.onTable(_sourceTable, kind: UpdateKind.delete)}); + } + return rows; + }); + } +} diff --git a/drift/lib/src/runtime/query_builder/statements/insert.dart b/drift/lib/src/runtime/query_builder/statements/insert.dart new file mode 100644 index 00000000..24b68d40 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/insert.dart @@ -0,0 +1,379 @@ +part of '../query_builder.dart'; + +/// Represents an insert statement +class InsertStatement { + /// The database to use then executing this statement + @protected + final DatabaseConnectionUser database; + + /// The table we're inserting into + @protected + final TableInfo table; + + /// Constructs an insert statement from the database and the table. Used + /// internally by moor. + InsertStatement(this.database, this.table); + + /// Inserts a row constructed from the fields in [entity]. + /// + /// All fields in the entity that don't have a default value or auto-increment + /// must be set and non-null. Otherwise, an [InvalidDataException] will be + /// thrown. + /// + /// By default, an exception will be thrown if another row with the same + /// primary key already exists. This behavior can be overridden with [mode], + /// for instance by using [InsertMode.replace] or [InsertMode.insertOrIgnore]. + /// + /// To apply a partial or custom update in case of a conflict, you can also + /// use an [upsert clause](https://sqlite.org/lang_UPSERT.html) by using + /// [onConflict]. 
+ /// For instance, you could increase a counter whenever a conflict occurs: + /// + /// ```dart + /// class Words extends Table { + /// TextColumn get word => text()(); + /// IntColumn get occurrences => integer()(); + /// } + /// + /// Future addWord(String word) async { + /// await into(words).insert( + /// WordsCompanion.insert(word: word, occurrences: 1), + /// onConflict: DoUpdate((old) => WordsCompanion.custom( + /// occurrences: old.occurrences + Constant(1), + /// )), + /// ); + /// } + /// ``` + /// + /// When calling `addWord` with a word not yet saved, the regular insert will + /// write it with one occurrence. If it already exists however, the insert + /// behaves like an update incrementing occurrences by one. + /// Be aware that upsert clauses and [onConflict] are not available on older + /// sqlite versions. + /// + /// Returns the `rowid` of the inserted row. For tables with an auto-increment + /// column, the `rowid` is the generated value of that column. The returned + /// value can be inaccurate when [onConflict] is set and the insert behaved + /// like an update. + /// + /// If the table doesn't have a `rowid`, you can't rely on the return value. + /// Still, the future will always complete with an error if the insert fails. + Future insert( + Insertable entity, { + InsertMode? mode, + UpsertClause? onConflict, + }) async { + final ctx = createContext(entity, mode ?? InsertMode.insert, + onConflict: onConflict); + + return await database.doWhenOpened((e) async { + final id = await e.runInsert(ctx.sql, ctx.boundVariables); + database + .notifyUpdates({TableUpdate.onTable(table, kind: UpdateKind.insert)}); + return id; + }); + } + + /// Inserts a row into the table, and returns a generated instance. + /// + /// __Note__: This uses the `RETURNING` syntax added in sqlite3 version 3.35, + /// which is not available on most operating systems by default. When using + /// this method, make sure that you have a recent sqlite3 version available. + /// This is the case with `sqlite3_flutter_libs`. + Future insertReturning(Insertable entity, + {InsertMode? mode, UpsertClause? onConflict}) async { + final ctx = createContext(entity, mode ?? InsertMode.insert, + onConflict: onConflict, returning: true); + + return database.doWhenOpened((e) async { + final result = await e.runSelect(ctx.sql, ctx.boundVariables); + database + .notifyUpdates({TableUpdate.onTable(table, kind: UpdateKind.insert)}); + return table.map(result.single); + }); + } + + /// Attempts to [insert] [entity] into the database. If the insert would + /// violate a primary key or uniqueness constraint, updates the columns that + /// are present on [entity]. + /// + /// Note that this is subtly different from [InsertMode.replace]! When using + /// [InsertMode.replace], the old row will be deleted and replaced with the + /// new row. With [insertOnConflictUpdate], columns from the old row that are + /// not present on [entity] are unchanged, and no row will be deleted. + /// + /// Be aware that [insertOnConflictUpdate] uses an upsert clause, which is not + /// available on older sqlite implementations. + Future insertOnConflictUpdate(Insertable entity) { + return insert(entity, onConflict: DoUpdate((_) => entity)); + } + + /// Creates a [GenerationContext] which contains the sql necessary to run an + /// insert statement fro the [entry] with the [mode]. + /// + /// This method is used internally by moor. Consider using [insert] instead. + GenerationContext createContext(Insertable entry, InsertMode mode, + {UpsertClause? 
onConflict, bool returning = false}) { + _validateIntegrity(entry); + + final rawValues = entry.toColumns(true); + + // apply default values for columns that have one + final map = {}; + for (final column in table.$columns) { + final columnName = column.$name; + + if (rawValues.containsKey(columnName)) { + map[columnName] = rawValues[columnName]!; + } else { + if (column.clientDefault != null) { + map[columnName] = column._evaluateClientDefault(); + } + } + + // column not set, and doesn't have a client default. So just don't + // include this column + } + + final ctx = GenerationContext.fromDb(database); + ctx.buffer + ..write(_insertKeywords[mode]) + ..write(' INTO ') + ..write(table.$tableName) + ..write(' '); + + if (map.isEmpty) { + ctx.buffer.write('DEFAULT VALUES'); + } else { + writeInsertable(ctx, map); + } + + void writeDoUpdate(DoUpdate onConflict) { + if (onConflict._usesExcludedTable) { + ctx.hasMultipleTables = true; + } + final upsertInsertable = onConflict._createInsertable(table); + + if (!identical(entry, upsertInsertable)) { + // We run a ON CONFLICT DO UPDATE, so make sure upsertInsertable is + // valid for updates. + // the identical check is a performance optimization - for the most + // common call (insertOnConflictUpdate) we don't have to check twice. + table + .validateIntegrity(upsertInsertable, isInserting: false) + .throwIfInvalid(upsertInsertable); + } + + final updateSet = upsertInsertable.toColumns(true); + + ctx.buffer.write(' ON CONFLICT('); + + final conflictTarget = onConflict.target ?? table.$primaryKey.toList(); + + if (conflictTarget.isEmpty) { + throw ArgumentError( + 'Table has no primary key, so a conflict target is needed.'); + } + + var first = true; + for (final target in conflictTarget) { + if (!first) ctx.buffer.write(', '); + + // Writing the escaped name directly because it should not have a table + // name in front of it. + ctx.buffer.write(target.escapedName); + first = false; + } + + ctx.buffer.write(') DO UPDATE SET '); + + first = true; + for (final update in updateSet.entries) { + final column = escapeIfNeeded(update.key); + + if (!first) ctx.buffer.write(', '); + ctx.buffer.write('$column = '); + update.value.writeInto(ctx); + + first = false; + } + + if (onConflict._where != null) { + ctx.writeWhitespace(); + final where = onConflict._where!( + table.asDslTable, table.createAlias('excluded').asDslTable); + where.writeInto(ctx); + } + } + + if (onConflict is DoUpdate) { + writeDoUpdate(onConflict); + } else if (onConflict is UpsertMultiple) { + onConflict.clauses.forEach(writeDoUpdate); + } + + if (returning) { + ctx.buffer.write(' RETURNING *'); + } + + return ctx; + } + + void _validateIntegrity(Insertable? d) { + if (d == null) { + throw InvalidDataException( + 'Cannot write null row into ${table.$tableName}'); + } + + table.validateIntegrity(d, isInserting: true).throwIfInvalid(d); + } + + /// Writes column names and values from the [map]. + @internal + void writeInsertable(GenerationContext ctx, Map map) { + final columns = map.keys.map(escapeIfNeeded); + + ctx.buffer + ..write('(') + ..write(columns.join(', ')) + ..write(') ') + ..write('VALUES ('); + + var first = true; + for (final variable in map.values) { + if (!first) { + ctx.buffer.write(', '); + } + first = false; + + variable.writeInto(ctx); + } + + ctx.buffer.write(')'); + } +} + +/// Enumeration of different insert behaviors. See the documentation on the +/// individual fields for details. +enum InsertMode { + /// A regular `INSERT INTO` statement. 
When a row with the same primary or + /// unique key already exists, the insert statement will fail and an exception + /// will be thrown. If the exception is caught, previous statements made in + /// the same transaction will NOT be reverted. + insert, + + /// Identical to [InsertMode.insertOrReplace], included for the sake of + /// completeness. + replace, + + /// Like [insert], but if a row with the same primary or unique key already + /// exists, it will be deleted and re-created with the row being inserted. + insertOrReplace, + + /// Similar to [InsertMode.insertOrAbort], but it will revert the surrounding + /// transaction if a constraint is violated, even if the thrown exception is + /// caught. + insertOrRollback, + + /// Identical to [insert], included for the sake of completeness. + insertOrAbort, + + /// Like [insert], but if multiple values are inserted with the same insert + /// statement and one of them fails, the others will still be completed. + insertOrFail, + + /// Like [insert], but failures will be ignored. + insertOrIgnore, +} + +const _insertKeywords = { + InsertMode.insert: 'INSERT', + InsertMode.replace: 'REPLACE', + InsertMode.insertOrReplace: 'INSERT OR REPLACE', + InsertMode.insertOrRollback: 'INSERT OR ROLLBACK', + InsertMode.insertOrAbort: 'INSERT OR ABORT', + InsertMode.insertOrFail: 'INSERT OR FAIL', + InsertMode.insertOrIgnore: 'INSERT OR IGNORE', +}; + +/// A upsert clause controls how to behave when a uniqueness constraint is +/// violated during an insert. +/// +/// Typically, one would use [DoUpdate] to run an update instead in this case. +abstract class UpsertClause {} + +/// A [DoUpdate] upsert clause can be used to insert or update a custom +/// companion when the underlying companion already exists. +/// +/// For an example, see [InsertStatement.insert]. +class DoUpdate extends UpsertClause { + final Insertable Function(T old, T excluded) _creator; + final Where Function(T old, T excluded)? _where; + + final bool _usesExcludedTable; + + /// An optional list of columns to serve as an "conflict target", which + /// specifies the uniqueness constraint that will trigger the upsert. + /// + /// By default, the primary key of the table will be used. + final List? target; + + /// Creates a `DO UPDATE` clause. + /// + /// The [update] function will be used to construct an [Insertable] used to + /// update an old row that prevented an insert. + /// If you need to refer to both the old row and the row that would have + /// been inserted, use [DoUpdate.withExcluded]. + /// + /// The optional [where] clause can be used to disable the update based on + /// the old value. If a [where] clause is set and it evaluates to false, a + /// conflict will keep the old row without applying the update. + /// + /// For an example, see [InsertStatement.insert]. + DoUpdate(Insertable Function(T old) update, + {this.target, Expression Function(T old)? where}) + : _creator = ((old, _) => update(old)), + _where = where == null ? null : ((old, _) => Where(where(old))), + _usesExcludedTable = false; + + /// Creates a `DO UPDATE` clause. + /// + /// The [update] function will be used to construct an [Insertable] used to + /// update an old row that prevented an insert. + /// It can refer to the values from the old row in the first parameter and + /// to columns in the row that couldn't be inserted with the `excluded` + /// parameter. + /// + /// The optional [where] clause can be used to disable the update based on + /// the old value. 
If a [where] clause is set and it evaluates to false, a + /// conflict will keep the old row without applying the update. + /// + /// For an example, see [InsertStatement.insert]. + DoUpdate.withExcluded(Insertable Function(T old, T excluded) update, + {this.target, Expression Function(T old, T excluded)? where}) + : _creator = update, + _usesExcludedTable = true, + _where = where == null + ? null + : ((old, excluded) => Where(where(old, excluded))); + + Insertable _createInsertable(TableInfo table) { + return _creator(table.asDslTable, table.createAlias('excluded').asDslTable); + } +} + +/// Upsert clause that consists of multiple [clauses]. +/// +/// The first [DoUpdate.target] matched by this upsert will be run. +class UpsertMultiple extends UpsertClause { + /// All [DoUpdate] clauses that are part of this upsert. + /// + /// The first clause with a matching [DoUpdate.target] will be considered. + final List> clauses; + + /// Creates an upsert consisting of multiple [DoUpdate] clauses. + /// + /// This requires a fairly recent sqlite3 version (3.35.0, released on 2021- + /// 03-12). + UpsertMultiple(this.clauses); +} diff --git a/drift/lib/src/runtime/query_builder/statements/query.dart b/drift/lib/src/runtime/query_builder/statements/query.dart new file mode 100644 index 00000000..55f79a58 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/query.dart @@ -0,0 +1,375 @@ +part of '../query_builder.dart'; + +/// Statement that operates with data that already exists (select, delete, +/// update). +abstract class Query extends Component { + /// The database this statement should be sent to. + @protected + DatabaseConnectionUser database; + + /// The (main) table or view that this query operates on. + ResultSetImplementation table; + + /// Used internally by moor. Users should use the appropriate methods on + /// [DatabaseConnectionUser] instead. + Query(this.database, this.table); + + /// The `WHERE` clause for this statement + @protected + Where? whereExpr; + + /// The `ORDER BY` clause for this statement + @protected + OrderBy? orderByExpr; + + /// The `LIMIT` clause for this statement. + @protected + Limit? limitExpr; + + GroupBy? _groupBy; + + /// Subclasses must override this and write the part of the statement that + /// comes before the where and limit expression.. + @visibleForOverriding + void writeStartPart(GenerationContext ctx); + + @override + void writeInto(GenerationContext context) { + // whether we need to insert a space before writing the next component + var needsWhitespace = false; + + void writeWithSpace(Component? component) { + if (component == null) return; + + if (needsWhitespace) context.writeWhitespace(); + component.writeInto(context); + needsWhitespace = true; + } + + writeStartPart(context); + needsWhitespace = true; + + writeWithSpace(whereExpr); + writeWithSpace(_groupBy); + writeWithSpace(orderByExpr); + writeWithSpace(limitExpr); + } + + /// Constructs the query that can then be sent to the database executor. + /// + /// This is used internally by moor to run the query. Users should use the + /// other methods explained in the [documentation][moor-docs]. + /// [moor-docs]: https://moor.simonbinder.eu/docs/getting-started/writing_queries/ + GenerationContext constructQuery() { + final ctx = GenerationContext.fromDb(database); + writeInto(ctx); + ctx.buffer.write(';'); + return ctx; + } +} + +/// [Selectable] methods for returning multiple results. 
+/// +/// Useful for refining the return type of a query, while still delegating +/// whether to [get] or [watch] results to the consuming code. +/// +/// {@template moor_multi_selectable_example} +/// ```dart +/// /// Retrieve a page of [Todo]s. +/// MultiSelectable pageOfTodos(int page, {int pageSize = 10}) { +/// return select(todos)..limit(pageSize, offset: page); +/// } +/// pageOfTodos(1).get(); +/// pageOfTodos(1).watch(); +/// ``` +/// {@endtemplate} +/// +/// See also: [SingleSelectable] and [SingleOrNullSelectable] for exposing +/// single value methods. +abstract class MultiSelectable { + /// Executes this statement and returns the result. + Future> get(); + + /// Creates an auto-updating stream of the result that emits new items + /// whenever any table used in this statement changes. + Stream> watch(); +} + +/// [Selectable] methods for returning or streaming single, +/// non-nullable results. +/// +/// Useful for refining the return type of a query, while still delegating +/// whether to [getSingle] or [watchSingle] results to the consuming code. +/// +/// {@template moor_single_selectable_example} +/// ```dart +/// // Retrieve a todo known to exist. +/// SingleSelectable entryById(int id) { +/// return select(todos)..where((t) => t.id.equals(id)); +/// } +/// final idGuaranteedToExist = 10; +/// entryById(idGuaranteedToExist).getSingle(); +/// entryById(idGuaranteedToExist).watchSingle(); +/// ``` +/// {@endtemplate} +/// +/// See also: [MultiSelectable] for exposing multi-value methods and +/// [SingleOrNullSelectable] for exposing nullable value methods. +abstract class SingleSelectable { + /// Executes this statement, like [Selectable.get], but only returns one + /// value. the query returns no or too many rows, the returned future will + /// complete with an error. + /// + /// {@template moor_single_query_expl} + /// Be aware that this operation won't put a limit clause on this statement, + /// if that's needed you would have to do use [SimpleSelectStatement.limit]: + /// ```dart + /// Future loadMostImportant() { + /// return (select(todos) + /// ..orderBy([(t) => + /// OrderingTerm(expression: t.priority, mode: OrderingMode.desc)]) + /// ..limit(1) + /// ).getSingle(); + /// } + /// ``` + /// You should only use this method if you know the query won't have more than + /// one row, for instance because you used `limit(1)` or you know the `where` + /// clause will only allow one row. + /// {@endtemplate} + /// + /// See also: [Selectable.getSingleOrNull], which returns `null` instead of + /// throwing if the query completes with no rows. + Future getSingle(); + + /// Creates an auto-updating stream of this statement, similar to + /// [Selectable.watch]. However, it is assumed that the query will only emit + /// one result, so instead of returning a `Stream>`, this returns a + /// `Stream`. If, at any point, the query emits no or more than one rows, + /// an error will be added to the stream instead. + /// + /// {@macro moor_single_query_expl} + Stream watchSingle(); +} + +/// [Selectable] methods for returning or streaming single, +/// nullable results. +/// +/// Useful for refining the return type of a query, while still delegating +/// whether to [getSingleOrNull] or [watchSingleOrNull] result to the +/// consuming code. +/// +/// {@template moor_single_or_null_selectable_example} +///```dart +/// // Retrieve a todo from an external link that may not be valid. 
+/// SingleOrNullSelectable entryFromExternalLink(int id) { +/// return select(todos)..where((t) => t.id.equals(id)); +/// } +/// final idFromEmailLink = 100; +/// entryFromExternalLink(idFromEmailLink).getSingleOrNull(); +/// entryFromExternalLink(idFromEmailLink).watchSingleOrNull(); +/// ``` +/// {@endtemplate} +/// +/// See also: [MultiSelectable] for exposing multi-value methods and +/// [SingleSelectable] for exposing non-nullable value methods. +abstract class SingleOrNullSelectable { + /// Executes this statement, like [Selectable.get], but only returns one + /// value. If the result too many values, this method will throw. If no + /// row is returned, `null` will be returned instead. + /// + /// {@macro moor_single_query_expl} + /// + /// See also: [Selectable.getSingle], which can be used if the query will + /// always evaluate to exactly one row. + Future getSingleOrNull(); + + /// Creates an auto-updating stream of this statement, similar to + /// [Selectable.watch]. However, it is assumed that the query will only + /// emit one result, so instead of returning a `Stream>`, this + /// returns a `Stream`. If the query emits more than one row at + /// some point, an error will be emitted to the stream instead. + /// If the query emits zero rows at some point, `null` will be added + /// to the stream instead. + /// + /// {@macro moor_single_query_expl} + Stream watchSingleOrNull(); +} + +/// Abstract class for queries which can return one-time values or a stream +/// of values. +/// +/// If you want to make your query consumable as either a [Future] or a +/// [Stream], you can refine your return type using one of Selectable's +/// base classes: +/// +/// {@macro moor_multi_selectable_example} +/// {@macro moor_single_selectable_example} +/// {@macro moor_single_or_null_selectable_example} +abstract class Selectable + implements + MultiSelectable, + SingleSelectable, + SingleOrNullSelectable { + @override + Future> get(); + + @override + Stream> watch(); + + @override + Future getSingle() async { + return (await get()).single; + } + + @override + Stream watchSingle() { + return watch().transform(singleElements()); + } + + @override + Future getSingleOrNull() async { + final list = await get(); + final iterator = list.iterator; + + if (!iterator.moveNext()) { + return null; + } + final element = iterator.current; + if (iterator.moveNext()) { + throw StateError('Expected exactly one result, but found more than one!'); + } + + return element; + } + + @override + Stream watchSingleOrNull() { + return watch().transform(singleElementsOrNull()); + } + + /// Maps this selectable by the [mapper] function. + /// + /// Each entry emitted by this [Selectable] will be transformed by the + /// [mapper] and then emitted to the selectable returned. + Selectable map(N Function(T) mapper) { + return _MappedSelectable(this, mapper); + } +} + +class _MappedSelectable extends Selectable { + final Selectable _source; + final T Function(S) _mapper; + + _MappedSelectable(this._source, this._mapper); + + @override + Future> get() { + return _source.get().then(_mapResults); + } + + @override + Stream> watch() { + return _source.watch().map(_mapResults); + } + + List _mapResults(List results) => results.map(_mapper).toList(); +} + +/// Mixin for a [Query] that operates on a single primary table only. +mixin SingleTableQueryMixin on Query { + /// Makes this statement only include rows that match the [filter]. 
+ /// + /// For instance, if you have a table users with an id column, you could + /// select a user with a specific id by using + /// ```dart + /// (select(users)..where((u) => u.id.equals(42))).watchSingle() + /// ``` + /// + /// Please note that this [where] call is different to [Iterable.where] and + /// [Stream.where] in the sense that [filter] will NOT be called for each + /// row. Instead, it will only be called once (with the underlying table as + /// parameter). The result [Expression] will be written as a SQL string and + /// sent to the underlying database engine. The filtering does not happen in + /// Dart. + /// If a where condition has already been set before, the resulting filter + /// will be the conjunction of both calls. + /// + /// For more information, see: + /// - The docs on [expressions](https://moor.simonbinder.eu/docs/getting-started/expressions/), + /// which explain how to express most SQL expressions in Dart. + /// If you want to remove duplicate rows from a query, use the `distinct` + /// parameter on [DatabaseConnectionUser.select]. + void where(Expression Function(T tbl) filter) { + final predicate = filter(table.asDslTable); + + if (whereExpr == null) { + whereExpr = Where(predicate); + } else { + whereExpr = Where(whereExpr!.predicate & predicate); + } + } +} + +/// Extension for statements on a table. +/// +/// This adds the [whereSamePrimaryKey] method as an extension. The query could +/// run on a view, for which [whereSamePrimaryKey] is not defined. +extension QueryTableExtensions + on SingleTableQueryMixin { + TableInfo get _sourceTable => table as TableInfo; + + /// Applies a [where] statement so that the row with the same primary key as + /// [d] will be matched. + void whereSamePrimaryKey(Insertable d) { + final source = _sourceTable; + assert( + source.$primaryKey.isNotEmpty, + 'When using Query.whereSamePrimaryKey, which is also called from ' + 'DeleteStatement.delete and UpdateStatement.replace, the affected table ' + 'must have a primary key. You can either specify a primary key implicitly ' + 'by making an integer() column autoIncrement(), or by explicitly ' + 'overriding the primaryKey getter in your table class. You\'ll also ' + 'have to re-run the code generation step.\n' + 'Alternatively, if you\'re using DeleteStatement.delete or ' + 'UpdateStatement.replace, consider using DeleteStatement.go or ' + 'UpdateStatement.write respectively. In that case, you need to use a ' + 'custom where statement.'); + + final primaryKeyColumns = Map.fromEntries(source.$primaryKey.map((column) { + return MapEntry(column.$name, column); + })); + + final updatedFields = d.toColumns(false); + // Construct a map of [GeneratedColumn] to [Expression] where each column is + // a primary key and the associated value was extracted from d. + final primaryKeyValues = Map.fromEntries(updatedFields.entries + .where((entry) => primaryKeyColumns.containsKey(entry.key))) + .map((columnName, value) { + return MapEntry(primaryKeyColumns[columnName]!, value); + }); + + Expression? predicate; + for (final entry in primaryKeyValues.entries) { + final comparison = + _Comparison(entry.key, _ComparisonOperator.equal, entry.value); + + if (predicate == null) { + predicate = comparison; + } else { + predicate = predicate & comparison; + } + } + + whereExpr = Where(predicate!); + } +} + +/// Mixin to provide the high-level [limit] methods for users. +mixin LimitContainerMixin on Query { + /// Limits the amount of rows returned by capping them at [limit].
If [offset] + /// is provided as well, the first [offset] rows will be skipped and not + /// included in the result. + void limit(int limit, {int? offset}) { + limitExpr = Limit(limit, offset); + } +} diff --git a/drift/lib/src/runtime/query_builder/statements/select/custom_select.dart b/drift/lib/src/runtime/query_builder/statements/select/custom_select.dart new file mode 100644 index 00000000..c0d99fa9 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/select/custom_select.dart @@ -0,0 +1,106 @@ +part of '../../query_builder.dart'; + +/// A select statement that is constructed with a raw sql prepared statement +/// instead of the high-level moor api. +class CustomSelectStatement with Selectable { + /// Tables this select statement reads from. When turning this select query + /// into an auto-updating stream, that stream will emit new items whenever + /// any of these tables changes. + final Set tables; + + /// The sql query string for this statement. + final String query; + + /// The variables for the prepared statement, in the order they appear in + /// [query]. Variables are denoted using a question mark in the query. + final List variables; + final DatabaseConnectionUser _db; + + /// Constructs a new custom select statement for the query, the variables, + /// the affected tables and the database. + CustomSelectStatement(this.query, this.variables, this.tables, this._db); + + /// Constructs a fetcher for this query. The fetcher is responsible for + /// updating a stream at the right moment. + QueryStreamFetcher _constructFetcher() { + final args = _mapArgs(); + + return QueryStreamFetcher( + readsFrom: TableUpdateQuery.onAllTables(tables), + fetchData: () => _executeRaw(args), + key: StreamKey(query, args), + ); + } + + @override + Future> get() { + return _executeRaw(_mapArgs()).then(_mapDbResponse); + } + + @override + Stream> watch() { + return _db.createStream(_constructFetcher()).map(_mapDbResponse); + } + + List _mapArgs() { + final ctx = GenerationContext.fromDb(_db); + return variables.map((v) => v.mapToSimpleValue(ctx)).toList(); + } + + Future>> _executeRaw(List mappedArgs) { + return _db.doWhenOpened((e) => e.runSelect(query, mappedArgs)); + } + + List _mapDbResponse(List> rows) { + return rows.map((row) => QueryRow(row, _db)).toList(); + } +} + +/// For custom select statements, represents a row in the result set. +class QueryRow { + /// The raw data in this row. + /// + /// Note that the values in this map aren't mapped to Dart yet. For instance, + /// a [DateTime] would be stored as an [int] in [data] because that's the way + /// it's stored in the database. To read a value, use any of the [read] + /// methods. + final Map data; + final DatabaseConnectionUser _db; + + /// Construct a row from the raw data and the query engine that maps the raw + /// response to appropriate dart types. + QueryRow(this.data, this._db); + + /// Reads an arbitrary value from the row and maps it to a fitting dart type. + /// The dart type [T] must be supported by the type system of the database + /// used (mostly contains booleans, strings, numbers and dates). + T read(String key) { + final type = _db.typeSystem.forDartType(); + + return type.mapFromDatabaseResponse(data[key]) as T; + } + + /// Reads a bool from the column named [key]. + @Deprecated('Use read(key) directly') + bool readBool(String key) => read(key); + + /// Reads a string from the column named [key]. 
+ @Deprecated('Use read(key) directly') + String readString(String key) => read(key); + + /// Reads a int from the column named [key]. + @Deprecated('Use read(key) directly') + int readInt(String key) => read(key); + + /// Reads a double from the column named [key]. + @Deprecated('Use read(key) directly') + double readDouble(String key) => read(key); + + /// Reads a [DateTime] from the column named [key]. + @Deprecated('Use read(key) directly') + DateTime readDateTime(String key) => read(key); + + /// Reads a [Uint8List] from the column named [key]. + @Deprecated('Use read(key) directly') + Uint8List readBlob(String key) => read(key); +} diff --git a/drift/lib/src/runtime/query_builder/statements/select/select.dart b/drift/lib/src/runtime/query_builder/statements/select/select.dart new file mode 100644 index 00000000..6d48a7cb --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/select/select.dart @@ -0,0 +1,195 @@ +part of '../../query_builder.dart'; + +/// Signature of a function that generates an [OrderingTerm] when provided with +/// a table. +typedef OrderClauseGenerator = OrderingTerm Function(T tbl); + +/// The abstract base class for all select statements in the moor api. +/// +/// Users are not allowed to extend, implement or mix-in this class. +@sealed +abstract class BaseSelectStatement extends Component { + int get _returnedColumnCount; +} + +/// A select statement that doesn't use joins. +/// +/// For more information, see [DatabaseConnectionUser.select]. +class SimpleSelectStatement extends Query + with SingleTableQueryMixin, LimitContainerMixin, Selectable + implements BaseSelectStatement { + /// Whether duplicate rows should be eliminated from the result (this is a + /// `SELECT DISTINCT` statement in sql). Defaults to false. + final bool distinct; + + /// Used internally by moor, users will want to call + /// [DatabaseConnectionUser.select] instead. + SimpleSelectStatement( + DatabaseConnectionUser database, ResultSetImplementation table, + {this.distinct = false}) + : super(database, table); + + /// The tables this select statement reads from. + @visibleForOverriding + @Deprecated('Use watchedTables on the GenerationContext') + Set get watchedTables => {table}; + + @override + int get _returnedColumnCount => table.$columns.length; + + @override + void writeStartPart(GenerationContext ctx) { + ctx.buffer + ..write(_beginOfSelect(distinct)) + ..write(' * FROM ${table.tableWithAlias}'); + ctx.watchedTables.add(table); + } + + @override + Future> get() { + final ctx = constructQuery(); + return _getRaw(ctx).then(_mapResponse); + } + + @override + Stream> watch() { + final query = constructQuery(); + final fetcher = QueryStreamFetcher( + readsFrom: TableUpdateQuery.onAllTables(query.watchedTables), + fetchData: () => _getRaw(query), + key: StreamKey(query.sql, query.boundVariables), + ); + + return database.createStream(fetcher).map(_mapResponse); + } + + Future>> _getRaw(GenerationContext ctx) { + return database.doWhenOpened((e) { + return e.runSelect(ctx.sql, ctx.boundVariables); + }); + } + + List _mapResponse(List> rows) { + return rows.map(table.map).toList(); + } + + /// Creates a select statement that operates on more than one table by + /// applying the given joins. 
+ /// + /// Example from the todolist example which will load the category for each + /// item: + /// ``` + /// final results = await select(todos).join([ + /// leftOuterJoin(categories, categories.id.equalsExp(todos.category)) + /// ]).get(); + /// + /// return results.map((row) { + /// final entry = row.readTable(todos); + /// final category = row.readTable(categories); + /// return EntryWithCategory(entry, category); + /// }).toList(); + /// ``` + /// + /// See also: + /// - https://moor.simonbinder.eu/docs/advanced-features/joins/#joins + /// - [innerJoin], [leftOuterJoin] and [crossJoin], which can be used to + /// construct a [Join]. + /// - [DatabaseConnectionUser.alias], which can be used to build statements + /// that refer to the same table multiple times. + JoinedSelectStatement join(List joins) { + final statement = JoinedSelectStatement(database, table, joins, distinct); + + if (whereExpr != null) { + statement.where(whereExpr!.predicate); + } + if (orderByExpr != null) { + statement.orderBy(orderByExpr!.terms); + } + if (limitExpr != null) { + statement.limitExpr = limitExpr; + } + + return statement; + } + + /// {@macro moor_select_addColumns} + JoinedSelectStatement addColumns(List expressions) { + return join([])..addColumns(expressions); + } + + /// Orders the result by the given clauses. The clauses coming first in the + /// list have a higher priority, the later clauses are only considered if the + /// first clause considers two rows to be equal. + /// + /// Example that first displays the users who are awesome and sorts users by + /// their id as a secondary criterion: + /// ``` + /// (db.select(db.users) + /// ..orderBy([ + /// (u) => + /// OrderingTerm(expression: u.isAwesome, mode: OrderingMode.desc), + /// (u) => OrderingTerm(expression: u.id) + /// ])) + /// .get() + /// ``` + void orderBy(List> clauses) { + orderByExpr = OrderBy(clauses.map((t) => t(table.asDslTable)).toList()); + } +} + +String _beginOfSelect(bool distinct) { + return distinct ? 'SELECT DISTINCT' : 'SELECT'; +} + +/// A result row in a [JoinedSelectStatement] that can parse the result of +/// multiple entities. +class TypedResult { + /// Creates the result from the parsed table data. + TypedResult(this._parsedData, this.rawData, + [this._parsedExpressions = const {}]); + + final Map _parsedData; + final Map _parsedExpressions; + + /// The raw data contained in this row. + final QueryRow rawData; + + /// Reads all data that belongs to the given [table] from this row. + /// + /// If this row does not contain non-null columns of the [table], this method + /// will throw an [ArgumentError]. Use [readTableOrNull] for nullable tables. + D readTable(ResultSetImplementation table) { + if (!_parsedData.containsKey(table)) { + throw ArgumentError( + 'Invalid table passed to readTable: ${table.aliasedName}. This row ' + 'does not contain values for that table. \n' + 'In moor version 4, you have to use readTableOrNull for outer joins.'); + } + + return _parsedData[table] as D; + } + + /// Reads all data that belongs to the given [table] from this row. + /// + /// Returns `null` if this row does not contain non-null values of the + /// [table]. + /// + /// See also: [readTable], which throws instead of returning `null`. + D? readTableOrNull(TableInfo table) { + return _parsedData[table] as D?; + } + + /// Reads a single column from an [expr]. The expression must have been added + /// as a column, for instance via [JoinedSelectStatement.addColumns].
+ /// + /// To access the underlying columns directly, use [rawData]. + D read(Expression expr) { + if (_parsedExpressions.containsKey(expr)) { + return _parsedExpressions[expr] as D; + } + + throw ArgumentError( + 'Invalid call to read(): $expr. This result set does not have a column ' + 'for that expression.'); + } +} diff --git a/drift/lib/src/runtime/query_builder/statements/select/select_with_join.dart b/drift/lib/src/runtime/query_builder/statements/select/select_with_join.dart new file mode 100644 index 00000000..adb0e690 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/select/select_with_join.dart @@ -0,0 +1,268 @@ +part of '../../query_builder.dart'; + +/// A `SELECT` statement that operates on more than one table. +// this is called JoinedSelectStatement for legacy reasons - we also use it +// when custom expressions are used as result columns. Basically, it stores +// queries that are more complex than SimpleSelectStatement +class JoinedSelectStatement + extends Query + with LimitContainerMixin, Selectable + implements BaseSelectStatement { + /// Used internally by moor, users should use [SimpleSelectStatement.join] + /// instead. + JoinedSelectStatement(DatabaseConnectionUser database, + ResultSetImplementation table, this._joins, + [this.distinct = false, this._includeMainTableInResult = true]) + : super(database, table); + + /// Whether to generate a `SELECT DISTINCT` query that will remove duplicate + /// rows from the result set. + final bool distinct; + final bool _includeMainTableInResult; + final List _joins; + + /// All columns that we're selecting from. + final List _selectedColumns = []; + + /// The `AS` aliases generated for each column that isn't from a table. + /// + /// Each table column can be uniquely identified by its (potentially aliased) + /// table and its name. So a column named `id` in a table called `users` would + /// be written as `users.id AS "users.id"`. These columns will NOT be written + /// into this map. + /// + /// Other expressions used as columns will be included here. There just named + /// in increasing order, so something like `AS c3`. + final Map _columnAliases = {}; + + /// The tables this select statement reads from + @visibleForOverriding + @Deprecated('Use watchedTables on the generated context') + Set get watchedTables => _queriedTables().toSet(); + + @override + int get _returnedColumnCount { + return _joins.fold(_selectedColumns.length, (prev, join) { + if (join.includeInResult) { + return prev + join.table.$columns.length; + } + return prev; + }); + } + + /// Lists all tables this query reads from. + /// + /// If [onlyResults] (defaults to false) is set, only tables that are included + /// in the result set are returned. 
+ Iterable _queriedTables( + [bool onlyResults = false]) sync* { + if (!onlyResults || _includeMainTableInResult) { + yield table; + } + + for (final join in _joins) { + if (onlyResults && !join.includeInResult) continue; + + yield join.table; + } + } + + @override + void writeStartPart(GenerationContext ctx) { + // use all columns across all tables as result column for this query + _selectedColumns.insertAll( + 0, _queriedTables(true).expand((t) => t.$columns).cast()); + + ctx.hasMultipleTables = true; + ctx.buffer + ..write(_beginOfSelect(distinct)) + ..write(' '); + + for (var i = 0; i < _selectedColumns.length; i++) { + if (i != 0) { + ctx.buffer.write(', '); + } + + final column = _selectedColumns[i]; + String chosenAlias; + if (column is GeneratedColumn) { + chosenAlias = '${column.tableName}.${column.$name}'; + } else { + chosenAlias = 'c$i'; + } + _columnAliases[column] = chosenAlias; + + column.writeInto(ctx); + ctx.buffer + ..write(' AS "') + ..write(chosenAlias) + ..write('"'); + } + + ctx.buffer.write(' FROM ${table.tableWithAlias}'); + ctx.watchedTables.add(table); + + if (_joins.isNotEmpty) { + ctx.writeWhitespace(); + + for (var i = 0; i < _joins.length; i++) { + if (i != 0) ctx.writeWhitespace(); + + _joins[i].writeInto(ctx); + } + } + } + + /// Applies the [predicate] as the where clause, which will be used to filter + /// results. + /// + /// The clause should only refer to columns defined in one of the tables + /// specified during [SimpleSelectStatement.join]. + /// + /// With the example of a todos table which refers to categories, we can write + /// something like + /// ```dart + /// final query = select(todos) + /// .join([ + /// leftOuterJoin(categories, categories.id.equalsExp(todos.category)), + /// ]) + /// ..where(todos.name.like("%Important") & categories.name.equals("Work")); + /// ``` + void where(Expression predicate) { + if (whereExpr == null) { + whereExpr = Where(predicate); + } else { + whereExpr = Where(whereExpr!.predicate & predicate); + } + } + + /// Orders the results of this statement by the ordering [terms]. + void orderBy(List terms) { + orderByExpr = OrderBy(terms); + } + + /// {@template moor_select_addColumns} + /// Adds a custom expression to the query. + /// + /// The database will evaluate the [Expression] for each row found for this + /// query. The value of the expression can be extracted from the [TypedResult] + /// by passing it to [TypedResult.read]. + /// + /// As an example, we could calculate the length of a column on the database: + /// ```dart + /// final contentLength = todos.content.length; + /// final results = await select(todos).addColumns([contentLength]).get(); + /// + /// // we can now read the result of a column added to addColumns + /// final lengthOfFirst = results.first.read(contentLength); + /// ``` + /// + /// See also: + /// - The docs on expressions: https://moor.simonbinder.eu/docs/getting-started/expressions/ + /// {@endtemplate} + void addColumns(Iterable expressions) { + _selectedColumns.addAll(expressions); + } + + /// Adds more joined tables to this [JoinedSelectStatement]. + /// + /// Always returns the same instance. + /// + /// See also: + /// - https://moor.simonbinder.eu/docs/advanced-features/joins/#joins + /// - [SimpleSelectStatement.join], which is used for the first join + /// - [innerJoin], [leftOuterJoin] and [crossJoin], which can be used to + /// construct a [Join]. + /// - [DatabaseConnectionUser.alias], which can be used to build statements + /// that refer to the same table multiple times. 
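+  ///
+  /// As a sketch (assuming hypothetical generated `todos`, `categories` and
+  /// `users` tables, where a todo row references both):
+  /// ```dart
+  /// final query = select(todos)
+  ///     .join([innerJoin(categories, categories.id.equalsExp(todos.category))])
+  ///     .join([innerJoin(users, users.id.equalsExp(todos.owner))]);
+  /// ```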
+ // ignore: avoid_returning_this + JoinedSelectStatement join(List joins) { + _joins.addAll(joins); + return this; + } + + /// Groups the result by values in [expressions]. + /// + /// An optional [having] attribute can be set to exclude certain groups. + void groupBy(Iterable expressions, {Expression? having}) { + _groupBy = GroupBy._(expressions.toList(), having); + } + + @override + Stream> watch() { + final ctx = constructQuery(); + final fetcher = QueryStreamFetcher( + readsFrom: TableUpdateQuery.onAllTables(ctx.watchedTables), + fetchData: () => _getRaw(ctx), + key: StreamKey(ctx.sql, ctx.boundVariables), + ); + + return database + .createStream(fetcher) + .map((rows) => _mapResponse(ctx, rows)); + } + + @override + Future> get() async { + final ctx = constructQuery(); + final raw = await _getRaw(ctx); + return _mapResponse(ctx, raw); + } + + Future>> _getRaw(GenerationContext ctx) { + return ctx.executor!.doWhenOpened((e) async { + try { + return await e.runSelect(ctx.sql, ctx.boundVariables); + } catch (e, s) { + final foundTables = {}; + for (final table in _queriedTables()) { + if (!foundTables.add(table.entityName)) { + _warnAboutDuplicate(e, s, table); + } + } + + rethrow; + } + }); + } + + List _mapResponse( + GenerationContext ctx, List> rows) { + return rows.map((row) { + final readTables = {}; + final readColumns = {}; + + for (final table in _queriedTables(true)) { + final prefix = '${table.aliasedName}.'; + // if all columns of this table are null, skip the table + if (table.$columns.any((c) => row[prefix + c.$name] != null)) { + readTables[table] = table.map(row, tablePrefix: table.aliasedName); + } + } + + for (final aliasedColumn in _columnAliases.entries) { + final expr = aliasedColumn.key; + final value = row[aliasedColumn.value]; + + final type = expr.findType(ctx.typeSystem); + readColumns[expr] = type.mapFromDatabaseResponse(value); + } + + return TypedResult(readTables, QueryRow(row, database), readColumns); + }).toList(); + } + + @alwaysThrows + void _warnAboutDuplicate( + dynamic cause, StackTrace trace, ResultSetImplementation table) { + throw MoorWrappedException( + message: 'This query contained the table ${table.entityName} more than ' + 'once. Is this a typo? \n' + 'If you need a join that includes the same table more than once, you ' + 'need to alias() at least one table. See https://moor.simonbinder.eu/queries/joins#aliases ' + 'for an example.', + cause: cause, + trace: trace, + ); + } +} diff --git a/drift/lib/src/runtime/query_builder/statements/update.dart b/drift/lib/src/runtime/query_builder/statements/update.dart new file mode 100644 index 00000000..4bc9f6d3 --- /dev/null +++ b/drift/lib/src/runtime/query_builder/statements/update.dart @@ -0,0 +1,141 @@ +part of '../query_builder.dart'; + +/// Represents an `UPDATE` statement in sql. +class UpdateStatement extends Query + with SingleTableQueryMixin { + /// Used internally by moor, construct an update statement + UpdateStatement(DatabaseConnectionUser database, TableInfo table) + : super(database, table); + + late Map _updatedFields; + + @override + void writeStartPart(GenerationContext ctx) { + // TODO support the OR (ROLLBACK / ABORT / REPLACE / FAIL / IGNORE...) 
thing + + ctx.buffer.write('UPDATE ${table.tableWithAlias} SET '); + + var first = true; + _updatedFields.forEach((columnName, variable) { + if (!first) { + ctx.buffer.write(', '); + } else { + first = false; + } + + ctx.buffer + ..write(escapeIfNeeded(columnName)) + ..write(' = '); + + variable.writeInto(ctx); + }); + } + + Future _performQuery() async { + final ctx = constructQuery(); + final rows = await ctx.executor!.doWhenOpened((e) async { + return await e.runUpdate(ctx.sql, ctx.boundVariables); + }); + + if (rows > 0) { + database.notifyUpdates( + {TableUpdate.onTable(_sourceTable, kind: UpdateKind.update)}); + } + + return rows; + } + + /// Writes all non-null fields from [entity] into the columns of all rows + /// that match the [where] clause. Warning: That also means that, when you're + /// not setting a where clause explicitly, this method will update all rows in + /// the [table]. + /// + /// The fields that are null on the [entity] object will not be changed by + /// this operation, they will be ignored. + /// + /// When [dontExecute] is true (defaults to false), the query will __NOT__ be + /// run, but all the validations are still in place. This is mainly used + /// internally by moor. + /// + /// Returns the amount of rows that have been affected by this operation. + /// + /// See also: [replace], which does not require [where] statements and + /// supports setting fields back to null. + Future write(Insertable entity, {bool dontExecute = false}) async { + _sourceTable.validateIntegrity(entity).throwIfInvalid(entity); + + _updatedFields = entity.toColumns(true) + ..remove((_, value) => value == null); + + if (_updatedFields.isEmpty) { + // nothing to update, we're done + return Future.value(0); + } + + if (dontExecute) return -1; + return await _performQuery(); + } + + /// Replaces the old version of [entity] that is stored in the database with + /// the fields of the [entity] provided here. This implicitly applies a + /// [where] clause to rows with the same primary key as [entity], so that only + /// the row representing outdated data will be replaced. + /// + /// If [entity] has absent values (set to null on the [DataClass] or + /// explicitly to absent on the [UpdateCompanion]), and a default value for + /// the field exists, that default value will be used. Otherwise, the field + /// will be reset to null. This behavior is different to [write], which simply + /// ignores such fields without changing them in the database. + /// + /// When [dontExecute] is true (defaults to false), the query will __NOT__ be + /// run, but all the validations are still in place. This is mainly used + /// internally by moor. + /// + /// Returns true if a row was affected by this operation. + /// + /// See also: + /// - [write], which doesn't apply a [where] statement itself and ignores + /// null values in the entity. + /// - [InsertStatement.insert] with the `orReplace` parameter, which behaves + /// similar to this method but creates a new row if none exists. + Future replace(Insertable entity, {bool dontExecute = false}) async { + // We don't turn nulls to absent values here (as opposed to a regular + // update, where only non-null fields will be written). + final columns = entity.toColumns(false); + _sourceTable + .validateIntegrity(entity, isInserting: true) + .throwIfInvalid(entity); + assert( + whereExpr == null, + 'When using replace on an update statement, you may not use where(...)' + 'as well. 
The where clause will be determined automatically'); + + whereSamePrimaryKey(entity); + + // copying to work around type issues - Map extends + // Map but crashes when adding anything that is not + // a Variable. + _updatedFields = columns is Map + ? Map.of(columns) + : columns; + + final primaryKeys = _sourceTable.$primaryKey.map((c) => c.$name); + + // entityToSql doesn't include absent values, so we might have to apply the + // default value here + for (final column in table.$columns) { + // if a default value exists and no value is set, apply the default + if (column.defaultValue != null && + !_updatedFields.containsKey(column.$name)) { + _updatedFields[column.$name] = column.defaultValue!; + } + } + + // Don't update the primary key + _updatedFields.removeWhere((key, _) => primaryKeys.contains(key)); + + if (dontExecute) return false; + final updatedRows = await _performQuery(); + return updatedRows != 0; + } +} diff --git a/drift/lib/src/runtime/types/custom_type.dart b/drift/lib/src/runtime/types/custom_type.dart new file mode 100644 index 00000000..cd2f34e9 --- /dev/null +++ b/drift/lib/src/runtime/types/custom_type.dart @@ -0,0 +1,75 @@ +part of 'sql_types.dart'; + +/// Maps a custom dart object of type [D] into a primitive type [S] understood +/// by the sqlite backend. +/// +/// Moor currently supports [DateTime], [double], [int], [Uint8List], [bool] +/// and [String] for [S]. +/// +/// Also see [BuildColumn.map] for details. +abstract class TypeConverter { + /// Empty constant constructor so that subclasses can have a constant + /// constructor. + const TypeConverter(); + + /// Map a value from an object in Dart into something that will be understood + /// by the database. + S? mapToSql(D? value); + + /// Maps a column from the database back to Dart. + D? mapToDart(S? fromDb); +} + +/// Implementation for an enum to int converter that uses the index of the enum +/// as the value stored in the database. +class EnumIndexConverter extends NullAwareTypeConverter { + /// All values of the enum. + final List values; + + /// Constant default constructor. + const EnumIndexConverter(this.values); + + @override + T requireMapToDart(int fromDb) { + return values[fromDb]; + } + + @override + int requireMapToSql(T value) { + // In Dart 2.14: Cast to Enum instead of dynamic. Also add Enum as an upper + // bound for T. + return (value as dynamic).index as int; + } +} + +/// A type converter automatically mapping `null` values to `null` in both +/// directions. +/// +/// Instead of overriding [mapToDart] and [mapToSql], subclasses of this +/// converter should implement [requireMapToDart] and [requireMapToSql], which +/// are used to map non-null values to and from sql values, respectively. +/// +/// Apart from the implementation changes, subclasses of this converter can be +/// used just like all other type converters. +abstract class NullAwareTypeConverter + extends TypeConverter { + /// Constant default constructor. + const NullAwareTypeConverter(); + + @override + D? mapToDart(S? fromDb) { + return fromDb == null ? null : requireMapToDart(fromDb); + } + + /// Maps a non-null column from the database back to Dart. + D requireMapToDart(S fromDb); + + @override + S? mapToSql(D? value) { + return value == null ? null : requireMapToSql(value); + } + + /// Map a non-null value from an object in Dart into something that will be + /// understood by the database. 
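To make the converter contract concrete, here is a minimal sketch of a `NullAwareTypeConverter` subclass that stores a Dart `Duration` as an integer number of seconds. The converter name and the column it would be attached to are made up for illustration.

```dart
import 'package:drift/drift.dart';

/// Persists a [Duration] as a number of seconds. Only the non-null mappings
/// have to be implemented; [NullAwareTypeConverter] handles `null` values in
/// both directions.
class DurationSecondsConverter extends NullAwareTypeConverter<Duration, int> {
  const DurationSecondsConverter();

  @override
  Duration requireMapToDart(int fromDb) => Duration(seconds: fromDb);

  @override
  int requireMapToSql(Duration value) => value.inSeconds;
}

// In a (hypothetical) table definition, the converter would be attached to a
// column via `integer().map(const DurationSecondsConverter())()`. For enums,
// `EnumIndexConverter` can be used directly with the enum's `values` list.
```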
+ S requireMapToSql(D value); +} diff --git a/drift/lib/src/runtime/types/sql_types.dart b/drift/lib/src/runtime/types/sql_types.dart new file mode 100644 index 00000000..d69597df --- /dev/null +++ b/drift/lib/src/runtime/types/sql_types.dart @@ -0,0 +1,207 @@ +import 'dart:typed_data'; + +import 'package:convert/convert.dart'; +import 'package:drift/drift.dart'; + +part 'custom_type.dart'; +part 'type_system.dart'; + +const _deprecated = + Deprecated('Types will be removed in drift 5, use the methods on ' + 'SqlTypeSystem instead.'); + +/// A type that can be mapped from Dart to sql. The generic type parameter [T] +/// denotes the resolved dart type. +@_deprecated +abstract class SqlType { + /// Constant constructor so that subclasses can be constant + const SqlType(); + + /// The name of this type in sql, such as `TEXT`. + String get sqlName; + + /// Maps the [content] to a value that we can send together with a prepared + /// statement to represent the given value. + dynamic mapToSqlVariable(T? content); + + /// Maps the given content to a sql literal that can be included in the query + /// string. + String? mapToSqlConstant(T? content); + + /// Maps the response from sql back to a readable dart type. + T? mapFromDatabaseResponse(dynamic response); +} + +/// A mapper for boolean values in sql. Booleans are represented as integers, +/// where 0 means false and any other value means true. +@_deprecated +class BoolType extends SqlType { + /// Constant constructor used by the type system + const BoolType(); + + @override + String get sqlName => 'INTEGER'; + + @override + bool? mapFromDatabaseResponse(dynamic response) { + // ignore: avoid_returning_null + if (response == null) return null; + return response != 0; + } + + @override + String mapToSqlConstant(bool? content) { + if (content == null) { + return 'NULL'; + } + return content ? '1' : '0'; + } + + @override + int? mapToSqlVariable(bool? content) { + if (content == null) { + // ignore: avoid_returning_null + return null; + } + return content ? 1 : 0; + } +} + +/// Mapper for string values in sql. +@_deprecated +class StringType extends SqlType { + /// Constant constructor used by the type system + const StringType(); + + @override + String get sqlName => 'TEXT'; + + @override + String? mapFromDatabaseResponse(dynamic response) => response?.toString(); + + @override + String mapToSqlConstant(String? content) { + if (content == null) return 'NULL'; + + // From the sqlite docs: (https://www.sqlite.org/lang_expr.html) + // A string constant is formed by enclosing the string in single quotes ('). + // A single quote within the string can be encoded by putting two single + // quotes in a row - as in Pascal. C-style escapes using the backslash + // character are not supported because they are not standard SQL. + final escapedChars = content.replaceAll('\'', '\'\''); + return "'$escapedChars'"; + } + + @override + String? mapToSqlVariable(String? content) => content; +} + +/// Maps [int] values from and to sql +@_deprecated +class IntType extends SqlType { + /// Constant constructor used by the type system + const IntType(); + + @override + String get sqlName => 'INTEGER'; + + @override + int? mapFromDatabaseResponse(dynamic response) { + if (response == null || response is int?) return response as int?; + return int.parse(response.toString()); + } + + @override + String mapToSqlConstant(int? content) => content?.toString() ?? 'NULL'; + + @override + int? mapToSqlVariable(int? 
content) { + return content; + } +} + +/// Maps [DateTime] values from and to sql +@_deprecated +class DateTimeType extends SqlType { + /// Constant constructor used by the type system + const DateTimeType(); + + @override + String get sqlName => 'INTEGER'; + + @override + DateTime? mapFromDatabaseResponse(dynamic response) { + if (response == null) return null; + + final unixSeconds = response as int; + + return DateTime.fromMillisecondsSinceEpoch(unixSeconds * 1000); + } + + @override + String mapToSqlConstant(DateTime? content) { + if (content == null) return 'NULL'; + + return (content.millisecondsSinceEpoch ~/ 1000).toString(); + } + + @override + int? mapToSqlVariable(DateTime? content) { + // ignore: avoid_returning_null + if (content == null) return null; + + return content.millisecondsSinceEpoch ~/ 1000; + } +} + +/// Maps [Uint8List] values from and to sql +@_deprecated +class BlobType extends SqlType { + /// Constant constructor used by the type system + const BlobType(); + + @override + String get sqlName => 'BLOB'; + + @override + Uint8List? mapFromDatabaseResponse(dynamic response) { + return response as Uint8List?; + } + + @override + String mapToSqlConstant(Uint8List? content) { + if (content == null) return 'NULL'; + // BLOB literals are string literals containing hexadecimal data and + // preceded by a single "x" or "X" character. Example: X'53514C697465' + return "x'${hex.encode(content)}'"; + } + + @override + Uint8List? mapToSqlVariable(Uint8List? content) => content; +} + +/// Maps [double] values from and to sql +@_deprecated +class RealType extends SqlType { + /// Constant constructor used by the type system + const RealType(); + + @override + String get sqlName => 'REAL'; + + @override + double? mapFromDatabaseResponse(dynamic response) { + return (response as num?)?.toDouble(); + } + + @override + String mapToSqlConstant(num? content) { + if (content == null) { + return 'NULL'; + } + return content.toString(); + } + + @override + num? mapToSqlVariable(num? content) => content; +} diff --git a/drift/lib/src/runtime/types/type_system.dart b/drift/lib/src/runtime/types/type_system.dart new file mode 100644 index 00000000..8953a915 --- /dev/null +++ b/drift/lib/src/runtime/types/type_system.dart @@ -0,0 +1,73 @@ +part of 'sql_types.dart'; + +/// Manages the set of [SqlType] known to a database. It's also responsible for +/// returning the appropriate sql type for a given dart type. +class SqlTypeSystem { + /// The mapping types maintained by this type system. + final List types; + + /// Constructs a [SqlTypeSystem] from the [types]. + @Deprecated('Only the default instance is supported') + const factory SqlTypeSystem(List types) = SqlTypeSystem._; + + const SqlTypeSystem._(this.types); + + /// Constructs a [SqlTypeSystem] from the default types. + const SqlTypeSystem.withDefaults() + : this._(const [ + BoolType(), + StringType(), + IntType(), + DateTimeType(), + BlobType(), + RealType(), + ]); + + /// Constant field of [SqlTypeSystem.withDefaults]. This field exists as a + /// workaround for an analyzer bug: https://dartbug.com/38658 + /// + /// Used internally by generated code. + static const defaultInstance = SqlTypeSystem.withDefaults(); + + /// Returns the appropriate sql type for the dart type provided as the + /// generic parameter. 
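A small sketch of the mappings performed by these deprecated type implementations: `DateTime`s become unix timestamps in seconds, booleans become the integers 0 and 1, and blobs become `x'..'` hex literals when written as constants.

```dart
// ignore_for_file: deprecated_member_use
import 'dart:typed_data';

import 'package:drift/drift.dart';

void main() {
  const dates = DateTimeType();
  final newYear = DateTime.utc(2021);
  print(dates.mapToSqlVariable(newYear)); // 1609459200 (unix seconds)
  // Mapping back returns a DateTime for that instant in the local time zone.
  print(dates.mapFromDatabaseResponse(1609459200));

  print(const BoolType().mapToSqlConstant(true)); // 1
  print(const BlobType()
      .mapToSqlConstant(Uint8List.fromList([83, 81, 76]))); // x'53514c'
}
```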
+ @Deprecated('Use mapToVariable or a mapFromSql method instead') + SqlType forDartType() { + return types.singleWhere((t) => t is SqlType) as SqlType; + } + + /// Maps a Dart object to a (possibly simpler) object that can be used as + /// parameters to raw sql queries. + Object? mapToVariable(Object? dart) { + if (dart == null) return null; + + // These need special handling, all other types are a direct mapping + if (dart is DateTime) return const DateTimeType().mapToSqlVariable(dart); + if (dart is bool) return const BoolType().mapToSqlVariable(dart); + + return dart; + } + + /// Maps a Dart object to a SQL constant representing the same value. + static String mapToSqlConstant(Object? dart) { + if (dart == null) return 'NULL'; + + // todo: Inline and remove types in the next major moor version + if (dart is bool) { + return const BoolType().mapToSqlConstant(dart); + } else if (dart is String) { + return const StringType().mapToSqlConstant(dart); + } else if (dart is int) { + return const IntType().mapToSqlConstant(dart); + } else if (dart is DateTime) { + return const DateTimeType().mapToSqlConstant(dart); + } else if (dart is Uint8List) { + return const BlobType().mapToSqlConstant(dart); + } else if (dart is double) { + return const RealType().mapToSqlConstant(dart); + } + + throw ArgumentError.value(dart, 'dart', + 'Must be null, bool, String, int, DateTime, Uint8List or double'); + } +} diff --git a/drift/lib/src/utils/expand_variables.dart b/drift/lib/src/utils/expand_variables.dart new file mode 100644 index 00000000..9b3e09ed --- /dev/null +++ b/drift/lib/src/utils/expand_variables.dart @@ -0,0 +1,13 @@ +/// Used by generated code. +String $expandVar(int start, int amount) { + final buffer = StringBuffer(); + + for (var x = 0; x < amount; x++) { + buffer.write('?${start + x}'); + if (x != amount - 1) { + buffer.write(', '); + } + } + + return buffer.toString(); +} diff --git a/drift/lib/src/utils/lazy_database.dart b/drift/lib/src/utils/lazy_database.dart new file mode 100644 index 00000000..02b43622 --- /dev/null +++ b/drift/lib/src/utils/lazy_database.dart @@ -0,0 +1,83 @@ +import 'dart:async'; + +import 'package:drift/backends.dart'; +import 'package:drift/drift.dart'; + +/// Signature of a function that opens a database connection when instructed to. +typedef DatabaseOpener = FutureOr Function(); + +/// A special database executor that delegates work to another [QueryExecutor]. +/// The other executor is lazily opened by a [DatabaseOpener]. +class LazyDatabase extends QueryExecutor { + late QueryExecutor _delegate; + bool _delegateAvailable = false; + + Completer? _openDelegate; + + /// The function that will open the database when this [LazyDatabase] gets + /// opened for the first time. + final DatabaseOpener opener; + + /// Declares a [LazyDatabase] that will run [opener] when the database is + /// first requested to be opened. 
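A minimal sketch of how `LazyDatabase` is typically used: the asynchronous setup function below is hypothetical, but wrapping it yields a synchronous `QueryExecutor` that can be handed to a database constructor right away.

```dart
import 'package:drift/drift.dart';

// Hypothetical: some asynchronous setup (copying a bundled database file,
// resolving a directory, ...) that eventually returns the real executor.
Future<QueryExecutor> setUpExecutor() async {
  throw UnimplementedError('replace with the actual database setup');
}

// The opener only runs when the database is first opened, so this can be
// passed to a database constructor synchronously.
QueryExecutor openLazily() => LazyDatabase(setUpExecutor);
```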
+ LazyDatabase(this.opener); + + Future _awaitOpened() { + if (_delegateAvailable) { + return Future.value(); + } else if (_openDelegate != null) { + return _openDelegate!.future; + } else { + final delegate = _openDelegate = Completer(); + Future.value(opener()).then((database) { + _delegate = database; + _delegateAvailable = true; + delegate.complete(); + }); + return delegate.future; + } + } + + @override + TransactionExecutor beginTransaction() => _delegate.beginTransaction(); + + @override + Future ensureOpen(QueryExecutorUser user) { + return _awaitOpened().then((_) => _delegate.ensureOpen(user)); + } + + @override + Future runBatched(BatchedStatements statements) => + _delegate.runBatched(statements); + + @override + Future runCustom(String statement, [List? args]) => + _delegate.runCustom(statement, args); + + @override + Future runDelete(String statement, List args) => + _delegate.runDelete(statement, args); + + @override + Future runInsert(String statement, List args) => + _delegate.runInsert(statement, args); + + @override + Future>> runSelect( + String statement, List args) { + return _delegate.runSelect(statement, args); + } + + @override + Future runUpdate(String statement, List args) => + _delegate.runUpdate(statement, args); + + @override + Future close() { + if (_delegateAvailable) { + return _delegate.close(); + } else { + return Future.value(); + } + } +} diff --git a/drift/lib/src/utils/single_transformer.dart b/drift/lib/src/utils/single_transformer.dart new file mode 100644 index 00000000..98454d18 --- /dev/null +++ b/drift/lib/src/utils/single_transformer.dart @@ -0,0 +1,31 @@ +import 'dart:async'; + +/// Transforms a stream of lists into a stream of single elements, assuming +/// that each list is a singleton or empty. +StreamTransformer, T?> singleElementsOrNull() { + return StreamTransformer.fromHandlers(handleData: (data, sink) { + try { + if (data.isEmpty) { + sink.add(null); + } else { + sink.add(data.single); + } + } catch (e) { + sink.addError( + StateError('Expected exactly one element, but got ${data.length}')); + } + }); +} + +/// Transforms a stream of lists into a stream of single elements, assuming +/// that each list is a singleton. +StreamTransformer, T> singleElements() { + return StreamTransformer.fromHandlers(handleData: (data, sink) { + try { + sink.add(data.single); + } catch (e) { + sink.addError( + StateError('Expected exactly one element, but got ${data.length}')); + } + }); +} diff --git a/drift/lib/src/utils/start_with_value_transformer.dart b/drift/lib/src/utils/start_with_value_transformer.dart new file mode 100644 index 00000000..f0407ae9 --- /dev/null +++ b/drift/lib/src/utils/start_with_value_transformer.dart @@ -0,0 +1,107 @@ +import 'dart:async'; + +/// Signature of a function that returns the latest current value of a +/// [StartWithValueTransformer]. +typedef LatestValue = T? Function(); + +/// Lightweight implementation that turns a [StreamController] into a behavior +/// subject (we try to avoid depending on rxdart because of its size). +class StartWithValueTransformer extends StreamTransformerBase { + final LatestValue _value; + + /// Constructs a stream transformer that will emit what's returned by [_value] + /// to new listeners. 
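The sketch below shows the effect of this transformer on a plain `StreamController`: new listeners first receive the cached value, then live events. Note that the class is an internal utility under `src/`, so the import is for illustration only.

```dart
import 'dart:async';

// Internal drift utility, imported from src/ purely for illustration.
import 'package:drift/src/utils/start_with_value_transformer.dart';

Future<void> main() async {
  String? latest = 'cached';
  final controller = StreamController<String>();

  final stream = controller.stream
      .transform(StartWithValueTransformer<String>(() => latest));

  // Prints 'cached' (delivered in a microtask), then 'fresh'.
  stream.listen(print);

  await Future<void>.delayed(Duration.zero);
  latest = 'fresh';
  controller.add('fresh');
  await controller.close();
}
```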
+ StartWithValueTransformer(this._value); + + @override + Stream bind(Stream stream) { + return _StartWithValueStream(_value, stream); + } +} + +class _StartWithValueStream extends Stream { + final LatestValue _value; + final Stream _inner; + + _StartWithValueStream(this._value, this._inner); + + @override + bool get isBroadcast => _inner.isBroadcast; + + @override + StreamSubscription listen(void Function(T event)? onData, + {Function? onError, void Function()? onDone, bool? cancelOnError}) { + final data = _value(); + return _StartWithValueSubscription(_inner, data, onData, + onError: onError, onDone: onDone, cancelOnError: cancelOnError); + } +} + +class _StartWithValueSubscription extends StreamSubscription { + late final StreamSubscription _inner; + final T? initialData; + + bool needsInitialData = true; + void Function(T data)? _onData; + + _StartWithValueSubscription( + Stream innerStream, this.initialData, this._onData, + {Function? onError, void Function()? onDone, bool? cancelOnError}) { + _inner = innerStream.listen(_wrappedDataCallback(_onData), + onError: onError, onDone: onDone, cancelOnError: cancelOnError); + + // Dart's stream contract specifies that listeners are only notified + // after the .listen() code completes. So, we add the initial data in + // a later microtask. + final data = initialData; + if (data != null) { + scheduleMicrotask(() { + if (needsInitialData) { + _onData?.call(data); + needsInitialData = false; + } + }); + } + } + + void Function(T data) _wrappedDataCallback(void Function(T data)? onData) { + return (event) { + needsInitialData = false; + onData?.call(event); + }; + } + + @override + Future asFuture([E? futureValue]) => _inner.asFuture(futureValue); + + @override + Future cancel() { + needsInitialData = false; + return _inner.cancel(); + } + + @override + bool get isPaused => _inner.isPaused; + + @override + void onData(void Function(T data)? handleData) { + _onData = handleData; + + _inner.onData(_wrappedDataCallback(handleData)); + } + + @override + void onDone(void Function()? handleDone) => _inner.onDone(handleDone); + + @override + void onError(Function? handleError) => _inner.onError(handleError); + + @override + void pause([Future? resumeSignal]) { + needsInitialData = false; + _inner.pause(resumeSignal); + } + + @override + void resume() => _inner.resume(); +} diff --git a/drift/lib/src/utils/synchronized.dart b/drift/lib/src/utils/synchronized.dart new file mode 100644 index 00000000..e8c83dfb --- /dev/null +++ b/drift/lib/src/utils/synchronized.dart @@ -0,0 +1,30 @@ +import 'dart:async'; + +/// A single asynchronous lock implemented by future-chaining. +class Lock { + Future? _last; + + /// Waits for previous [synchronized]-calls on this [Lock] to complete, and + /// then calls [block] before further [synchronized] calls are allowed. + Future synchronized(FutureOr Function() block) { + final previous = _last; + // This completer may not be sync: It must complete just after + // callBlockAndComplete completes. 
+ final blockCompleted = Completer(); + _last = blockCompleted.future; + + Future callBlockAndComplete() async { + try { + return await block(); + } finally { + blockCompleted.complete(); + } + } + + if (previous != null) { + return previous.then((_) => callBlockAndComplete()); + } else { + return callBlockAndComplete(); + } + } +} diff --git a/drift/lib/src/web/binary_string_conversion.dart b/drift/lib/src/web/binary_string_conversion.dart new file mode 100644 index 00000000..f9fd8b20 --- /dev/null +++ b/drift/lib/src/web/binary_string_conversion.dart @@ -0,0 +1,52 @@ +import 'dart:convert'; +import 'dart:math' as math; +import 'dart:typed_data'; + +/// Converts [Uint8List]s to binary strings. Used internally by drift to store +/// a database inside `window.localStorage`. +const bin2str = _BinaryStringConversion(); + +class _BinaryStringConversion extends Codec { + const _BinaryStringConversion(); + + @override + Converter get decoder => const _String2Bin(); + + @override + Converter get encoder => const _Bin2String(); +} + +class _String2Bin extends Converter { + const _String2Bin(); + + @override + Uint8List convert(String input) { + final codeUnits = input.codeUnits; + final list = Uint8List(codeUnits.length); + + for (var i = 0; i < codeUnits.length; i++) { + list[i] = codeUnits[i]; + } + return list; + } +} + +class _Bin2String extends Converter { + const _Bin2String(); + + // There is a browser limit on the amount of chars one can give to + // String.fromCharCodes https://github.com/sql-js/sql.js/wiki/Persisting-a-Modified-Database#save-a-database-to-a-string + static const int _chunkSize = 0xffff; + + @override + String convert(Uint8List input) { + final buffer = StringBuffer(); + + for (var pos = 0; pos < input.length; pos += _chunkSize) { + final endPos = math.min(pos + _chunkSize, input.length); + buffer.write(String.fromCharCodes(input.sublist(pos, endPos))); + } + + return buffer.toString(); + } +} diff --git a/drift/lib/src/web/sql_js.dart b/drift/lib/src/web/sql_js.dart new file mode 100644 index 00000000..36773e25 --- /dev/null +++ b/drift/lib/src/web/sql_js.dart @@ -0,0 +1,162 @@ +import 'dart:async'; +import 'dart:js'; + +import 'dart:typed_data'; + +// We write our own mapping code to js instead of depending on package:js +// This way, projects using moor can run on flutter as long as they don't import +// this file. + +Completer? _moduleCompleter; + +/// Calls the `initSqlJs` function from the native sql.js library. +Future initSqlJs() { + if (_moduleCompleter != null) { + return _moduleCompleter!.future; + } + + _moduleCompleter = Completer(); + if (!context.hasProperty('initSqlJs')) { + return Future.error( + UnsupportedError('Could not access the sql.js javascript library. ' + 'The moor documentation contains instructions on how to setup moor ' + 'the web, which might help you fix this.')); + } + + (context.callMethod('initSqlJs') as JsObject) + .callMethod('then', [_handleModuleResolved]); + + return _moduleCompleter!.future; +} + +// We're extracting this into its own method so that we don't have to call +// [allowInterop] on this method or a lambda. +// todo figure out why dart2js generates invalid js when wrapping this in +// allowInterop +void _handleModuleResolved(dynamic module) { + _moduleCompleter!.complete(SqlJsModule._(module as JsObject)); +} + +/// `sql.js` module from the underlying library +class SqlJsModule { + final JsObject _obj; + SqlJsModule._(this._obj); + + /// Constructs a new [SqlJsDatabase], optionally from the [data] blob. 
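A rough sketch of how drift's web support drives these sql.js wrappers. The classes live in an internal `src/web` library, so the import reaches into `src/` purely for illustration, and `initSqlJs` only works when sql.js has been included on the page.

```dart
// Internal drift library, imported from src/ purely for illustration.
import 'package:drift/src/web/sql_js.dart';

Future<void> sqlJsDemo() async {
  final module = await initSqlJs();
  final db = module.createDatabase();

  db.run('CREATE TABLE entries (id INTEGER PRIMARY KEY, content TEXT)');
  db.runWithArgs('INSERT INTO entries (content) VALUES (?)', ['hello']);
  print(db.lastInsertId()); // 1

  final stmt = db.prepare('SELECT id, content FROM entries')
    ..executeWith(const []);
  while (stmt.step()) {
    print(stmt.currentRow()); // e.g. [1, hello]
  }
  stmt.free();
  db.close();
}
```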
+ SqlJsDatabase createDatabase([Uint8List? data]) { + final dbObj = _createInternally(data); + assert(() { + // set the window.db variable to make debugging easier + context['db'] = dbObj; + return true; + }()); + + return SqlJsDatabase._(dbObj); + } + + JsObject _createInternally(Uint8List? data) { + final constructor = _obj['Database'] as JsFunction; + + if (data != null) { + return JsObject(constructor, [data]); + } else { + return JsObject(constructor); + } + } +} + +/// Dart wrapper around a sql database provided by the sql.js library. +class SqlJsDatabase { + final JsObject _obj; + SqlJsDatabase._(this._obj); + + /// Returns the `user_version` pragma from sqlite. + int get userVersion { + return _selectSingleRowAndColumn('PRAGMA user_version;') as int; + } + + /// Sets sqlite's `user_version` pragma to the specified [version]. + set userVersion(int version) { + run('PRAGMA user_version = $version'); + } + + /// Calls `prepare` on the underlying js api + PreparedStatement prepare(String sql) { + final obj = _obj.callMethod('prepare', [sql]) as JsObject; + return PreparedStatement._(obj); + } + + /// Calls `run(sql)` on the underlying js api + void run(String sql) { + _obj.callMethod('run', [sql]); + } + + /// Calls `run(sql, args)` on the underlying js api + void runWithArgs(String sql, List args) { + final ar = JsArray.from(args); + _obj.callMethod('run', [sql, ar]); + } + + /// Returns the amount of rows affected by the most recent INSERT, UPDATE or + /// DELETE statement. + int lastModifiedRows() { + return _obj.callMethod('getRowsModified') as int; + } + + /// The row id of the last inserted row. This counter is reset when calling + /// [export]. + int lastInsertId() { + // load insert id. Will return [{columns: [...], values: [[id]]}] + return _selectSingleRowAndColumn('SELECT last_insert_rowid();') as int; + } + + dynamic _selectSingleRowAndColumn(String sql) { + final results = _obj.callMethod('exec', [sql]) as JsArray; + final row = results.first as JsObject; + final data = (row['values'] as JsArray).first as JsArray; + return data.first; + } + + /// Runs `export` on the underlying js api + Uint8List export() { + return _obj.callMethod('export') as Uint8List; + } + + /// Runs `close` on the underlying js api + void close() { + _obj.callMethod('close'); + } +} + +/// Dart api wrapping an underlying prepared statement object from the sql.js +/// library. +class PreparedStatement { + final JsObject _obj; + PreparedStatement._(this._obj); + + /// Executes this statement with the bound [args]. + void executeWith(List args) { + _obj.callMethod('bind', [JsArray.from(args)]); + } + + /// Performs `step` on the underlying js api + bool step() { + return _obj.callMethod('step') as bool; + } + + /// Reads the current from the underlying js api + List currentRow() { + return _obj.callMethod('get') as JsArray; + } + + /// The columns returned by this statement. This will only be available after + /// [step] has been called once. + List columnNames() { + return (_obj.callMethod('getColumnNames') as JsArray).cast(); + } + + /// Calls `free` on the underlying js api + void free() { + _obj.callMethod('free'); + } +} diff --git a/drift/lib/src/web/storage.dart b/drift/lib/src/web/storage.dart new file mode 100644 index 00000000..09e52e70 --- /dev/null +++ b/drift/lib/src/web/storage.dart @@ -0,0 +1,245 @@ +part of 'package:drift/web.dart'; + +/// Interface to control how moor should store data on the web. +abstract class MoorWebStorage { + /// Opens the storage implementation. 
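A minimal sketch showing how a storage implementation is usually picked and passed to a web database; the database name `'app_db'` is made up.

```dart
import 'package:drift/web.dart';

// Prefer the (experimental) IndexedDB-backed storage when the browser
// supports it, falling back to the local-storage implementation otherwise.
Future<WebDatabase> connectOnWeb() async {
  final storage = await MoorWebStorage.indexedDbIfSupported('app_db');
  return WebDatabase.withStorage(storage, logStatements: true);
}

// For tests, `MoorWebStorage.volatile()` keeps everything in memory so that
// nothing is persisted between page reloads.
```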
+ Future open(); + + /// Closes the storage implementation. + /// + /// No further requests may be sent after [close] was called. + Future close(); + + /// Restore the last database version that was saved with [store]. + /// + /// If no saved data was found, returns null. + Future restore(); + + /// Store the entire database. + Future store(Uint8List data); + + /// Creates the default storage implementation that uses the local storage + /// apis. + /// + /// The [name] parameter is used as a key to store the database blob in local + /// storage. It can be used to store multiple databases. + const factory MoorWebStorage(String name) = _LocalStorageImpl; + + /// Creates an in-memory storage that doesn't persist data. + /// + /// This means that your database will be recreated at each page reload. + factory MoorWebStorage.volatile() = _VolatileStorage; + + /// An experimental storage implementation that uses IndexedDB. + /// + /// This implementation is significantly faster than the default + /// implementation in local storage. Browsers also tend to allow more data + /// to be saved in IndexedDB. + /// + /// When the [migrateFromLocalStorage] parameter (defaults to `true`) is set, + /// old data saved using the default [MoorWebStorage] will be migrated to the + /// IndexedDB based implementation. This parameter can be turned off for + /// applications that never used the local storage implementation as a small + /// performance improvement. + /// + /// When the [inWebWorker] parameter (defaults to false) is set, + /// the implementation will use [WorkerGlobalScope] instead of [window] as + /// it isn't accessible from the worker. + /// + /// However, older browsers might not support IndexedDB. + @experimental + factory MoorWebStorage.indexedDb(String name, + {bool migrateFromLocalStorage, bool inWebWorker}) = _IndexedDbStorage; + + /// Uses [MoorWebStorage.indexedDb] if the current browser supports it. + /// Otherwise, falls back to the local storage based implementation. + static Future indexedDbIfSupported(String name, + {bool inWebWorker = false}) async { + return await supportsIndexedDb(inWebWorker: inWebWorker) + ? MoorWebStorage.indexedDb(name, inWebWorker: inWebWorker) + : MoorWebStorage(name); + } + + /// Attempts to check whether the current browser supports the + /// [MoorWebStorage.indexedDb] storage implementation. + static Future supportsIndexedDb({bool inWebWorker = false}) async { + var isIndexedDbSupported = false; + if (inWebWorker && WorkerGlobalScope.instance.indexedDB != null) { + isIndexedDbSupported = true; + } else { + try { + isIndexedDbSupported = IdbFactory.supported; + + if (isIndexedDbSupported) { + // Try opening a mock database to check if IndexedDB is really + // available. This avoids the problem with Firefox incorrectly + // reporting IndexedDB as supported in private mode. + final mockDb = await window.indexedDB!.open('moor_mock_db'); + mockDb.close(); + } + } catch (error) { + isIndexedDbSupported = false; + } + } + return isIndexedDbSupported && context.hasProperty('FileReader'); + } +} + +abstract class _CustomSchemaVersionSave implements MoorWebStorage { + int? get schemaVersion; + set schemaVersion(int? value); +} + +String _persistenceKeyForLocalStorage(String name) { + return 'moor_db_str_$name'; +} + +String _legacyVersionKeyForLocalStorage(String name) { + return 'moor_db_version_$name'; +} + +Uint8List? 
_restoreLocalStorage(String name) { + final raw = window.localStorage[_persistenceKeyForLocalStorage(name)]; + if (raw != null) { + return bin2str.decode(raw); + } + return null; +} + +class _LocalStorageImpl implements MoorWebStorage, _CustomSchemaVersionSave { + final String name; + + String get _persistenceKey => _persistenceKeyForLocalStorage(name); + String get _versionKey => _legacyVersionKeyForLocalStorage(name); + + const _LocalStorageImpl(this.name); + + @override + int? get schemaVersion { + final versionStr = window.localStorage[_versionKey]; + // ignore: avoid_returning_null + if (versionStr == null) return null; + + return int.tryParse(versionStr); + } + + @override + set schemaVersion(int? value) { + final key = _versionKey; + + if (value == null) { + window.localStorage.remove(key); + } else { + window.localStorage[_versionKey] = value.toString(); + } + } + + @override + Future close() => Future.value(); + + @override + Future open() => Future.value(); + + @override + Future restore() async { + return _restoreLocalStorage(name); + } + + @override + Future store(Uint8List data) { + final binStr = bin2str.encode(data); + window.localStorage[_persistenceKey] = binStr; + + return Future.value(); + } +} + +class _IndexedDbStorage implements MoorWebStorage { + static const _objectStoreName = 'moor_databases'; + + final String name; + final bool migrateFromLocalStorage; + final bool inWebWorker; + + late Database _database; + + _IndexedDbStorage(this.name, + {this.migrateFromLocalStorage = true, this.inWebWorker = false}); + + @override + Future open() async { + var wasCreated = false; + + final indexedDb = + inWebWorker ? WorkerGlobalScope.instance.indexedDB : window.indexedDB; + + _database = await indexedDb!.open( + _objectStoreName, + version: 1, + onUpgradeNeeded: (event) { + final database = event.target.result as Database; + + database.createObjectStore(_objectStoreName); + wasCreated = true; + }, + ); + + if (migrateFromLocalStorage && wasCreated) { + final fromLocalStorage = _restoreLocalStorage(name); + if (fromLocalStorage != null) { + await store(fromLocalStorage); + } + } + } + + @override + Future close() async { + _database.close(); + } + + @override + Future store(Uint8List data) async { + final transaction = + _database.transactionStore(_objectStoreName, 'readwrite'); + final store = transaction.objectStore(_objectStoreName); + + await store.put(Blob([data]), name); + await transaction.completed; + } + + @override + Future restore() async { + final transaction = + _database.transactionStore(_objectStoreName, 'readonly'); + final store = transaction.objectStore(_objectStoreName); + + final result = await store.getObject(name) as Blob?; + if (result == null) return null; + + final reader = FileReader(); + reader.readAsArrayBuffer(result); + // todo: Do we need to handle errors? We're reading from memory + await reader.onLoad.first; + + return reader.result as Uint8List; + } +} + +class _VolatileStorage implements MoorWebStorage { + Uint8List? 
_storedData; + + @override + Future close() => Future.value(); + + @override + Future open() => Future.value(); + + @override + Future restore() => Future.value(_storedData); + + @override + Future store(Uint8List data) { + _storedData = data; + return Future.value(); + } +} diff --git a/drift/lib/src/web/web_db.dart b/drift/lib/src/web/web_db.dart new file mode 100644 index 00000000..64fb3bb7 --- /dev/null +++ b/drift/lib/src/web/web_db.dart @@ -0,0 +1,211 @@ +part of 'package:drift/web.dart'; + +/// Signature of a function that asynchronously initializes a web database if it +/// doesn't exist. +/// The bytes returned should represent a valid sqlite3 database file. +typedef CreateWebDatabase = Future Function(); + +/// Experimental moor backend for the web. To use this platform, you need to +/// include the latest version of `sql.js` in your html. +class WebDatabase extends DelegatedDatabase { + /// A database executor that works on the web. + /// + /// [name] can be used to identify multiple databases. The optional + /// [initializer] can be used to initialize the database if it doesn't exist. + WebDatabase(String name, + {bool logStatements = false, CreateWebDatabase? initializer}) + : super(_WebDelegate(MoorWebStorage(name), initializer), + logStatements: logStatements, isSequential: true); + + /// A database executor that works on the web. + /// + /// The [storage] parameter controls how the data will be stored. The default + /// constructor of [MoorWebStorage] will use local storage for that, but an + /// IndexedDB-based implementation is available via. + WebDatabase.withStorage(MoorWebStorage storage, + {bool logStatements = false, CreateWebDatabase? initializer}) + : super(_WebDelegate(storage, initializer), + logStatements: logStatements, isSequential: true); +} + +class _WebDelegate extends DatabaseDelegate { + final MoorWebStorage storage; + final CreateWebDatabase? initializer; + + late SqlJsDatabase _db; + bool _isOpen = false; + + bool _inTransaction = false; + + _WebDelegate(this.storage, this.initializer); + + @override + set isInTransaction(bool value) { + _inTransaction = value; + + if (!_inTransaction) { + // transaction completed, save the database! + _storeDb(); + } + } + + @override + bool get isInTransaction => _inTransaction; + + @override + TransactionDelegate get transactionDelegate => const NoTransactionDelegate(); + + @override + DbVersionDelegate get versionDelegate => + _versionDelegate ??= _WebVersionDelegate(this); + DbVersionDelegate? 
_versionDelegate; + + @override + bool get isOpen => _isOpen; + + @override + Future open(QueryExecutorUser db) async { + final dbVersion = db.schemaVersion; + assert(dbVersion >= 1, 'Database schema version needs to be at least 1'); + + final module = await initSqlJs(); + + await storage.open(); + var restored = await storage.restore(); + + if (restored == null && initializer != null) { + restored = await initializer?.call(); + + if (restored != null) { + await storage.store(restored); + } + } + + _db = module.createDatabase(restored); + _isOpen = true; + } + + @override + Future runBatched(BatchedStatements statements) { + final preparedStatements = [ + for (final stmt in statements.statements) _db.prepare(stmt), + ]; + + for (final application in statements.arguments) { + final stmt = preparedStatements[application.statementIndex]; + + stmt + ..executeWith(application.arguments) + ..step(); + } + + for (final prepared in preparedStatements) { + prepared.free(); + } + return _handlePotentialUpdate(); + } + + @override + Future runCustom(String statement, List args) { + _db.runWithArgs(statement, args); + return Future.value(); + } + + @override + Future runInsert(String statement, List args) async { + _db.runWithArgs(statement, args); + final insertId = _db.lastInsertId(); + await _handlePotentialUpdate(); + return insertId; + } + + @override + Future runSelect(String statement, List args) { + // todo at least for stream queries we should cache prepared statements. + final stmt = _db.prepare(statement)..executeWith(args); + + List? columnNames; + final rows = >[]; + + while (stmt.step()) { + columnNames ??= stmt.columnNames(); + rows.add(stmt.currentRow()); + } + + columnNames ??= []; // assume no column names when there were no rows + + stmt.free(); + return Future.value(QueryResult(columnNames, rows)); + } + + @override + Future runUpdate(String statement, List args) { + _db.runWithArgs(statement, args); + return _handlePotentialUpdate(); + } + + @override + Future close() async { + await _storeDb(); + if (_isOpen) { + _db.close(); + } + + await storage.close(); + } + + @override + void notifyDatabaseOpened(OpeningDetails details) { + if (details.hadUpgrade || details.wasCreated) { + _storeDb(); + } + } + + /// Saves the database if the last statement changed rows. As a side-effect, + /// saving the database resets the `last_insert_id` counter in sqlite. + Future _handlePotentialUpdate() async { + final modified = _db.lastModifiedRows(); + if (modified > 0) { + await _storeDb(); + } + return modified; + } + + Future _storeDb() async { + if (!isInTransaction) { + await storage.store(_db.export()); + } + } +} + +class _WebVersionDelegate extends DynamicVersionDelegate { + final _WebDelegate delegate; + + _WebVersionDelegate(this.delegate); + + // Note: Earlier moor versions used to store the database version in a special + // field in local storage (moor_db_version_). Since 2.3, we instead use + // the user_version pragma, but still need to keep backwards compatibility. + + @override + Future get schemaVersion async { + final storage = delegate.storage; + int? version; + if (storage is _CustomSchemaVersionSave) { + version = storage.schemaVersion; + } + + return version ?? 
delegate._db.userVersion; + } + + @override + Future setSchemaVersion(int version) async { + final storage = delegate.storage; + + if (storage is _CustomSchemaVersionSave) { + storage.schemaVersion = version; + } + + delegate._db.userVersion = version; + } +} diff --git a/drift/lib/web.dart b/drift/lib/web.dart new file mode 100644 index 00000000..d886412e --- /dev/null +++ b/drift/lib/web.dart @@ -0,0 +1,39 @@ +/// A version of moor that runs on the web by using [sql.js](https://github.com/sql-js/sql.js) +/// You manually need to include that library into your website to use the +/// web version of moor. See [the documentation](https://moor.simonbinder.eu/web) +/// for a more detailed instruction. +@experimental +library moor_web; + +import 'dart:async'; +import 'dart:html'; +import 'dart:indexed_db'; +import 'dart:js'; + +import 'package:meta/meta.dart'; +import 'package:stream_channel/stream_channel.dart'; + +import 'backends.dart'; +import 'drift.dart'; +import 'src/web/binary_string_conversion.dart'; +import 'src/web/sql_js.dart'; + +part 'src/web/storage.dart'; +part 'src/web/web_db.dart'; + +/// Extension to transform a raw [MessagePort] from web workers into a Dart +/// [StreamChannel]. +extension PortToChannel on MessagePort { + /// Converts this port to a two-way communication channel, exposed as a + /// [StreamChannel]. + /// + /// This can be used to implement a remote database connection over service + /// workers. + StreamChannel channel() { + final controller = StreamChannelController(); + onMessage.map((event) => event.data).pipe(controller.local.sink); + controller.local.stream.listen(postMessage, onDone: close); + + return controller.foreign; + } +} diff --git a/drift/pubspec.yaml b/drift/pubspec.yaml new file mode 100644 index 00000000..6a445223 --- /dev/null +++ b/drift/pubspec.yaml @@ -0,0 +1,35 @@ +name: drift +description: Drift is a reactive library to store relational data in Dart and Flutter applications. +version: 4.6.0-dev +repository: https://github.com/simolus3/moor +homepage: https://drift.simonbinder.eu/ +issue_tracker: https://github.com/simolus3/moor/issues + +environment: + sdk: '>=2.13.0 <3.0.0' + +dependencies: + async: ^2.5.0 + convert: ^3.0.0 + collection: ^1.15.0 + meta: ^1.3.0 + pedantic: ^1.10.0 + stream_channel: ^2.1.0 + sqlite3: ^1.0.0 + +dev_dependencies: + build_test: ^2.0.0 + build_runner_core: ^7.0.0 + moor_generator: any + uuid: ^3.0.0 + path: ^1.8.0 + build_runner: ^2.0.0 + test: ^1.17.0 + mockito: ^5.0.7 + rxdart: ^0.27.0 + +dependency_overrides: + moor_generator: + path: ../moor_generator + sqlparser: + path: ../sqlparser diff --git a/moor/CHANGELOG.md b/moor/CHANGELOG.md index c0222393..6c96b092 100644 --- a/moor/CHANGELOG.md +++ b/moor/CHANGELOG.md @@ -3,6 +3,12 @@ - Add `DoUpdate.withExcluded` to refer to the excluded row in an upsert clause. - Add optional `where` clause to `DoUpdate` constructors +### Important notice + +Moor has been renamed to `drift`. This package will continue to be supported until the next major release (5.0.0), +at which point the `moor` package will be discontinued in favor of the `drift` package. +Please consider migrating to `drift` at an early opps + ## 4.5.0 - Add `moorRuntimeOptions.debugPrint` option to control which `print` method is used by moor.
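Finally, a small sketch of the `MessagePort.channel()` extension from `web.dart`, using an in-page `MessageChannel`. In a real setup the port would typically come from a (shared or service) worker, and the `start()` calls are an assumption about how these ports deliver events to listeners registered via `addEventListener`.

```dart
import 'dart:html';

import 'package:drift/web.dart';

void main() {
  final ports = MessageChannel();
  // Ports created by MessageChannel queue messages until they are started.
  ports.port1.start();
  ports.port2.start();

  final first = ports.port1.channel();
  final second = ports.port2.channel();

  second.stream.listen((message) => print('received: $message'));
  first.sink.add('hello from port1');
}
```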