mirror of https://github.com/AMT-Cheif/drift.git
Merge branch 'delightful-queries' into develop
This commit is contained in:
commit
1d36e3316d
|
@ -5,6 +5,7 @@ dart:
|
|||
env:
|
||||
- PKG="moor"
|
||||
- PKG="moor_generator"
|
||||
- PKG="sqlparser"
|
||||
|
||||
script: ./tool/mono_repo_wrapper.sh
|
||||
after_success: ./tool/upload_coverage.sh
|
||||
|
|
|
@ -25,9 +25,21 @@ class UseMoor {
|
|||
/// For instructions on how to write a dao, see the documentation of [UseDao]
|
||||
final List<Type> daos;
|
||||
|
||||
/// Optionally, a list of queries. Moor will generate matching methods for the
|
||||
/// variables and return types.
|
||||
// todo better documentation
|
||||
final List<Sql> queries;
|
||||
|
||||
/// Use this class as an annotation to inform moor_generator that a database
|
||||
/// class should be generated using the specified [UseMoor.tables].
|
||||
const UseMoor({@required this.tables, this.daos = const []});
|
||||
const UseMoor({@required this.tables, this.daos = const [], this.queries});
|
||||
}
|
||||
|
||||
/// A named sql query for which moor_generator will create a matching method
/// on the database or dao class.
class Sql {
  /// The name of the generated method.
  final String name;

  /// The sql string to execute.
  final String query;

  const Sql(this.name, this.query);
}
|
||||
|
||||
/// Annotation to use on classes that implement [DatabaseAccessor]. It specifies
|
||||
|
@ -51,6 +63,8 @@ class UseMoor {
|
|||
class UseDao {
  /// The tables accessed by this DAO.
  final List<Type> tables;

  /// Optionally, a list of named queries. Moor will generate a matching
  /// method for each entry.
  // todo better documentation
  final List<Sql> queries;

  // note: the merge left both the old and the new constructor in the source,
  // which is invalid Dart; only the updated one (with queries) is kept.
  const UseDao({@required this.tables, this.queries});
}
|
||||
|
|
|
@ -12,7 +12,7 @@ class _$ColumnName extends ColumnName {
|
|||
@override
|
||||
final String name;
|
||||
|
||||
factory _$ColumnName([void updates(ColumnNameBuilder b)]) =>
|
||||
factory _$ColumnName([void Function(ColumnNameBuilder) updates]) =>
|
||||
(new ColumnNameBuilder()..update(updates)).build();
|
||||
|
||||
_$ColumnName._({this.implicit, this.name}) : super._() {
|
||||
|
@ -25,7 +25,7 @@ class _$ColumnName extends ColumnName {
|
|||
}
|
||||
|
||||
@override
|
||||
ColumnName rebuild(void updates(ColumnNameBuilder b)) =>
|
||||
ColumnName rebuild(void Function(ColumnNameBuilder) updates) =>
|
||||
(toBuilder()..update(updates)).build();
|
||||
|
||||
@override
|
||||
|
@ -84,7 +84,7 @@ class ColumnNameBuilder implements Builder<ColumnName, ColumnNameBuilder> {
|
|||
}
|
||||
|
||||
@override
|
||||
void update(void updates(ColumnNameBuilder b)) {
|
||||
void update(void Function(ColumnNameBuilder) updates) {
|
||||
if (updates != null) updates(this);
|
||||
}
|
||||
|
||||
|
@ -102,13 +102,15 @@ class _$LimitingTextLength extends LimitingTextLength {
|
|||
@override
|
||||
final int maxLength;
|
||||
|
||||
factory _$LimitingTextLength([void updates(LimitingTextLengthBuilder b)]) =>
|
||||
factory _$LimitingTextLength(
|
||||
[void Function(LimitingTextLengthBuilder) updates]) =>
|
||||
(new LimitingTextLengthBuilder()..update(updates)).build();
|
||||
|
||||
_$LimitingTextLength._({this.minLength, this.maxLength}) : super._();
|
||||
|
||||
@override
|
||||
LimitingTextLength rebuild(void updates(LimitingTextLengthBuilder b)) =>
|
||||
LimitingTextLength rebuild(
|
||||
void Function(LimitingTextLengthBuilder) updates) =>
|
||||
(toBuilder()..update(updates)).build();
|
||||
|
||||
@override
|
||||
|
@ -169,7 +171,7 @@ class LimitingTextLengthBuilder
|
|||
}
|
||||
|
||||
@override
|
||||
void update(void updates(LimitingTextLengthBuilder b)) {
|
||||
void update(void Function(LimitingTextLengthBuilder) updates) {
|
||||
if (updates != null) updates(this);
|
||||
}
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ dependencies:
|
|||
recase: ^2.0.1
|
||||
built_value: '>=6.3.0 <7.0.0'
|
||||
source_gen: ^0.9.4
|
||||
source_span: ^1.5.5
|
||||
build: ^1.1.0
|
||||
build_config: '>=0.3.1 <1.0.0'
|
||||
moor: ^1.4.0
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
# Files and directories created by pub
|
||||
.dart_tool/
|
||||
.packages
|
||||
# Remove the following pattern if you wish to check in your lock file
|
||||
pubspec.lock
|
||||
|
||||
# Conventional directory for build outputs
|
||||
build/
|
||||
|
||||
# Directory created by dartdoc
|
||||
doc/api/
|
|
@ -0,0 +1,2 @@
|
|||
## 0.1.0
|
||||
Initial version
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2019 Simon Binder
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,3 @@
|
|||
# sqlparser
|
||||
|
||||
Parser and analyzer for sql queries, written in pure Dart. Heavy work in progress
|
|
@ -0,0 +1 @@
|
|||
../analysis_options.yaml
|
|
@ -0,0 +1 @@
|
|||
// todo write example
|
|
@ -0,0 +1,4 @@
|
|||
/// Sql parser and analyzer, written in pure dart.
|
||||
///
|
||||
/// More dartdocs go here.
|
||||
library sqlparser;
|
|
@ -0,0 +1,37 @@
|
|||
import 'package:meta/meta.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
|
||||
part 'clauses/limit.dart';
|
||||
part 'clauses/ordering.dart';
|
||||
|
||||
part 'expressions/expressions.dart';
|
||||
part 'expressions/literals.dart';
|
||||
part 'expressions/reference.dart';
|
||||
part 'expressions/simple.dart';
|
||||
|
||||
part 'statements/select.dart';
|
||||
|
||||
/// Base class for all nodes in the sql abstract syntax tree.
abstract class AstNode {
  /// The direct children of this node.
  Iterable<AstNode> get childNodes;

  /// Dispatches to the matching visit method on the [visitor].
  T accept<T>(AstVisitor<T> visitor);

  /// Whether the content of this node is equal to the [other] node of the same
  /// type. The "content" refers to anything stored only in this node, children
  /// are ignored.
  bool contentEquals(covariant AstNode other);
}
|
||||
|
||||
/// Visitor over the sql ast, with one method per concrete node type.
abstract class AstVisitor<T> {
  T visitSelectStatement(SelectStatement e);
  T visitResultColumn(ResultColumn e);

  T visitOrderBy(OrderBy e);
  T visitOrderingTerm(OrderingTerm e);
  T visitLimit(Limit e);

  T visitBinaryExpression(BinaryExpression e);
  T visitUnaryExpression(UnaryExpression e);
  T visitIsExpression(IsExpression e);
  T visitLiteral(Literal e);
  T visitReference(Reference e);
}
|
|
@ -0,0 +1,22 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// A "LIMIT x OFFSET y" (or "LIMIT x, y") clause of a select statement.
class Limit extends AstNode {
  Expression count;
  Token offsetSeparator; // can either be OFFSET or just a comma
  Expression offset;

  Limit({this.count, this.offsetSeparator, this.offset});

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitLimit(this);

  @override
  Iterable<AstNode> get childNodes => [count, if (offset != null) offset];

  @override
  bool contentEquals(Limit other) =>
      other.offsetSeparator?.type == offsetSeparator?.type;
}
|
|
@ -0,0 +1,38 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// An "ORDER BY" clause, made up of one or more [OrderingTerm]s.
class OrderBy extends AstNode {
  final List<OrderingTerm> terms;

  OrderBy({this.terms});

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitOrderBy(this);

  @override
  Iterable<AstNode> get childNodes => terms;

  @override
  bool contentEquals(OrderBy other) => true;
}
|
||||
|
||||
/// Whether an ordering term sorts ascending or descending.
enum OrderingMode { ascending, descending }

/// A single term in an "ORDER BY" clause: an expression plus an optional
/// ordering mode (defaults to ascending, like in sqlite).
class OrderingTerm extends AstNode {
  final Expression expression;
  final OrderingMode orderingMode;

  OrderingTerm({this.expression, this.orderingMode = OrderingMode.ascending});

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitOrderingTerm(this);

  @override
  Iterable<AstNode> get childNodes => [expression];

  @override
  bool contentEquals(OrderingTerm other) =>
      other.orderingMode == orderingMode;
}
|
|
@ -0,0 +1,5 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// Base class for everything that can appear as an expression in sql.
abstract class Expression implements AstNode {
  const Expression();
}
|
|
@ -0,0 +1,50 @@
|
|||
part of '../ast.dart';
|
||||
/// Base class for literal values appearing in sql.
/// https://www.sqlite.org/syntax/literal-value.html
abstract class Literal extends Expression {
  /// The token this literal was parsed from.
  final Token token;

  Literal(this.token);

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitLiteral(this);

  @override
  final Iterable<AstNode> childNodes = const <AstNode>[];
}

/// The "NULL" literal.
class NullLiteral extends Literal {
  NullLiteral(Token token) : super(token);

  @override
  bool contentEquals(NullLiteral other) => true;
}

/// A numeric literal (integer or floating point).
class NumericLiteral extends Literal {
  final num number;

  NumericLiteral(this.number, Token token) : super(token);

  @override
  bool contentEquals(NumericLiteral other) => other.number == number;
}

/// "TRUE" or "FALSE", represented as the numbers 1 and 0 like sqlite does.
class BooleanLiteral extends NumericLiteral {
  BooleanLiteral.withFalse(Token token) : super(0, token);
  BooleanLiteral.withTrue(Token token) : super(1, token);
}

/// A string literal; a blob literal when [isBinary] is set.
class StringLiteral extends Literal {
  final String data;
  final bool isBinary;

  StringLiteral(StringLiteralToken token)
      : data = token.value,
        isBinary = token.binary,
        super(token);

  @override
  bool contentEquals(StringLiteral other) =>
      other.isBinary == isBinary && other.data == data;
}
|
|
@ -0,0 +1,20 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// Expression that refers to an individual column, optionally qualified with
/// a table name ("table.column").
class Reference extends Expression {
  final String tableName;
  final String columnName;

  Reference({this.tableName, this.columnName});

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitReference(this);

  @override
  Iterable<AstNode> get childNodes => const [];

  @override
  bool contentEquals(Reference other) =>
      other.tableName == tableName && other.columnName == columnName;
}
|
|
@ -0,0 +1,78 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// A prefix expression such as "-x" or "NOT x".
class UnaryExpression extends Expression {
  final Token operator;
  final Expression inner;

  UnaryExpression(this.operator, this.inner);

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitUnaryExpression(this);

  @override
  Iterable<AstNode> get childNodes => [inner];

  @override
  bool contentEquals(UnaryExpression other) =>
      other.operator.type == operator.type;
}
|
||||
|
||||
/// An infix expression, e.g. "a + b" or "a AND b".
class BinaryExpression extends Expression {
  final Token operator;
  final Expression left;
  final Expression right;

  BinaryExpression(this.left, this.operator, this.right);

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitBinaryExpression(this);

  @override
  Iterable<AstNode> get childNodes => [left, right];

  @override
  bool contentEquals(BinaryExpression other) =>
      other.operator.type == operator.type;
}
|
||||
|
||||
/// An "a IS b" expression; an "a IS NOT b" expression when [negated] is true.
class IsExpression extends Expression {
  final bool negated;
  final Expression left;
  final Expression right;

  IsExpression(this.negated, this.left, this.right);

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitIsExpression(this);

  @override
  Iterable<AstNode> get childNodes => [left, right];

  @override
  bool contentEquals(IsExpression other) => other.negated == negated;
}
|
||||
|
||||
/// An expression wrapped in parentheses.
class Parentheses extends Expression {
  final Token openingLeft;
  final Expression expression;
  final Token closingRight;

  Parentheses(this.openingLeft, this.expression, this.closingRight);

  @override
  T accept<T>(AstVisitor<T> visitor) {
    // note: delegates straight to the inner expression — visitors never see
    // a Parentheses node directly
    return expression.accept(visitor);
  }

  @override
  Iterable<AstNode> get childNodes => [expression];

  @override
  bool contentEquals(Parentheses other) => true;
}
|
|
@ -0,0 +1,66 @@
|
|||
part of '../ast.dart';
|
||||
|
||||
/// A "SELECT" statement with its optional where, order by and limit clauses.
class SelectStatement extends AstNode {
  final Expression where;
  final List<ResultColumn> columns;
  final OrderBy orderBy;
  final Limit limit;

  SelectStatement({this.where, this.columns, this.orderBy, this.limit});

  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitSelectStatement(this);

  @override
  Iterable<AstNode> get childNodes {
    return [
      if (where != null) where,
      ...columns,
      if (limit != null) limit,
      if (orderBy != null) orderBy,
    ];
  }

  @override
  bool contentEquals(SelectStatement other) => true;
}
|
||||
|
||||
/// Base class for a single column in the select clause of a statement.
abstract class ResultColumn extends AstNode {
  @override
  T accept<T>(AstVisitor<T> visitor) => visitor.visitResultColumn(this);
}
|
||||
|
||||
/// A result column that either yields all columns or all columns from a table
/// by using "*" or "table.*".
class StarResultColumn extends ResultColumn {
  /// The table whose columns are selected, or null for a plain "*".
  final String tableName;

  StarResultColumn(this.tableName);

  @override
  Iterable<AstNode> get childNodes => const [];

  @override
  bool contentEquals(StarResultColumn other) => other.tableName == tableName;
}
|
||||
|
||||
/// A result column backed by an [expression], optionally renamed with "AS".
class ExpressionResultColumn extends ResultColumn {
  final Expression expression;

  /// The alias introduced via "AS", or null if there is none.
  final String as;

  ExpressionResultColumn({@required this.expression, this.as});

  @override
  Iterable<AstNode> get childNodes => [expression];

  @override
  bool contentEquals(ExpressionResultColumn other) => other.as == as;
}
|
|
@ -0,0 +1,11 @@
|
|||
part of 'parser.dart';
|
||||
|
||||
/// Parses a number from the [lexeme] assuming it has a form conforming to
/// https://www.sqlite.org/syntax/numeric-literal.html
num _parseNumber(String lexeme) {
  // Hexadecimal integers may start with "0x" or "0X" — the scanner accepts
  // both spellings, so we have to handle both here as well.
  if (lexeme.startsWith('0x') || lexeme.startsWith('0X')) {
    return int.parse(lexeme.substring(2), radix: 16);
  }

  // Everything else (integers, decimals, scientific notation) is understood
  // by double.parse.
  return double.parse(lexeme);
}
|
|
@ -0,0 +1,339 @@
|
|||
import 'package:meta/meta.dart';
|
||||
import 'package:sqlparser/src/ast/ast.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
|
||||
part 'num_parser.dart';
|
||||
|
||||
/// Operators with comparison precedence: <, <=, >, >=.
const _comparisonOperators = [
  TokenType.less,
  TokenType.lessEqual,
  TokenType.more,
  TokenType.moreEqual,
];

/// Bitwise binary operators: <<, >>, &, |.
// note: the redundant "const" after "=" (already a const context) was removed.
const _binaryOperators = [
  TokenType.shiftLeft,
  TokenType.shiftRight,
  TokenType.ampersand,
  TokenType.pipe,
];
|
||||
|
||||
/// An error reported during parsing, pointing at the offending [token].
class ParsingError implements Exception {
  final Token token;
  final String message;

  ParsingError(this.token, this.message);

  @override
  String toString() {
    // fixed: the message previously contained a stray closing brace
    // ('Error: $message}')
    return token.span.message('Error: $message');
  }
}
|
||||
|
||||
// todo better error handling and synchronisation, like it's done here:
|
||||
// https://craftinginterpreters.com/parsing-expressions.html#synchronizing-a-recursive-descent-parser
|
||||
|
||||
/// A recursive descent parser operating on the [tokens] produced by the
/// scanner. Problems are thrown as [ParsingError]s and also collected in
/// [errors].
class Parser {
  final List<Token> tokens;
  final List<ParsingError> errors = [];
  int _current = 0;

  Parser(this.tokens);

  bool get _isAtEnd => _peek.type == TokenType.eof;
  Token get _peek => tokens[_current];
  Token get _previous => tokens[_current - 1];

  /// If the current token's type is in [types], consumes it and returns true.
  bool _match(List<TokenType> types) {
    for (var type in types) {
      if (_check(type)) {
        _advance();
        return true;
      }
    }
    return false;
  }

  /// Whether the current token has the given [type], without consuming it.
  bool _check(TokenType type) {
    if (_isAtEnd) return false;
    return _peek.type == type;
  }

  /// Consumes the current token and returns it.
  Token _advance() {
    if (!_isAtEnd) {
      _current++;
    }
    return _previous;
  }

  /// Records a [ParsingError] at the current position and throws it.
  @alwaysThrows
  void _error(String message) {
    final error = ParsingError(_peek, message);
    errors.add(error);
    throw error;
  }

  /// Consumes a token of the given [type], or reports an error with [message].
  Token _consume(TokenType type, String message) {
    if (_check(type)) return _advance();
    _error(message);
  }

  /// Parses a [SelectStatement], or returns null if there is no select token
  /// after the current position.
  ///
  /// See also:
  /// https://www.sqlite.org/lang_select.html
  SelectStatement select() {
    if (!_match(const [TokenType.select])) return null;

    final resultColumns = <ResultColumn>[];
    do {
      resultColumns.add(_resultColumn());
    } while (_match(const [TokenType.comma]));

    final where = _where();
    final orderBy = _orderBy();
    final limit = _limit();

    return SelectStatement(
        where: where, columns: resultColumns, orderBy: orderBy, limit: limit);
  }

  /// Parses a [ResultColumn] or throws if none is found.
  /// https://www.sqlite.org/syntax/result-column.html
  ResultColumn _resultColumn() {
    if (_match(const [TokenType.star])) {
      return StarResultColumn(null);
    }

    final positionBefore = _current;

    if (_match(const [TokenType.identifier])) {
      // two options: the identifier could be followed by ".*", in which case
      // we have a star result column. If it's followed by anything else, it
      // can still refer to a column in a table as part of an expression
      // result column.
      final identifier = _previous;

      if (_match(const [TokenType.dot]) && _match(const [TokenType.star])) {
        return StarResultColumn((identifier as IdentifierToken).identifier);
      }

      // not a star result column. go back and parse the expression.
      // todo this is a bit unorthodox. is there a better way to parse the
      // expression from before?
      _current = positionBefore;
    }

    final expr = expression();
    // todo in sqlite, the as is optional
    if (_match(const [TokenType.as])) {
      if (_match(const [TokenType.identifier])) {
        final identifier = (_previous as IdentifierToken).identifier;
        return ExpressionResultColumn(expression: expr, as: identifier);
      } else {
        // report via _error so the problem is recorded in [errors] as well,
        // consistent with the rest of this parser (used to throw directly)
        _error('Expected an identifier as the column name');
      }
    }

    return ExpressionResultColumn(expression: expr);
  }

  /// Parses a where clause if there is one at the current position.
  Expression _where() {
    if (_match(const [TokenType.where])) {
      return expression();
    }
    return null;
  }

  /// Parses an "ORDER BY" clause, or returns null when there is no order
  /// token at the current position.
  OrderBy _orderBy() {
    if (_match(const [TokenType.order])) {
      _consume(TokenType.by, 'Expected "BY" after "ORDER" token');
      final terms = <OrderingTerm>[];
      do {
        terms.add(_orderingTerm());
      } while (_match(const [TokenType.comma]));

      // the parsed terms were previously discarded and null returned, which
      // dropped every order by clause from the ast
      return OrderBy(terms: terms);
    }
    return null;
  }

  OrderingTerm _orderingTerm() {
    final expr = expression();

    if (_match(const [TokenType.asc, TokenType.desc])) {
      final mode = _previous.type == TokenType.asc
          ? OrderingMode.ascending
          : OrderingMode.descending;
      return OrderingTerm(expression: expr, orderingMode: mode);
    }

    return OrderingTerm(expression: expr);
  }

  /// Parses a [Limit] clause, or returns null if there is no limit token after
  /// the current position.
  Limit _limit() {
    if (!_match(const [TokenType.limit])) return null;

    final count = expression();
    Token offsetSep;
    Expression offset;

    if (_match(const [TokenType.comma, TokenType.offset])) {
      offsetSep = _previous;
      offset = expression();
    }

    return Limit(count: count, offsetSeparator: offsetSep, offset: offset);
  }

  /* We parse expressions here.
  * Operators have the following precedence:
  *  - + ~ NOT (unary)
  *  || (concatenation)
  *  * / %
  *  + -
  *  << >> & |
  *  < <= > >=
  *  = == != <> IS IS NOT IN LIKE GLOB MATCH REGEXP
  *  AND
  *  OR
  * We also treat expressions in parentheses and literals with the highest
  * priority. Parsing methods are written in ascending precedence, and each
  * parsing method calls the next higher precedence if unsuccessful.
  * https://www.sqlite.org/lang_expr.html
  * */

  Expression expression() {
    return _or();
  }

  /// Parses an expression of the form a <T> b, where <T> is in [types] and
  /// both a and b are expressions with a higher precedence parsed from
  /// [higherPrecedence].
  Expression _parseSimpleBinary(
      List<TokenType> types, Expression Function() higherPrecedence) {
    var expression = higherPrecedence();

    while (_match(types)) {
      final operator = _previous;
      final right = higherPrecedence();
      expression = BinaryExpression(expression, operator, right);
    }
    return expression;
  }

  Expression _or() => _parseSimpleBinary(const [TokenType.or], _and);
  Expression _and() => _parseSimpleBinary(const [TokenType.and], _equals);

  Expression _equals() {
    var expression = _comparison();
    final ops = const [
      TokenType.equal,
      TokenType.doubleEqual,
      TokenType.exclamationEqual,
      TokenType.lessMore,
      TokenType.$is,
      TokenType.$in,
      TokenType.like,
      TokenType.glob,
      TokenType.match,
      TokenType.regexp,
    ];

    while (_match(ops)) {
      final operator = _previous;
      if (operator.type == TokenType.$is) {
        final not = _match(const [TokenType.not]);
        // special case: is not expression
        expression = IsExpression(not, expression, _comparison());
      } else {
        expression = BinaryExpression(expression, operator, _comparison());
      }
    }
    return expression;
  }

  Expression _comparison() {
    return _parseSimpleBinary(_comparisonOperators, _binaryOperation);
  }

  Expression _binaryOperation() {
    return _parseSimpleBinary(_binaryOperators, _addition);
  }

  Expression _addition() {
    return _parseSimpleBinary(const [
      TokenType.plus,
      TokenType.minus,
    ], _multiplication);
  }

  Expression _multiplication() {
    return _parseSimpleBinary(const [
      TokenType.star,
      TokenType.slash,
      TokenType.percent,
    ], _concatenation);
  }

  Expression _concatenation() {
    return _parseSimpleBinary(const [TokenType.doublePipe], _unary);
  }

  Expression _unary() {
    if (_match(const [
      TokenType.minus,
      TokenType.plus,
      TokenType.tilde,
      TokenType.not
    ])) {
      final operator = _previous;
      final expression = _unary();
      return UnaryExpression(operator, expression);
    }

    return _primary();
  }

  Expression _primary() {
    final token = _advance();
    final type = token.type;
    switch (type) {
      // fixed: the literal cases below used to attach _peek — the token
      // AFTER the literal — instead of the consumed token itself
      case TokenType.numberLiteral:
        return NumericLiteral(_parseNumber(token.lexeme), token);
      case TokenType.stringLiteral:
        return StringLiteral(token as StringLiteralToken);
      case TokenType.$null:
        return NullLiteral(token);
      case TokenType.$true:
        return BooleanLiteral.withTrue(token);
      case TokenType.$false:
        return BooleanLiteral.withFalse(token);
      // todo CURRENT_TIME, CURRENT_DATE, CURRENT_TIMESTAMP
      case TokenType.leftParen:
        final left = token;
        final expr = expression();
        _consume(TokenType.rightParen, 'Expected a closing bracket');
        return Parentheses(left, expr, _previous);
      case TokenType.identifier:
        // either "column" or "table.column"
        final first = token as IdentifierToken;
        if (_match(const [TokenType.dot])) {
          final second =
              _consume(TokenType.identifier, 'Expected a column name here')
                  as IdentifierToken;
          return Reference(
              tableName: first.identifier, columnName: second.identifier);
        } else {
          return Reference(columnName: first.identifier);
        }
        break;
      default:
        break;
    }

    // nothing found -> issue error
    _error('Could not parse this expression');
  }
}
|
|
@ -0,0 +1,304 @@
|
|||
import 'package:source_span/source_span.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/utils.dart';
|
||||
|
||||
class Scanner {
|
||||
final String source;
|
||||
|
||||
final List<Token> tokens = [];
|
||||
final List<TokenizerError> errors = [];
|
||||
|
||||
int _startOffset;
|
||||
int _currentOffset = 0;
|
||||
bool get _isAtEnd => _currentOffset >= source.length;
|
||||
|
||||
SourceSpan get _currentSpan {
|
||||
return SourceSpan(_startLocation, _currentLocation,
|
||||
source.substring(_startOffset, _currentOffset));
|
||||
}
|
||||
|
||||
SourceLocation get _startLocation {
|
||||
return SourceLocation(_startOffset);
|
||||
}
|
||||
|
||||
SourceLocation get _currentLocation {
|
||||
return SourceLocation(_currentOffset);
|
||||
}
|
||||
|
||||
Scanner(this.source);
|
||||
|
||||
List<Token> scanTokens() {
|
||||
while (!_isAtEnd) {
|
||||
_startOffset = _currentOffset;
|
||||
_scanToken();
|
||||
}
|
||||
|
||||
final endLoc = SourceLocation(source.length);
|
||||
tokens.add(Token(TokenType.eof, SourceSpan(endLoc, endLoc, '')));
|
||||
return tokens;
|
||||
}
|
||||
|
||||
/// Scans a single token starting at the current offset.
void _scanToken() {
  final char = _nextChar();
  switch (char) {
    case '(':
      _addToken(TokenType.leftParen);
      break;
    case ')':
      _addToken(TokenType.rightParen);
      break;
    case ',':
      _addToken(TokenType.comma);
      break;
    case '.':
      // a leading dot can start a numeric literal (".5")
      if (!_isAtEnd && isDigit(_peek())) {
        _numeric(char);
      } else {
        _addToken(TokenType.dot);
      }
      break;
    case '+':
      _addToken(TokenType.plus);
      break;
    case '-':
      _addToken(TokenType.minus);
      break;
    case '*':
      _addToken(TokenType.star);
      break;
    case '/':
      _addToken(TokenType.slash);
      break;
    case '%':
      _addToken(TokenType.percent);
      break;
    case '&':
      _addToken(TokenType.ampersand);
      break;
    case '|':
      _addToken(_match('|') ? TokenType.doublePipe : TokenType.pipe);
      break;

    case '<':
      if (_match('=')) {
        _addToken(TokenType.lessEqual);
      } else if (_match('<')) {
        _addToken(TokenType.shiftLeft);
      } else if (_match('>')) {
        _addToken(TokenType.lessMore);
      } else {
        _addToken(TokenType.less);
      }
      break;
    case '>':
      if (_match('=')) {
        _addToken(TokenType.moreEqual);
      } else if (_match('>')) {
        _addToken(TokenType.shiftRight);
      } else {
        _addToken(TokenType.more);
      }
      break;
    case '=':
      _addToken(_match('=') ? TokenType.doubleEqual : TokenType.equal);
      break;
    case '~':
      _addToken(TokenType.tilde);
      break;

    case 'x':
      if (_match("'")) {
        // fixed: x'...' is sqlite's blob literal, so the string is binary
        _string(binary: true);
      } else {
        _identifier();
      }
      break;
    case "'":
      _string();
      break;
    case '"':
      // todo sqlite also allows string literals with double ticks, we don't
      _identifier(escapedInQuotes: true);
      break;
    case ' ':
    case '\t':
    case '\n':
      // ignore whitespace
      break;

    default:
      if (isDigit(char)) {
        _numeric(char);
      } else if (canStartColumnName(char)) {
        _identifier();
      } else {
        // fixed: this error used to be reported unconditionally, even after
        // a number or identifier had been scanned successfully above
        errors.add(TokenizerError(
            'Unexpected character.', SourceLocation(_currentOffset)));
      }
      break;
  }
}
|
||||
|
||||
String _nextChar() {
|
||||
_currentOffset++;
|
||||
return source.substring(_currentOffset - 1, _currentOffset);
|
||||
}
|
||||
|
||||
String _peek() {
|
||||
if (_isAtEnd) throw StateError('Reached end of source');
|
||||
return source.substring(_currentOffset, _currentOffset + 1);
|
||||
}
|
||||
|
||||
bool _match(String expected) {
|
||||
if (_isAtEnd) return false;
|
||||
if (source.substring(_currentOffset, _currentOffset + 1) != expected) {
|
||||
return false;
|
||||
}
|
||||
_currentOffset++;
|
||||
return true;
|
||||
}
|
||||
|
||||
void _addToken(TokenType type) {
|
||||
tokens.add(Token(type, _currentSpan));
|
||||
}
|
||||
|
||||
/// Scans a string literal, assuming the opening "'" has already been
/// consumed. When [binary] is set, the resulting token is marked as a blob
/// literal (x'...').
void _string({bool binary = false}) {
  // fixed: check _isAtEnd BEFORE calling _peek(), which throws at the end of
  // the source
  while (!_isAtEnd && _peek() != "'") {
    _nextChar();
  }

  // Issue an error if the string is unterminated
  if (_isAtEnd) {
    errors.add(TokenizerError('Unterminated string', _currentLocation));
    // fixed: don't try to consume a closing quote that isn't there — the
    // _nextChar() below would read past the end of the source
    return;
  }

  // consume the closing "'"
  _nextChar();

  final value = source.substring(_startOffset + 1, _currentOffset - 1);
  tokens.add(StringLiteralToken(value, _currentSpan, binary: binary));
}
|
||||
|
||||
  /// Scans a numeric literal, where [firstChar] is the character that has
  /// already been consumed before this method was called.
  ///
  /// Covers the three forms described in
  /// https://www.sqlite.org/syntax/numeric-literal.html: hexadecimal numbers
  /// (0x...), numbers starting with a decimal dot (.5e3) and numbers starting
  /// with a digit (3.14e-2), including an optional scientific-notation suffix.
  void _numeric(String firstChar) {
    // https://www.sqlite.org/syntax/numeric-literal.html

    // We basically have three cases: hexadecimal numbers (starting with 0x),
    // numbers starting with a decimal dot and numbers starting with a digit.
    if (firstChar == '0') {
      if (!_isAtEnd && (_peek() == 'x' || _peek() == 'X')) {
        _nextChar(); // consume the x
        // advance hexadecimal digits
        while (!_isAtEnd && isHexDigit(_peek())) {
          _nextChar();
        }
        // NOTE(review): a bare '0x' with no hex digits after it still emits a
        // numberLiteral token here — confirm this is intended.
        _addToken(TokenType.numberLiteral);
        return;
      }
    }

    // Consumes digits as long as there are any; never reports an error.
    void consumeDigits() {
      while (!_isAtEnd && isDigit(_peek())) {
        _nextChar();
      }
    }

    /// Returns true without advancing if the next char is a digit. Returns
    /// false and logs an error with the message otherwise.
    bool _requireDigit(String message) {
      final noDigit = _isAtEnd || !isDigit(_peek());
      if (noDigit) {
        errors.add(TokenizerError(message, _currentLocation));
      }
      return !noDigit;
    }

    // ok, we're not dealing with a hexadecimal number.
    if (firstChar == '.') {
      // started with a decimal point. the next char has to be numeric
      if (_requireDigit('Expected a digit after the decimal dot')) {
        consumeDigits();
      }
    } else {
      // ok, not starting with a decimal dot. In that case, the first char must
      // be a digit
      if (!isDigit(firstChar)) {
        errors.add(TokenizerError('Expected a digit', _currentLocation));
        return;
      }
      consumeDigits();

      // optional decimal part
      if (!_isAtEnd && _peek() == '.') {
        _nextChar();
        // if there is a decimal separator, there must be at least one digit
        // after it
        if (_requireDigit('Expected a digit after the decimal dot')) {
          consumeDigits();
        } else {
          return;
        }
      }
    }

    // ok, we've read the first part of the number. But there's more! If it's
    // not a hexadecimal number, it could be in scientific notation.
    if (!_isAtEnd && (_peek() == 'e' || _peek() == 'E')) {
      _nextChar(); // consume e or E

      if (_isAtEnd) {
        errors.add(TokenizerError(
            'Unexpected end of file. Expected digits for the scientific notation',
            _currentLocation));
        return;
      }

      final char = _nextChar();
      if (isDigit(char)) {
        // exponent without a sign, e.g. 1e5
        consumeDigits();
        _addToken(TokenType.numberLiteral);
        return;
      } else {
        if (char == '+' || char == '-') {
          // NOTE(review): even when _requireDigit reported a missing exponent
          // digit, a numberLiteral token is still emitted below — confirm.
          _requireDigit('Expected digits for the exponent');
          consumeDigits();
          _addToken(TokenType.numberLiteral);
        } else {
          // the char after e/E was neither a digit nor a sign; no token is
          // emitted for this branch.
          errors
              .add(TokenizerError('Expected plus or minus', _currentLocation));
        }
      }
    } else {
      // ok, no scientific notation
      _addToken(TokenType.numberLiteral);
    }
  }
|
||||
|
||||
void _identifier({bool escapedInQuotes = false}) {
|
||||
if (escapedInQuotes) {
|
||||
// find the closing quote
|
||||
while (_peek() != '"' && !_isAtEnd) {
|
||||
_nextChar();
|
||||
}
|
||||
// Issue an error if the column name is unterminated
|
||||
if (_isAtEnd) {
|
||||
errors
|
||||
.add(TokenizerError('Unterminated column name', _currentLocation));
|
||||
} else {
|
||||
// consume the closing double quote
|
||||
_nextChar();
|
||||
tokens.add(IdentifierToken(true, _currentSpan));
|
||||
}
|
||||
} else {
|
||||
while (!_isAtEnd && continuesColumnName(_peek())) {
|
||||
_nextChar();
|
||||
}
|
||||
|
||||
// not escaped, so it could be a keyword
|
||||
final text = _currentSpan.text.toUpperCase();
|
||||
if (keywords.containsKey(text)) {
|
||||
_addToken(keywords[text]);
|
||||
} else {
|
||||
tokens.add(IdentifierToken(false, _currentSpan));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
import 'package:source_span/source_span.dart';
|
||||
|
||||
/// The kinds of tokens the sql scanner can emit.
enum TokenType {
  // punctuation and operators
  leftParen,
  rightParen,
  comma,
  dot,
  doublePipe,
  star,
  slash,
  percent,
  plus,
  minus,
  shiftLeft,
  shiftRight,
  ampersand,
  pipe,
  less,
  lessEqual,
  more,
  moreEqual,
  equal,
  doubleEqual,
  exclamationEqual,
  lessMore,
  // operator keywords ($-prefixed names avoid clashes with Dart keywords)
  $is,
  $in,
  not,
  like,
  glob,
  match,
  regexp,
  and,
  or,
  tilde,

  // literals and literal-like keywords
  stringLiteral,
  numberLiteral,
  $true,
  $false,
  $null,
  currentTime,
  currentDate,
  currentTimestamp,
  identifier,

  // select statement keywords
  select,

  from,
  as,
  where,

  order,
  by,
  asc,
  desc,

  limit,
  offset,

  // marks the end of the token stream
  eof,
}
|
||||
|
||||
/// All keywords recognized by the scanner, mapping the upper-case lexeme to
/// the [TokenType] emitted for it. Callers are expected to upper-case the
/// scanned lexeme before the lookup, making keyword matching
/// case-insensitive.
const Map<String, TokenType> keywords = {
  'SELECT': TokenType.select,
  'FROM': TokenType.from,
  'AS': TokenType.as,
  'WHERE': TokenType.where,
  'ORDER': TokenType.order,
  'BY': TokenType.by,
  'ASC': TokenType.asc,
  'DESC': TokenType.desc,
  'LIMIT': TokenType.limit,
  'OFFSET': TokenType.offset,
  'IS': TokenType.$is,
  'IN': TokenType.$in,
  'LIKE': TokenType.like,
  'GLOB': TokenType.glob,
  'MATCH': TokenType.match,
  'REGEXP': TokenType.regexp,
  'NOT': TokenType.not,
  'TRUE': TokenType.$true,
  'FALSE': TokenType.$false,
  'NULL': TokenType.$null,
  'CURRENT_TIME': TokenType.currentTime,
  'CURRENT_DATE': TokenType.currentDate,
  'CURRENT_TIMESTAMP': TokenType.currentTimestamp,
};
|
||||
|
||||
/// A single token produced by the scanner.
class Token {
  /// What kind of token this is; determines how the parser interprets it.
  final TokenType type;

  /// The region of source text this token was scanned from.
  final SourceSpan span;

  /// The exact source text of this token.
  String get lexeme => span.text;

  const Token(this.type, this.span);
}
|
||||
|
||||
/// A token for a string literal, carrying the parsed string [value].
class StringLiteralToken extends Token {
  /// The content of the string literal, without the enclosing quotes.
  final String value;

  /// sqlite allows binary strings (x'literal') which are interpreted as blobs.
  final bool binary;

  const StringLiteralToken(this.value, SourceSpan span, {this.binary = false})
      : super(TokenType.stringLiteral, span);
}
|
||||
|
||||
/// A token for an identifier, either a plain word or a "double-quoted" one.
class IdentifierToken extends Token {
  /// In sql, identifiers can be put in "double quotes", in which case they are
  /// always interpreted as a column name.
  final bool escapedColumnName;

  /// The identifier's text as it appeared in the source.
  // NOTE(review): for escaped identifiers the underlying span appears to
  // include the surrounding double quotes — confirm whether this getter
  // should strip them.
  String get identifier => lexeme;

  const IdentifierToken(this.escapedColumnName, SourceSpan span)
      : super(TokenType.identifier, span);
}
|
||||
|
||||
/// An error reported while scanning source text into tokens, for instance an
/// unterminated string literal.
class TokenizerError {
  /// A human-readable description of what went wrong.
  final String message;

  /// Where in the source text the error occurred.
  final SourceLocation location;

  TokenizerError(this.message, this.location);

  // Without this override, reported errors would print as
  // "Instance of 'TokenizerError'".
  @override
  String toString() => '$message at $location';
}
|
|
@ -0,0 +1,32 @@
|
|||
// Character codes used by the scanner, precomputed so that classification is
// a cheap integer range check instead of a string comparison.
const _charCodeZero = 48; // '0'.codeUnitAt(0);
const _charCodeNine = 57; // '9'.codeUnitAt(0);
const _charCodeLowerA = 97; // 'a'.codeUnitAt(0);
const _charCodeLowerF = 102; // 'f'.codeUnitAt(0);
const _charCodeA = 65; // 'A'.codeUnitAt(0);
// Was 79 ('O'), which made isHexDigit accept the letters 'G' through 'O'.
const _charCodeF = 70; // 'F'.codeUnitAt(0);
const _charCodeZ = 90; // 'Z'.codeUnitAt(0);
const _charCodeLowerZ = 122; // 'z'.codeUnitAt(0);

/// Whether [char] (a single-character string) is a decimal digit (0-9).
bool isDigit(String char) {
  final code = char.codeUnitAt(0);
  return _charCodeZero <= code && code <= _charCodeNine;
}

/// Whether [char] is a hexadecimal digit (0-9, a-f or A-F).
bool isHexDigit(String char) {
  final code = char.codeUnitAt(0);

  return (_charCodeLowerA <= code && code <= _charCodeLowerF) ||
      (_charCodeA <= code && code <= _charCodeF) ||
      (_charCodeZero <= code && code <= _charCodeNine);
}

/// Whether [char] may appear as the first character of an unquoted column
/// name: an ascii letter or an underscore.
bool canStartColumnName(String char) {
  final code = char.codeUnitAt(0);
  return char == '_' ||
      (_charCodeLowerA <= code && code <= _charCodeLowerZ) ||
      (_charCodeA <= code && code <= _charCodeZ);
}

/// Whether [char] may appear in an unquoted column name after the first
/// character: a letter, an underscore or a digit.
bool continuesColumnName(String char) {
  return canStartColumnName(char) || isDigit(char);
}
|
|
@ -0,0 +1,24 @@
|
|||
import 'package:sqlparser/src/ast/ast.dart';
|
||||
|
||||
/// Checks whether [a] and [b] are equal. If they aren't, throws an exception.
///
/// Nodes are equal when they have the same runtime type, equal content
/// according to [AstNode.contentEquals], and pairwise-equal child nodes.
void enforceEqual(AstNode a, AstNode b) {
  if (a.runtimeType != b.runtimeType) {
    throw ArgumentError('Not equal: First was $a, second $b');
  }

  if (!a.contentEquals(b)) {
    throw ArgumentError('Content not equal: $a and $b');
  }

  final childrenA = a.childNodes.iterator;
  final childrenB = b.childNodes.iterator;

  // Advance both iterators explicitly on each round. The previous version
  // used `moveNext() & moveNext()` in the loop condition plus a trailing
  // `moveNext() || moveNext()` check, which silently consumed a single
  // surplus child on the longer side and therefore failed to detect trees
  // whose child counts differ by exactly one.
  while (true) {
    final hasA = childrenA.moveNext();
    final hasB = childrenB.moveNext();

    if (hasA && hasB) {
      enforceEqual(childrenA.current, childrenB.current);
    } else if (hasA != hasB) {
      // one side still has children while the other is exhausted
      throw ArgumentError("$a and $b don't have an equal amount of children");
    } else {
      // both iterators are exhausted - all children compared equal
      break;
    }
  }
}
|
|
@ -0,0 +1,13 @@
|
|||
name: sqlparser
|
||||
description: Parsing and analysis for SQL queries
|
||||
version: 0.1.0
|
||||
repository: https://github.com/simolus3/moor
|
||||
#homepage: https://moor.simonbinder.eu/
|
||||
issue_tracker: https://github.com/simolus3/moor/issues
|
||||
author: Simon Binder <oss@simonbinder.eu>
|
||||
|
||||
environment:
|
||||
sdk: '>=2.2.2 <3.0.0'
|
||||
|
||||
dev_dependencies:
|
||||
test: ^1.0.0
|
|
@ -0,0 +1,65 @@
|
|||
import 'package:sqlparser/src/ast/ast.dart';
|
||||
import 'package:sqlparser/src/reader/parser/parser.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/scanner.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
import 'package:sqlparser/src/utils/ast_equality.dart';
|
||||
import 'package:test/test.dart';
|
||||
|
||||
import 'utils.dart';
|
||||
|
||||
void main() {
  test('parses simple expressions', () {
    // Scan and parse an arithmetic comparison.
    final scanner = Scanner('3 * 4 + 5 == 17');
    final tokens = scanner.scanTokens();
    final parser = Parser(tokens);

    final expression = parser.expression();
    // The expected tree encodes precedence and left-associativity:
    // ((3 * 4) + 5) == 17.
    enforceEqual(
      expression,
      BinaryExpression(
        BinaryExpression(
          BinaryExpression(
            NumericLiteral(3, token(TokenType.numberLiteral)),
            token(TokenType.star),
            NumericLiteral(4, token(TokenType.numberLiteral)),
          ),
          token(TokenType.plus),
          NumericLiteral(5, token(TokenType.numberLiteral)),
        ),
        token(TokenType.doubleEqual),
        NumericLiteral(17, token(TokenType.numberLiteral)),
      ),
    );
  });

  test('parses select statements', () {
    // Exercises star columns (qualified and bare), an aliased expression
    // column, WHERE, ORDER BY, and LIMIT/OFFSET. There is deliberately no
    // FROM clause in this statement.
    final scanner = Scanner(
        'SELECT table.*, *, 1 as name WHERE 1 ORDER BY name LIMIT 3 OFFSET 5');
    final tokens = scanner.scanTokens();
    final parser = Parser(tokens);

    final stmt = parser.select();
    enforceEqual(
      stmt,
      SelectStatement(
        columns: [
          StarResultColumn('table'), // table.*
          StarResultColumn(null), // bare *
          ExpressionResultColumn(
            expression: NumericLiteral(1, token(TokenType.numberLiteral)),
            as: 'name',
          ),
        ],
        where: NumericLiteral(1, token(TokenType.numberLiteral)),
        orderBy: OrderBy(terms: [
          OrderingTerm(expression: Reference(columnName: 'name')),
        ]),
        limit: Limit(
          count: NumericLiteral(3, token(TokenType.numberLiteral)),
          offsetSeparator: token(TokenType.offset),
          offset: NumericLiteral(5, token(TokenType.numberLiteral)),
        ),
      ),
    );
  });
}
|
|
@ -0,0 +1,5 @@
|
|||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
|
||||
/// Creates a [Token] of the given [type] without attaching a source span.
///
/// Useful in tests where only the token's type matters for comparisons.
Token token(TokenType type) => Token(type, null);
|
|
@ -0,0 +1,52 @@
|
|||
import 'package:sqlparser/src/reader/tokenizer/scanner.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
import 'package:test/test.dart';
|
||||
|
||||
/// Scans [token] and asserts that it produces exactly one token of the given
/// [type] whose span covers the entire input.
void expectFullToken(String token, TokenType type) {
  final scanner = Scanner(token);

  List<Token> tokens;
  try {
    tokens = scanner.scanTokens();
  } catch (e, s) {
    // Surface the failure details before failing the test.
    print(e);
    print(s);
    fail('Parsing error while parsing $token');
  }

  // The scanner appends an eof token, so a single scanned token means a
  // two-element list ending in eof.
  final exactlyOneToken =
      tokens.length == 2 && tokens.last.type == TokenType.eof;
  if (!exactlyOneToken) {
    fail(
        'Expected exactly one token when parsing $token, got ${tokens.length - 1}');
  }

  expect(tokens.first.type, type, reason: '$token is a $type');
  expect(tokens.first.span.text, token);
}
|
||||
|
||||
/// Inputs that should each scan to exactly one token of the mapped type.
Map<String, TokenType> testCases = {
  '(': TokenType.leftParen,
  ')': TokenType.rightParen,
  ',': TokenType.comma,
  '.': TokenType.dot,
  '+': TokenType.plus,
  '-': TokenType.minus,
  '*': TokenType.star,
  '/': TokenType.slash,
  '<=': TokenType.lessEqual,
  '<': TokenType.less,
  '>=': TokenType.moreEqual,
  '>': TokenType.more,
  "'hello there'": TokenType.stringLiteral,
  // number literals: decimal, scientific notation and hexadecimal forms
  '1.123': TokenType.numberLiteral,
  '1.32e5': TokenType.numberLiteral,
  '.123e-3': TokenType.numberLiteral,
  '0xFF13': TokenType.numberLiteral,
  '0Xf13A': TokenType.numberLiteral,
  'SELECT': TokenType.select,
  // a double-quoted keyword must scan as an identifier, not a keyword
  '"UPDATE"': TokenType.identifier,
};
|
||||
|
||||
void main() {
  test('parses single tokens', () {
    // Run every (input, expected type) pair through the scanner.
    for (final entry in testCases.entries) {
      expectFullToken(entry.key, entry.value);
    }
  });
}
|
|
@ -4,6 +4,9 @@ case $PKG in
|
|||
moor_generator)
|
||||
./tool/travis.sh dartfmt dartanalyzer test
|
||||
;;
|
||||
sqlparser)
|
||||
./tool/travis.sh dartfmt dartanalyzer test
|
||||
;;
|
||||
moor)
|
||||
./tool/travis.sh dartfmt dartanalyzer command
|
||||
;;
|
||||
|
|
Loading…
Reference in New Issue