mirror of https://github.com/AMT-Cheif/drift.git
Introduce moor-specific tokens for type converters
This commit is contained in:
parent
3612c78241
commit
aa13aad276
|
@ -8,7 +8,12 @@ class SqlEngine {
|
|||
/// All tables registered with [registerTable].
|
||||
final List<Table> knownTables = [];
|
||||
|
||||
SqlEngine();
|
||||
/// Moor extends the sql grammar a bit to support type converters and other
|
||||
/// features. Enabling this flag will make this engine parse sql with these
|
||||
/// extensions enabled.
|
||||
final bool useMoorExtensions;
|
||||
|
||||
SqlEngine({this.useMoorExtensions = false});
|
||||
|
||||
/// Registers the [table], which means that it can later be used in sql
|
||||
/// statements.
|
||||
|
@ -28,7 +33,7 @@ class SqlEngine {
|
|||
/// Tokenizes the [source] into a list of [Token]s. Each [Token] contains
|
||||
/// information about where it appears in the [source] and a [TokenType].
|
||||
List<Token> tokenize(String source) {
|
||||
final scanner = Scanner(source);
|
||||
final scanner = Scanner(source, scanMoorTokens: useMoorExtensions);
|
||||
final tokens = scanner.scanTokens();
|
||||
|
||||
if (scanner.errors.isNotEmpty) {
|
||||
|
|
|
@ -4,6 +4,9 @@ import 'package:sqlparser/src/reader/tokenizer/utils.dart';
|
|||
|
||||
class Scanner {
|
||||
final String source;
|
||||
|
||||
/// Whether to scan tokens that are only relevant for moor.
|
||||
final bool scanMoorTokens;
|
||||
final SourceFile _file;
|
||||
|
||||
final List<Token> tokens = [];
|
||||
|
@ -21,7 +24,8 @@ class Scanner {
|
|||
return _file.location(_currentOffset);
|
||||
}
|
||||
|
||||
Scanner(this.source) : _file = SourceFile.fromString(source);
|
||||
Scanner(this.source, {this.scanMoorTokens = false})
|
||||
: _file = SourceFile.fromString(source);
|
||||
|
||||
List<Token> scanTokens() {
|
||||
while (!_isAtEnd) {
|
||||
|
@ -131,6 +135,13 @@ class Scanner {
|
|||
// todo sqlite also allows string literals with double ticks, we don't
|
||||
_identifier(escapedInQuotes: true);
|
||||
break;
|
||||
case '`':
|
||||
if (scanMoorTokens) {
|
||||
_inlineDart();
|
||||
} else {
|
||||
_unexpectedToken();
|
||||
}
|
||||
break;
|
||||
case ' ':
|
||||
case '\t':
|
||||
case '\n':
|
||||
|
@ -143,12 +154,16 @@ class Scanner {
|
|||
} else if (canStartColumnName(char)) {
|
||||
_identifier();
|
||||
} else {
|
||||
errors.add(TokenizerError('Unexpected character.', _currentLocation));
|
||||
_unexpectedToken();
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/// Reports a tokenizer error for a character the scanner can't handle.
void _unexpectedToken() {
  final error = TokenizerError('Unexpected character.', _currentLocation);
  errors.add(error);
}
|
||||
|
||||
String _nextChar() {
|
||||
_currentOffset++;
|
||||
return source.substring(_currentOffset - 1, _currentOffset);
|
||||
|
@ -307,9 +322,28 @@ class Scanner {
|
|||
final text = _currentSpan.text.toUpperCase();
|
||||
if (keywords.containsKey(text)) {
|
||||
tokens.add(KeywordToken(keywords[text], _currentSpan));
|
||||
} else if (scanMoorTokens && moorKeywords.containsKey(text)) {
|
||||
tokens.add(KeywordToken(moorKeywords[text], _currentSpan));
|
||||
} else {
|
||||
tokens.add(IdentifierToken(false, _currentSpan));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Scans an inline Dart snippet that is wrapped in backticks.
///
/// The opening backtick was already consumed by the caller; we only have to
/// advance to the matching closing backtick that terminates the token.
void _inlineDart() {
  // Skip ahead until we see the terminating ` or run out of input.
  for (; _peek() != '`' && !_isAtEnd; _nextChar()) {}

  if (_isAtEnd) {
    errors.add(
        TokenizerError('Unterminated inline Dart code', _currentLocation));
    return;
  }

  _nextChar(); // swallow the closing `
  tokens.add(InlineDartToken(_currentSpan));
}
|
||||
}
|
||||
|
|
|
@ -133,6 +133,10 @@ enum TokenType {
|
|||
|
||||
semicolon,
|
||||
eof,
|
||||
|
||||
/// Moor specific token, used to declare a type converter
|
||||
mapped,
|
||||
inlineDart,
|
||||
}
|
||||
|
||||
const Map<String, TokenType> keywords = {
|
||||
|
@ -226,6 +230,10 @@ const Map<String, TokenType> keywords = {
|
|||
'WINDOW': TokenType.window,
|
||||
};
|
||||
|
||||
/// Keywords that are only recognized when the moor extensions are enabled.
const Map<String, TokenType> moorKeywords = {
  'MAPPED': TokenType.mapped,
};
|
||||
|
||||
class Token {
|
||||
final TokenType type;
|
||||
|
||||
|
@ -266,6 +274,17 @@ class IdentifierToken extends Token {
|
|||
: super(TokenType.identifier, span);
|
||||
}
|
||||
|
||||
/// Inline Dart appearing in a create table statement. Only parsed when the moor
/// extensions are enabled. Dart code is wrapped in backticks.
class InlineDartToken extends Token {
  InlineDartToken(FileSpan span) : super(TokenType.inlineDart, span);

  /// The Dart code of this token, without the wrapping backticks.
  String get dartCode {
    // The lexeme includes the delimiting backticks; drop the first and last
    // character to get the plain Dart source.
    final withBackticks = lexeme;
    return withBackticks.substring(1, withBackticks.length - 1);
  }
}
|
||||
|
||||
/// Used for tokens that are keywords. We use this special class without any
|
||||
/// additional properties to ease syntax highlighting, as it allows us to find
|
||||
/// the keywords easily.
|
||||
|
|
|
@ -0,0 +1,26 @@
|
|||
import 'package:test/test.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/scanner.dart';
|
||||
import 'package:sqlparser/src/reader/tokenizer/token.dart';
|
||||
|
||||
void main() {
  test('parses moor specific tokens', () {
    // A column definition using the moor-only MAPPED BY `...` syntax.
    const sql = 'c INTEGER MAPPED BY `const Mapper()` NOT NULL';
    final scanner = Scanner(sql, scanMoorTokens: true);
    final scanned = scanner.scanTokens();

    expect(scanner.errors, isEmpty);
    expect(scanned.map((token) => token.type), [
      TokenType.identifier, // c
      TokenType.identifier, // INTEGER
      TokenType.mapped,
      TokenType.by,
      TokenType.inlineDart, // `const Mapper()`
      TokenType.not,
      TokenType.$null,
      TokenType.eof,
    ]);

    // The backticks must be stripped from the extracted Dart code.
    expect(
        scanned.whereType<InlineDartToken>().single.dartCode, 'const Mapper()');
  });
}
|
Loading…
Reference in New Issue