sqlglot.tokens
from __future__ import annotations

import os
import typing as t
from enum import auto

from sqlglot.errors import SqlglotError, TokenError
from sqlglot.helper import AutoName
from sqlglot.trie import TrieResult, in_trie, new_trie

if t.TYPE_CHECKING:
    from sqlglot.dialects.dialect import DialectType


try:
    from sqlglotrs import (  # type: ignore
        Tokenizer as RsTokenizer,
        TokenizerDialectSettings as RsTokenizerDialectSettings,
        TokenizerSettings as RsTokenizerSettings,
        TokenTypeSettings as RsTokenTypeSettings,
    )

    USE_RS_TOKENIZER = os.environ.get("SQLGLOTRS_TOKENIZER", "1") == "1"
except ImportError:
    USE_RS_TOKENIZER = False


class TokenType(AutoName):
    L_PAREN = auto()
    R_PAREN = auto()
    L_BRACKET = auto()
    R_BRACKET = auto()
    L_BRACE = auto()
    R_BRACE = auto()
    COMMA = auto()
    DOT = auto()
    DASH = auto()
    PLUS = auto()
    COLON = auto()
    DCOLON = auto()
    DQMARK = auto()
    SEMICOLON = auto()
    STAR = auto()
    BACKSLASH = auto()
    SLASH = auto()
    LT = auto()
    LTE = auto()
    GT = auto()
    GTE = auto()
    NOT = auto()
    EQ = auto()
    NEQ = auto()
    NULLSAFE_EQ = auto()
    COLON_EQ = auto()
    AND = auto()
    OR = auto()
    AMP = auto()
    DPIPE = auto()
    PIPE = auto()
    CARET = auto()
    TILDA = auto()
    ARROW = auto()
    DARROW = auto()
    FARROW = auto()
    HASH = auto()
    HASH_ARROW = auto()
    DHASH_ARROW = auto()
    LR_ARROW = auto()
    DAT = auto()
    LT_AT = auto()
    AT_GT = auto()
    DOLLAR = auto()
    PARAMETER = auto()
    SESSION_PARAMETER = auto()
    DAMP = auto()
    XOR = auto()
    DSTAR = auto()

    BLOCK_START = auto()
    BLOCK_END = auto()

    SPACE = auto()
    BREAK = auto()

    STRING = auto()
    NUMBER = auto()
    IDENTIFIER = auto()
    DATABASE = auto()
    COLUMN = auto()
    COLUMN_DEF = auto()
    SCHEMA = auto()
    TABLE = auto()
    VAR = auto()
    BIT_STRING = auto()
    HEX_STRING = auto()
    BYTE_STRING = auto()
    NATIONAL_STRING = auto()
    RAW_STRING = auto()
    HEREDOC_STRING = auto()
    UNICODE_STRING = auto()

    # types
    BIT = auto()
    BOOLEAN = auto()
    TINYINT = auto()
    UTINYINT = auto()
    SMALLINT = auto()
    USMALLINT = auto()
    MEDIUMINT = auto()
    UMEDIUMINT = auto()
    INT = auto()
    UINT = auto()
    BIGINT = auto()
    UBIGINT = auto()
    INT128 = auto()
    UINT128 = auto()
    INT256 = auto()
    UINT256 = auto()
    FLOAT = auto()
    DOUBLE = auto()
    DECIMAL = auto()
    UDECIMAL = auto()
    BIGDECIMAL = auto()
    CHAR = auto()
    NCHAR = auto()
    VARCHAR = auto()
    NVARCHAR = auto()
    TEXT = auto()
    MEDIUMTEXT = auto()
    LONGTEXT = auto()
    MEDIUMBLOB = auto()
    LONGBLOB = auto()
    TINYBLOB = auto()
    TINYTEXT = auto()
    BINARY = auto()
    VARBINARY = auto()
    JSON = auto()
    JSONB = auto()
    TIME = auto()
    TIMETZ = auto()
    TIMESTAMP = auto()
    TIMESTAMPTZ = auto()
    TIMESTAMPLTZ = auto()
    TIMESTAMP_S = auto()
    TIMESTAMP_MS = auto()
    TIMESTAMP_NS = auto()
    DATETIME = auto()
    DATETIME64 = auto()
    DATE = auto()
    DATE32 = auto()
    INT4RANGE = auto()
    INT4MULTIRANGE = auto()
    INT8RANGE = auto()
    INT8MULTIRANGE = auto()
    NUMRANGE = auto()
    NUMMULTIRANGE = auto()
    TSRANGE = auto()
    TSMULTIRANGE = auto()
    TSTZRANGE = auto()
    TSTZMULTIRANGE = auto()
    DATERANGE = auto()
    DATEMULTIRANGE = auto()
    UUID = auto()
    GEOGRAPHY = auto()
    NULLABLE = auto()
    GEOMETRY = auto()
    HLLSKETCH = auto()
    HSTORE = auto()
    SUPER = auto()
    SERIAL = auto()
    SMALLSERIAL = auto()
    BIGSERIAL = auto()
    XML = auto()
    YEAR = auto()
    UNIQUEIDENTIFIER = auto()
    USERDEFINED = auto()
    MONEY = auto()
    SMALLMONEY = auto()
    ROWVERSION = auto()
    IMAGE = auto()
    VARIANT = auto()
    OBJECT = auto()
    INET = auto()
    IPADDRESS = auto()
    IPPREFIX = auto()
    IPV4 = auto()
    IPV6 = auto()
    ENUM = auto()
    ENUM8 = auto()
    ENUM16 = auto()
    FIXEDSTRING = auto()
    LOWCARDINALITY = auto()
    NESTED = auto()
    UNKNOWN = auto()

    # keywords
    ALIAS = auto()
    ALTER = auto()
    ALWAYS = auto()
    ALL = auto()
    ANTI = auto()
    ANY = auto()
    APPLY = auto()
    ARRAY = auto()
    ASC = auto()
    ASOF = auto()
    AUTO_INCREMENT = auto()
    BEGIN = auto()
    BETWEEN = auto()
    CACHE = auto()
    CASE = auto()
    CHARACTER_SET = auto()
    CLUSTER_BY = auto()
    COLLATE = auto()
    COMMAND = auto()
    COMMENT = auto()
    COMMIT = auto()
    CONNECT_BY = auto()
    CONSTRAINT = auto()
    CREATE = auto()
    CROSS = auto()
    CUBE = auto()
    CURRENT_DATE = auto()
    CURRENT_DATETIME = auto()
    CURRENT_TIME = auto()
    CURRENT_TIMESTAMP = auto()
    CURRENT_USER = auto()
    DEFAULT = auto()
    DELETE = auto()
    DESC = auto()
    DESCRIBE = auto()
    DICTIONARY = auto()
    DISTINCT = auto()
    DISTRIBUTE_BY = auto()
    DIV = auto()
    DROP = auto()
    ELSE = auto()
    END = auto()
    ESCAPE = auto()
    EXCEPT = auto()
    EXECUTE = auto()
    EXISTS = auto()
    FALSE = auto()
    FETCH = auto()
    FILTER = auto()
    FINAL = auto()
    FIRST = auto()
    FOR = auto()
    FORCE = auto()
    FOREIGN_KEY = auto()
    FORMAT = auto()
    FROM = auto()
    FULL = auto()
    FUNCTION = auto()
    GLOB = auto()
    GLOBAL = auto()
    GROUP_BY = auto()
    GROUPING_SETS = auto()
    HAVING = auto()
    HINT = auto()
    IGNORE = auto()
    ILIKE = auto()
    ILIKE_ANY = auto()
    IN = auto()
    INDEX = auto()
    INNER = auto()
    INSERT = auto()
    INTERSECT = auto()
    INTERVAL = auto()
    INTO = auto()
    INTRODUCER = auto()
    IRLIKE = auto()
    IS = auto()
    ISNULL = auto()
    JOIN = auto()
    JOIN_MARKER = auto()
    KEEP = auto()
    KILL = auto()
    LANGUAGE = auto()
    LATERAL = auto()
    LEFT = auto()
    LIKE = auto()
    LIKE_ANY = auto()
    LIMIT = auto()
    LOAD = auto()
    LOCK = auto()
    MAP = auto()
    MATCH_RECOGNIZE = auto()
    MEMBER_OF = auto()
    MERGE = auto()
    MOD = auto()
    MODEL = auto()
    NATURAL = auto()
    NEXT = auto()
    NOTNULL = auto()
    NULL = auto()
    OBJECT_IDENTIFIER = auto()
    OFFSET = auto()
    ON = auto()
    OPERATOR = auto()
    ORDER_BY = auto()
    ORDER_SIBLINGS_BY = auto()
    ORDERED = auto()
    ORDINALITY = auto()
    OUTER = auto()
    OVER = auto()
    OVERLAPS = auto()
    OVERWRITE = auto()
    PARTITION = auto()
    PARTITION_BY = auto()
    PERCENT = auto()
    PIVOT = auto()
    PLACEHOLDER = auto()
    PRAGMA = auto()
    PRIMARY_KEY = auto()
    PROCEDURE = auto()
    PROPERTIES = auto()
    PSEUDO_TYPE = auto()
    QUALIFY = auto()
    QUOTE = auto()
    RANGE = auto()
    RECURSIVE = auto()
    REFRESH = auto()
    REPLACE = auto()
    RETURNING = auto()
    REFERENCES = auto()
    RIGHT = auto()
    RLIKE = auto()
    ROLLBACK = auto()
    ROLLUP = auto()
    ROW = auto()
    ROWS = auto()
    SELECT = auto()
    SEMI = auto()
    SEPARATOR = auto()
    SERDE_PROPERTIES = auto()
    SET = auto()
    SETTINGS = auto()
    SHOW = auto()
    SIMILAR_TO = auto()
    SOME = auto()
    SORT_BY = auto()
    START_WITH = auto()
    STRUCT = auto()
    TABLE_SAMPLE = auto()
    TEMPORARY = auto()
    TOP = auto()
    THEN = auto()
    TRUE = auto()
    UNCACHE = auto()
    UNION = auto()
    UNNEST = auto()
    UNPIVOT = auto()
    UPDATE = auto()
    USE = auto()
    USING = auto()
    VALUES = auto()
    VIEW = auto()
    VOLATILE = auto()
    WHEN = auto()
    WHERE = auto()
    WINDOW = auto()
    WITH = auto()
    UNIQUE = auto()
    VERSION_SNAPSHOT = auto()
    TIMESTAMP_SNAPSHOT = auto()


_ALL_TOKEN_TYPES = list(TokenType)
_TOKEN_TYPE_TO_INDEX = {token_type: i for i, token_type in enumerate(_ALL_TOKEN_TYPES)}


class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
        """
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = col
        self.start = start
        self.end = end
        self.comments = [] if comments is None else comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"


class _Tokenizer(type):
    def __new__(cls, clsname, bases, attrs):
        klass = super().__new__(cls, clsname, bases, attrs)

        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
            return dict(
                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
            )

        def _quotes_to_format(
            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}

        klass._QUOTES = _convert_quotes(klass.QUOTES)
        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)

        klass._FORMAT_STRINGS = {
            **{
                p + s: (e, TokenType.NATIONAL_STRING)
                for s, e in klass._QUOTES.items()
                for p in ("n", "N")
            },
            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
            **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
            **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
        }

        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
        klass._COMMENTS = {
            **dict(
                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
                for comment in klass.COMMENTS
            ),
            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
        }

        klass._KEYWORD_TRIE = new_trie(
            key.upper()
            for key in (
                *klass.KEYWORDS,
                *klass._COMMENTS,
                *klass._QUOTES,
                *klass._FORMAT_STRINGS,
            )
            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
        )

        if USE_RS_TOKENIZER:
            settings = RsTokenizerSettings(
                white_space={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.WHITE_SPACE.items()},
                single_tokens={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.SINGLE_TOKENS.items()},
                keywords={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.KEYWORDS.items()},
                numeric_literals=klass.NUMERIC_LITERALS,
                identifiers=klass._IDENTIFIERS,
                identifier_escapes=klass._IDENTIFIER_ESCAPES,
                string_escapes=klass._STRING_ESCAPES,
                quotes=klass._QUOTES,
                format_strings={
                    k: (v1, _TOKEN_TYPE_TO_INDEX[v2])
                    for k, (v1, v2) in klass._FORMAT_STRINGS.items()
                },
                has_bit_strings=bool(klass.BIT_STRINGS),
                has_hex_strings=bool(klass.HEX_STRINGS),
                comments=klass._COMMENTS,
                var_single_tokens=klass.VAR_SINGLE_TOKENS,
                commands={_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMANDS},
                command_prefix_tokens={
                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
                },
            )
            token_types = RsTokenTypeSettings(
                bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
                break_=_TOKEN_TYPE_TO_INDEX[TokenType.BREAK],
                dcolon=_TOKEN_TYPE_TO_INDEX[TokenType.DCOLON],
                heredoc_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEREDOC_STRING],
                hex_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEX_STRING],
                identifier=_TOKEN_TYPE_TO_INDEX[TokenType.IDENTIFIER],
                number=_TOKEN_TYPE_TO_INDEX[TokenType.NUMBER],
                parameter=_TOKEN_TYPE_TO_INDEX[TokenType.PARAMETER],
                semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
                string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
                var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
            )
            klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
        else:
            klass._RS_TOKENIZER = None

        return klass


class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    IDENTIFIER_ESCAPES = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "??": TokenType.DQMARK,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY,
        "GEOMETRY": TokenType.GEOMETRY,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "ILIKE": TokenType.ILIKE,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL,
        "LATERAL": TokenType.LATERAL,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "OR": TokenType.OR,
        "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRAGMA": TokenType.PRAGMA,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RETURNING": TokenType.RETURNING,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORT BY": TokenType.SORT_BY,
        "START WITH": TokenType.START_WITH,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "UNION": TokenType.UNION,
        "UNKNOWN": TokenType.UNKNOWN,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "MEDIUMINT": TokenType.MEDIUMINT,
        "INT1": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "INT16": TokenType.SMALLINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT128": TokenType.INT128,
        "HUGEINT": TokenType.INT128,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "INT32": TokenType.INT,
        "INT64": TokenType.BIGINT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.TINYINT,
        "DEC": TokenType.DECIMAL,
        "DECIMAL": TokenType.DECIMAL,
        "BIGDECIMAL": TokenType.BIGDECIMAL,
        "BIGNUMERIC": TokenType.BIGDECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "LONGTEXT": TokenType.LONGTEXT,
        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
        "TINYTEXT": TokenType.TINYTEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "LONGBLOB": TokenType.LONGBLOB,
        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
        "TINYBLOB": TokenType.TINYBLOB,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMETZ": TokenType.TIMETZ,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "INT4RANGE": TokenType.INT4RANGE,
        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
        "INT8RANGE": TokenType.INT8RANGE,
        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
        "NUMRANGE": TokenType.NUMRANGE,
        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
        "TSRANGE": TokenType.TSRANGE,
        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
        "TSTZRANGE": TokenType.TSTZRANGE,
        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
        "DATERANGE": TokenType.DATERANGE,
        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
        "USER-DEFINED": TokenType.USERDEFINED,
        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}

    COMMENTS = ["--", ("/*", "*/")]

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "dialect",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_rs_dialect_settings",
    )

    def __init__(self, dialect: DialectType = None) -> None:
        from sqlglot.dialects import Dialect

        self.dialect = Dialect.get_or_raise(dialect)

        if USE_RS_TOKENIZER:
            self._rs_dialect_settings = RsTokenizerDialectSettings(
                escape_sequences=self.dialect.ESCAPE_SEQUENCES,
                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
            )

        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if USE_RS_TOKENIZER:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # skip spaces inline rather than iteratively call advance()
            # for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            n = current - self._current
            self._start = current
            self._advance(n if n > 1 else 1)

            if self._char is None:
                break

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if self._char == "\r" and self._peek == "\n":
                i = 2
                self._start += 1

            self._col = 1
            self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def peek(self, i: int = 0) -> str:
        i = self._current + i
        if i < self.size:
            return self.sql[i]
        return ""

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_end_size = len(comment_end)
            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance(alnum=True)

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                after = self.peek(1)
                if after.isdigit() or not after.isalpha():
                    decimal = True
                    self._advance()
                else:
                    return self._add(TokenType.VAR)
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fall back to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fall back to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()
                tag = "" if self._char == end else self._extract_string(end)
                end = f"{start}{tag}{end}"
        else:
            return False

        self._advance(len(start))
        text = self._extract_string(end)

        if base:
            try:
                int(text, base)
            except ValueError:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str, escapes=None) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                if (
                    self.dialect.ESCAPE_SEQUENCES
                    and self._peek
                    and self._char in self.STRING_ESCAPES
                ):
                    escaped_sequence = self.dialect.ESCAPE_SEQUENCES.get(self._char + self._peek)
                    if escaped_sequence:
                        self._advance(2)
                        text += escaped_sequence
                        continue

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        try:
            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
            for token in tokens:
                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
            return tokens
        except Exception as e:
            raise TokenError(str(e))
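
A minimal usage sketch based only on the source above (the SQL text is an arbitrary example): `Tokenizer.tokenize` returns a list of `Token` objects whose `token_type`, `text`, `line`, and `col` fields can be inspected directly.

from sqlglot.tokens import Tokenizer, TokenType

# Tokenize with the default dialect; the Rust tokenizer from sqlglotrs is
# used when available, otherwise the pure-Python scanner above runs.
tokens = Tokenizer().tokenize("SELECT a + 1 AS b FROM t")

assert tokens[0].token_type is TokenType.SELECT  # keywords resolve via Tokenizer.KEYWORDS
for token in tokens:
    print(token.token_type, repr(token.text), token.line, token.col)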
class TokenType(AutoName):
Token types produced by the tokenizer. Each member names one kind of SQL lexeme: punctuation and operators, literals, identifiers, data type names, and keywords.
Inherited Members
- enum.Enum
- name
- value
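Since TokenType is an AutoName enum, each member's value is simply its name, so members round-trip cleanly through strings. A quick doctest-style sketch:

>>> from sqlglot.tokens import TokenType
>>> TokenType.SELECT.value
'SELECT'
>>> TokenType["SELECT"] is TokenType.SELECT
True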
class Token:
class Token:
    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")

    @classmethod
    def number(cls, number: int) -> Token:
        """Returns a NUMBER token with `number` as its text."""
        return cls(TokenType.NUMBER, str(number))

    @classmethod
    def string(cls, string: str) -> Token:
        """Returns a STRING token with `string` as its text."""
        return cls(TokenType.STRING, string)

    @classmethod
    def identifier(cls, identifier: str) -> Token:
        """Returns an IDENTIFIER token with `identifier` as its text."""
        return cls(TokenType.IDENTIFIER, identifier)

    @classmethod
    def var(cls, var: str) -> Token:
        """Returns a VAR token with `var` as its text."""
        return cls(TokenType.VAR, var)

    def __init__(
        self,
        token_type: TokenType,
        text: str,
        line: int = 1,
        col: int = 1,
        start: int = 0,
        end: int = 0,
        comments: t.Optional[t.List[str]] = None,
    ) -> None:
        """Token initializer.

        Args:
            token_type: The TokenType Enum.
            text: The text of the token.
            line: The line that the token ends on.
            col: The column that the token ends on.
            start: The start index of the token.
            end: The ending index of the token.
            comments: The comments to attach to the token.
        """
        self.token_type = token_type
        self.text = text
        self.line = line
        self.col = col
        self.start = start
        self.end = end
        self.comments = [] if comments is None else comments

    def __repr__(self) -> str:
        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
        return f"<Token {attributes}>"
Token(token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: Optional[List[str]] = None)
def __init__(
    self,
    token_type: TokenType,
    text: str,
    line: int = 1,
    col: int = 1,
    start: int = 0,
    end: int = 0,
    comments: t.Optional[t.List[str]] = None,
) -> None:
    """Token initializer.

    Args:
        token_type: The TokenType Enum.
        text: The text of the token.
        line: The line that the token ends on.
        col: The column that the token ends on.
        start: The start index of the token.
        end: The ending index of the token.
        comments: The comments to attach to the token.
    """
    self.token_type = token_type
    self.text = text
    self.line = line
    self.col = col
    self.start = start
    self.end = end
    self.comments = [] if comments is None else comments
Token initializer.
Arguments:
- token_type: The TokenType Enum.
- text: The text of the token.
- line: The line that the token ends on.
- col: The column that the token ends on.
- start: The start index of the token.
- end: The ending index of the token.
- comments: The comments to attach to the token.
@classmethod
def number(cls, number: int) -> Token:
    """Returns a NUMBER token with `number` as its text."""
    return cls(TokenType.NUMBER, str(number))
Returns a NUMBER token with `number` as its text.
@classmethod
def string(cls, string: str) -> Token:
    """Returns a STRING token with `string` as its text."""
    return cls(TokenType.STRING, string)
Returns a STRING token with `string` as its text.
@classmethod
def identifier(cls, identifier: str) -> Token:
    """Returns an IDENTIFIER token with `identifier` as its text."""
    return cls(TokenType.IDENTIFIER, identifier)
Returns an IDENTIFIER token with `identifier` as its text.
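A short usage sketch of the constructor and the helpers above; the repr layout follows __repr__ in the source:

>>> from sqlglot.tokens import Token, TokenType
>>> Token.number(42)
<Token token_type: TokenType.NUMBER, text: 42, line: 1, col: 1, start: 0, end: 0, comments: []>
>>> Token(TokenType.VAR, "x", line=3, col=7).text
'x'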
class Tokenizer:
class Tokenizer(metaclass=_Tokenizer):
    SINGLE_TOKENS = {
        "(": TokenType.L_PAREN,
        ")": TokenType.R_PAREN,
        "[": TokenType.L_BRACKET,
        "]": TokenType.R_BRACKET,
        "{": TokenType.L_BRACE,
        "}": TokenType.R_BRACE,
        "&": TokenType.AMP,
        "^": TokenType.CARET,
        ":": TokenType.COLON,
        ",": TokenType.COMMA,
        ".": TokenType.DOT,
        "-": TokenType.DASH,
        "=": TokenType.EQ,
        ">": TokenType.GT,
        "<": TokenType.LT,
        "%": TokenType.MOD,
        "!": TokenType.NOT,
        "|": TokenType.PIPE,
        "+": TokenType.PLUS,
        ";": TokenType.SEMICOLON,
        "/": TokenType.SLASH,
        "\\": TokenType.BACKSLASH,
        "*": TokenType.STAR,
        "~": TokenType.TILDA,
        "?": TokenType.PLACEHOLDER,
        "@": TokenType.PARAMETER,
        # used for breaking a var like x'y' but nothing else
        # the token type doesn't matter
        "'": TokenType.QUOTE,
        "`": TokenType.IDENTIFIER,
        '"': TokenType.IDENTIFIER,
        "#": TokenType.HASH,
    }

    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
    IDENTIFIER_ESCAPES = ['"']
    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
    STRING_ESCAPES = ["'"]
    VAR_SINGLE_TOKENS: t.Set[str] = set()

    # Autofilled
    _COMMENTS: t.Dict[str, str] = {}
    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
    _IDENTIFIERS: t.Dict[str, str] = {}
    _IDENTIFIER_ESCAPES: t.Set[str] = set()
    _QUOTES: t.Dict[str, str] = {}
    _STRING_ESCAPES: t.Set[str] = set()
    _KEYWORD_TRIE: t.Dict = {}
    _RS_TOKENIZER: t.Optional[t.Any] = None

    KEYWORDS: t.Dict[str, TokenType] = {
        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
        "/*+": TokenType.HINT,
        "==": TokenType.EQ,
        "::": TokenType.DCOLON,
        "||": TokenType.DPIPE,
        ">=": TokenType.GTE,
        "<=": TokenType.LTE,
        "<>": TokenType.NEQ,
        "!=": TokenType.NEQ,
        ":=": TokenType.COLON_EQ,
        "<=>": TokenType.NULLSAFE_EQ,
        "->": TokenType.ARROW,
        "->>": TokenType.DARROW,
        "=>": TokenType.FARROW,
        "#>": TokenType.HASH_ARROW,
        "#>>": TokenType.DHASH_ARROW,
        "<->": TokenType.LR_ARROW,
        "&&": TokenType.DAMP,
        "??": TokenType.DQMARK,
        "ALL": TokenType.ALL,
        "ALWAYS": TokenType.ALWAYS,
        "AND": TokenType.AND,
        "ANTI": TokenType.ANTI,
        "ANY": TokenType.ANY,
        "ASC": TokenType.ASC,
        "AS": TokenType.ALIAS,
        "ASOF": TokenType.ASOF,
        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
        "BEGIN": TokenType.BEGIN,
        "BETWEEN": TokenType.BETWEEN,
        "CACHE": TokenType.CACHE,
        "UNCACHE": TokenType.UNCACHE,
        "CASE": TokenType.CASE,
        "CHARACTER SET": TokenType.CHARACTER_SET,
        "CLUSTER BY": TokenType.CLUSTER_BY,
        "COLLATE": TokenType.COLLATE,
        "COLUMN": TokenType.COLUMN,
        "COMMIT": TokenType.COMMIT,
        "CONNECT BY": TokenType.CONNECT_BY,
        "CONSTRAINT": TokenType.CONSTRAINT,
        "CREATE": TokenType.CREATE,
        "CROSS": TokenType.CROSS,
        "CUBE": TokenType.CUBE,
        "CURRENT_DATE": TokenType.CURRENT_DATE,
        "CURRENT_TIME": TokenType.CURRENT_TIME,
        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
        "CURRENT_USER": TokenType.CURRENT_USER,
        "DATABASE": TokenType.DATABASE,
        "DEFAULT": TokenType.DEFAULT,
        "DELETE": TokenType.DELETE,
        "DESC": TokenType.DESC,
        "DESCRIBE": TokenType.DESCRIBE,
        "DISTINCT": TokenType.DISTINCT,
        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
        "DIV": TokenType.DIV,
        "DROP": TokenType.DROP,
        "ELSE": TokenType.ELSE,
        "END": TokenType.END,
        "ESCAPE": TokenType.ESCAPE,
        "EXCEPT": TokenType.EXCEPT,
        "EXECUTE": TokenType.EXECUTE,
        "EXISTS": TokenType.EXISTS,
        "FALSE": TokenType.FALSE,
        "FETCH": TokenType.FETCH,
        "FILTER": TokenType.FILTER,
        "FIRST": TokenType.FIRST,
        "FULL": TokenType.FULL,
        "FUNCTION": TokenType.FUNCTION,
        "FOR": TokenType.FOR,
        "FOREIGN KEY": TokenType.FOREIGN_KEY,
        "FORMAT": TokenType.FORMAT,
        "FROM": TokenType.FROM,
        "GEOGRAPHY": TokenType.GEOGRAPHY,
        "GEOMETRY": TokenType.GEOMETRY,
        "GLOB": TokenType.GLOB,
        "GROUP BY": TokenType.GROUP_BY,
        "GROUPING SETS": TokenType.GROUPING_SETS,
        "HAVING": TokenType.HAVING,
        "ILIKE": TokenType.ILIKE,
        "IN": TokenType.IN,
        "INDEX": TokenType.INDEX,
        "INET": TokenType.INET,
        "INNER": TokenType.INNER,
        "INSERT": TokenType.INSERT,
        "INTERVAL": TokenType.INTERVAL,
        "INTERSECT": TokenType.INTERSECT,
        "INTO": TokenType.INTO,
        "IS": TokenType.IS,
        "ISNULL": TokenType.ISNULL,
        "JOIN": TokenType.JOIN,
        "KEEP": TokenType.KEEP,
        "KILL": TokenType.KILL,
        "LATERAL": TokenType.LATERAL,
        "LEFT": TokenType.LEFT,
        "LIKE": TokenType.LIKE,
        "LIMIT": TokenType.LIMIT,
        "LOAD": TokenType.LOAD,
        "LOCK": TokenType.LOCK,
        "MERGE": TokenType.MERGE,
        "NATURAL": TokenType.NATURAL,
        "NEXT": TokenType.NEXT,
        "NOT": TokenType.NOT,
        "NOTNULL": TokenType.NOTNULL,
        "NULL": TokenType.NULL,
        "OBJECT": TokenType.OBJECT,
        "OFFSET": TokenType.OFFSET,
        "ON": TokenType.ON,
        "OR": TokenType.OR,
        "XOR": TokenType.XOR,
        "ORDER BY": TokenType.ORDER_BY,
        "ORDINALITY": TokenType.ORDINALITY,
        "OUTER": TokenType.OUTER,
        "OVER": TokenType.OVER,
        "OVERLAPS": TokenType.OVERLAPS,
        "OVERWRITE": TokenType.OVERWRITE,
        "PARTITION": TokenType.PARTITION,
        "PARTITION BY": TokenType.PARTITION_BY,
        "PARTITIONED BY": TokenType.PARTITION_BY,
        "PARTITIONED_BY": TokenType.PARTITION_BY,
        "PERCENT": TokenType.PERCENT,
        "PIVOT": TokenType.PIVOT,
        "PRAGMA": TokenType.PRAGMA,
        "PRIMARY KEY": TokenType.PRIMARY_KEY,
        "PROCEDURE": TokenType.PROCEDURE,
        "QUALIFY": TokenType.QUALIFY,
        "RANGE": TokenType.RANGE,
        "RECURSIVE": TokenType.RECURSIVE,
        "REGEXP": TokenType.RLIKE,
        "REPLACE": TokenType.REPLACE,
        "RETURNING": TokenType.RETURNING,
        "REFERENCES": TokenType.REFERENCES,
        "RIGHT": TokenType.RIGHT,
        "RLIKE": TokenType.RLIKE,
        "ROLLBACK": TokenType.ROLLBACK,
        "ROLLUP": TokenType.ROLLUP,
        "ROW": TokenType.ROW,
        "ROWS": TokenType.ROWS,
        "SCHEMA": TokenType.SCHEMA,
        "SELECT": TokenType.SELECT,
        "SEMI": TokenType.SEMI,
        "SET": TokenType.SET,
        "SETTINGS": TokenType.SETTINGS,
        "SHOW": TokenType.SHOW,
        "SIMILAR TO": TokenType.SIMILAR_TO,
        "SOME": TokenType.SOME,
        "SORT BY": TokenType.SORT_BY,
        "START WITH": TokenType.START_WITH,
        "TABLE": TokenType.TABLE,
        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
        "TEMP": TokenType.TEMPORARY,
        "TEMPORARY": TokenType.TEMPORARY,
        "THEN": TokenType.THEN,
        "TRUE": TokenType.TRUE,
        "UNION": TokenType.UNION,
        "UNKNOWN": TokenType.UNKNOWN,
        "UNNEST": TokenType.UNNEST,
        "UNPIVOT": TokenType.UNPIVOT,
        "UPDATE": TokenType.UPDATE,
        "USE": TokenType.USE,
        "USING": TokenType.USING,
        "UUID": TokenType.UUID,
        "VALUES": TokenType.VALUES,
        "VIEW": TokenType.VIEW,
        "VOLATILE": TokenType.VOLATILE,
        "WHEN": TokenType.WHEN,
        "WHERE": TokenType.WHERE,
        "WINDOW": TokenType.WINDOW,
        "WITH": TokenType.WITH,
        "APPLY": TokenType.APPLY,
        "ARRAY": TokenType.ARRAY,
        "BIT": TokenType.BIT,
        "BOOL": TokenType.BOOLEAN,
        "BOOLEAN": TokenType.BOOLEAN,
        "BYTE": TokenType.TINYINT,
        "MEDIUMINT": TokenType.MEDIUMINT,
        "INT1": TokenType.TINYINT,
        "TINYINT": TokenType.TINYINT,
        "INT16": TokenType.SMALLINT,
        "SHORT": TokenType.SMALLINT,
        "SMALLINT": TokenType.SMALLINT,
        "INT128": TokenType.INT128,
        "HUGEINT": TokenType.INT128,
        "INT2": TokenType.SMALLINT,
        "INTEGER": TokenType.INT,
        "INT": TokenType.INT,
        "INT4": TokenType.INT,
        "INT32": TokenType.INT,
        "INT64": TokenType.BIGINT,
        "LONG": TokenType.BIGINT,
        "BIGINT": TokenType.BIGINT,
        "INT8": TokenType.TINYINT,
        "DEC": TokenType.DECIMAL,
        "DECIMAL": TokenType.DECIMAL,
        "BIGDECIMAL": TokenType.BIGDECIMAL,
        "BIGNUMERIC": TokenType.BIGDECIMAL,
        "MAP": TokenType.MAP,
        "NULLABLE": TokenType.NULLABLE,
        "NUMBER": TokenType.DECIMAL,
        "NUMERIC": TokenType.DECIMAL,
        "FIXED": TokenType.DECIMAL,
        "REAL": TokenType.FLOAT,
        "FLOAT": TokenType.FLOAT,
        "FLOAT4": TokenType.FLOAT,
        "FLOAT8": TokenType.DOUBLE,
        "DOUBLE": TokenType.DOUBLE,
        "DOUBLE PRECISION": TokenType.DOUBLE,
        "JSON": TokenType.JSON,
        "CHAR": TokenType.CHAR,
        "CHARACTER": TokenType.CHAR,
        "NCHAR": TokenType.NCHAR,
        "VARCHAR": TokenType.VARCHAR,
        "VARCHAR2": TokenType.VARCHAR,
        "NVARCHAR": TokenType.NVARCHAR,
        "NVARCHAR2": TokenType.NVARCHAR,
        "STR": TokenType.TEXT,
        "STRING": TokenType.TEXT,
        "TEXT": TokenType.TEXT,
        "LONGTEXT": TokenType.LONGTEXT,
        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
        "TINYTEXT": TokenType.TINYTEXT,
        "CLOB": TokenType.TEXT,
        "LONGVARCHAR": TokenType.TEXT,
        "BINARY": TokenType.BINARY,
        "BLOB": TokenType.VARBINARY,
        "LONGBLOB": TokenType.LONGBLOB,
        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
        "TINYBLOB": TokenType.TINYBLOB,
        "BYTEA": TokenType.VARBINARY,
        "VARBINARY": TokenType.VARBINARY,
        "TIME": TokenType.TIME,
        "TIMETZ": TokenType.TIMETZ,
        "TIMESTAMP": TokenType.TIMESTAMP,
        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
        "DATE": TokenType.DATE,
        "DATETIME": TokenType.DATETIME,
        "INT4RANGE": TokenType.INT4RANGE,
        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
        "INT8RANGE": TokenType.INT8RANGE,
        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
        "NUMRANGE": TokenType.NUMRANGE,
        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
        "TSRANGE": TokenType.TSRANGE,
        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
        "TSTZRANGE": TokenType.TSTZRANGE,
        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
        "DATERANGE": TokenType.DATERANGE,
        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
        "UNIQUE": TokenType.UNIQUE,
        "STRUCT": TokenType.STRUCT,
        "VARIANT": TokenType.VARIANT,
        "ALTER": TokenType.ALTER,
        "ANALYZE": TokenType.COMMAND,
        "CALL": TokenType.COMMAND,
        "COMMENT": TokenType.COMMENT,
        "COPY": TokenType.COMMAND,
        "EXPLAIN": TokenType.COMMAND,
        "GRANT": TokenType.COMMAND,
        "OPTIMIZE": TokenType.COMMAND,
        "PREPARE": TokenType.COMMAND,
        "TRUNCATE": TokenType.COMMAND,
        "VACUUM": TokenType.COMMAND,
        "USER-DEFINED": TokenType.USERDEFINED,
        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
    }

    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
        " ": TokenType.SPACE,
        "\t": TokenType.SPACE,
        "\n": TokenType.BREAK,
        "\r": TokenType.BREAK,
    }

    COMMANDS = {
        TokenType.COMMAND,
        TokenType.EXECUTE,
        TokenType.FETCH,
        TokenType.SHOW,
    }

    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}

    # handle numeric literals like in hive (3L = BIGINT)
    NUMERIC_LITERALS: t.Dict[str, str] = {}

    COMMENTS = ["--", ("/*", "*/")]

    __slots__ = (
        "sql",
        "size",
        "tokens",
        "dialect",
        "_start",
        "_current",
        "_line",
        "_col",
        "_comments",
        "_char",
        "_end",
        "_peek",
        "_prev_token_line",
        "_rs_dialect_settings",
    )

    def __init__(self, dialect: DialectType = None) -> None:
        from sqlglot.dialects import Dialect

        self.dialect = Dialect.get_or_raise(dialect)

        if USE_RS_TOKENIZER:
            self._rs_dialect_settings = RsTokenizerDialectSettings(
                escape_sequences=self.dialect.ESCAPE_SEQUENCES,
                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
            )

        self.reset()

    def reset(self) -> None:
        self.sql = ""
        self.size = 0
        self.tokens: t.List[Token] = []
        self._start = 0
        self._current = 0
        self._line = 1
        self._col = 0
        self._comments: t.List[str] = []

        self._char = ""
        self._end = False
        self._peek = ""
        self._prev_token_line = -1

    def tokenize(self, sql: str) -> t.List[Token]:
        """Returns a list of tokens corresponding to the SQL string `sql`."""
        if USE_RS_TOKENIZER:
            return self.tokenize_rs(sql)

        self.reset()
        self.sql = sql
        self.size = len(sql)

        try:
            self._scan()
        except Exception as e:
            start = max(self._current - 50, 0)
            end = min(self._current + 50, self.size - 1)
            context = self.sql[start:end]
            raise TokenError(f"Error tokenizing '{context}'") from e

        return self.tokens

    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
        while self.size and not self._end:
            current = self._current

            # skip spaces inline rather than iteratively call advance()
            # for performance reasons
            while current < self.size:
                char = self.sql[current]

                if char.isspace() and (char == " " or char == "\t"):
                    current += 1
                else:
                    break

            n = current - self._current
            self._start = current
            self._advance(n if n > 1 else 1)

            if self._char is None:
                break

            if not self._char.isspace():
                if self._char.isdigit():
                    self._scan_number()
                elif self._char in self._IDENTIFIERS:
                    self._scan_identifier(self._IDENTIFIERS[self._char])
                else:
                    self._scan_keywords()

            if until and until():
                break

        if self.tokens and self._comments:
            self.tokens[-1].comments.extend(self._comments)

    def _chars(self, size: int) -> str:
        if size == 1:
            return self._char

        start = self._current - 1
        end = start + size

        return self.sql[start:end] if end <= self.size else ""

    def _advance(self, i: int = 1, alnum: bool = False) -> None:
        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
            # Ensures we don't count an extra line if we get a \r\n line break sequence
            if self._char == "\r" and self._peek == "\n":
                i = 2
                self._start += 1

            self._col = 1
            self._line += 1
        else:
            self._col += i

        self._current += i
        self._end = self._current >= self.size
        self._char = self.sql[self._current - 1]
        self._peek = "" if self._end else self.sql[self._current]

        if alnum and self._char.isalnum():
            # Here we use local variables instead of attributes for better performance
            _col = self._col
            _current = self._current
            _end = self._end
            _peek = self._peek

            while _peek.isalnum():
                _col += 1
                _current += 1
                _end = _current >= self.size
                _peek = "" if _end else self.sql[_current]

            self._col = _col
            self._current = _current
            self._end = _end
            self._peek = _peek
            self._char = self.sql[_current - 1]

    @property
    def _text(self) -> str:
        return self.sql[self._start : self._current]

    def peek(self, i: int = 0) -> str:
        i = self._current + i
        if i < self.size:
            return self.sql[i]
        return ""

    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
        self._prev_token_line = self._line

        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []

        self.tokens.append(
            Token(
                token_type,
                text=self._text if text is None else text,
                line=self._line,
                col=self._col,
                start=self._start,
                end=self._current - 1,
                comments=self._comments,
            )
        )
        self._comments = []

        # If we have either a semicolon or a begin token before the command's token, we'll parse
        # whatever follows the command's token as a string
        if (
            token_type in self.COMMANDS
            and self._peek != ";"
            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
        ):
            start = self._current
            tokens = len(self.tokens)
            self._scan(lambda: self._peek == ";")
            self.tokens = self.tokens[:tokens]
            text = self.sql[start : self._current].strip()
            if text:
                self._add(TokenType.STRING, text)

    def _scan_keywords(self) -> None:
        size = 0
        word = None
        chars = self._text
        char = chars
        prev_space = False
        skip = False
        trie = self._KEYWORD_TRIE
        single_token = char in self.SINGLE_TOKENS

        while chars:
            if skip:
                result = TrieResult.PREFIX
            else:
                result, trie = in_trie(trie, char.upper())

            if result == TrieResult.FAILED:
                break
            if result == TrieResult.EXISTS:
                word = chars

            end = self._current + size
            size += 1

            if end < self.size:
                char = self.sql[end]
                single_token = single_token or char in self.SINGLE_TOKENS
                is_space = char.isspace()

                if not is_space or not prev_space:
                    if is_space:
                        char = " "
                    chars += char
                    prev_space = is_space
                    skip = False
                else:
                    skip = True
            else:
                char = ""
                break

        if word:
            if self._scan_string(word):
                return
            if self._scan_comment(word):
                return
            if prev_space or single_token or not char:
                self._advance(size - 1)
                word = word.upper()
                self._add(self.KEYWORDS[word], text=word)
                return

        if self._char in self.SINGLE_TOKENS:
            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
            return

        self._scan_var()

    def _scan_comment(self, comment_start: str) -> bool:
        if comment_start not in self._COMMENTS:
            return False

        comment_start_line = self._line
        comment_start_size = len(comment_start)
        comment_end = self._COMMENTS[comment_start]

        if comment_end:
            # Skip the comment's start delimiter
            self._advance(comment_start_size)

            comment_end_size = len(comment_end)
            while not self._end and self._chars(comment_end_size) != comment_end:
                self._advance(alnum=True)

            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
            self._advance(comment_end_size - 1)
        else:
            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
                self._advance(alnum=True)
            self._comments.append(self._text[comment_start_size:])

        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
        # Multiple consecutive comments are preserved by appending them to the current comments list.
        if comment_start_line == self._prev_token_line:
            self.tokens[-1].comments.extend(self._comments)
            self._comments = []
            self._prev_token_line = self._line

        return True

    def _scan_number(self) -> None:
        if self._char == "0":
            peek = self._peek.upper()
            if peek == "B":
                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
            elif peek == "X":
                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)

        decimal = False
        scientific = 0

        while True:
            if self._peek.isdigit():
                self._advance()
            elif self._peek == "." and not decimal:
                after = self.peek(1)
                if after.isdigit() or not after.isalpha():
                    decimal = True
                    self._advance()
                else:
                    return self._add(TokenType.VAR)
            elif self._peek in ("-", "+") and scientific == 1:
                scientific += 1
                self._advance()
            elif self._peek.upper() == "E" and not scientific:
                scientific += 1
                self._advance()
            elif self._peek.isidentifier():
                number_text = self._text
                literal = ""

                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
                    literal += self._peek
                    self._advance()

                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))

                if token_type:
                    self._add(TokenType.NUMBER, number_text)
                    self._add(TokenType.DCOLON, "::")
                    return self._add(token_type, literal)
                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
                    return self._add(TokenType.VAR)

                self._advance(-len(literal))
                return self._add(TokenType.NUMBER, number_text)
            else:
                return self._add(TokenType.NUMBER)

    def _scan_bits(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
            int(value, 2)
            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _scan_hex(self) -> None:
        self._advance()
        value = self._extract_value()
        try:
            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
            int(value, 16)
            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
        except ValueError:
            self._add(TokenType.IDENTIFIER)

    def _extract_value(self) -> str:
        while True:
            char = self._peek.strip()
            if char and char not in self.SINGLE_TOKENS:
                self._advance(alnum=True)
            else:
                break

        return self._text

    def _scan_string(self, start: str) -> bool:
        base = None
        token_type = TokenType.STRING

        if start in self._QUOTES:
            end = self._QUOTES[start]
        elif start in self._FORMAT_STRINGS:
            end, token_type = self._FORMAT_STRINGS[start]

            if token_type == TokenType.HEX_STRING:
                base = 16
            elif token_type == TokenType.BIT_STRING:
                base = 2
            elif token_type == TokenType.HEREDOC_STRING:
                self._advance()
                tag = "" if self._char == end else self._extract_string(end)
                end = f"{start}{tag}{end}"
        else:
            return False

        self._advance(len(start))
        text = self._extract_string(end)

        if base:
            try:
                int(text, base)
            except:
                raise TokenError(
                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
                )

        self._add(token_type, text)
        return True

    def _scan_identifier(self, identifier_end: str) -> None:
        self._advance()
        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
        self._add(TokenType.IDENTIFIER, text)

    def _scan_var(self) -> None:
        while True:
            char = self._peek.strip()
            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
                self._advance(alnum=True)
            else:
                break

        self._add(
            TokenType.VAR
            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
        )

    def _extract_string(self, delimiter: str, escapes=None) -> str:
        text = ""
        delim_size = len(delimiter)
        escapes = self._STRING_ESCAPES if escapes is None else escapes

        while True:
            if (
                self._char in escapes
                and (self._peek == delimiter or self._peek in escapes)
                and (self._char not in self._QUOTES or self._char == self._peek)
            ):
                if self._peek == delimiter:
                    text += self._peek
                else:
                    text += self._char + self._peek

                if self._current + 1 < self.size:
                    self._advance(2)
                else:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
            else:
                if self._chars(delim_size) == delimiter:
                    if delim_size > 1:
                        self._advance(delim_size - 1)
                    break

                if self._end:
                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")

                if (
                    self.dialect.ESCAPE_SEQUENCES
                    and self._peek
                    and self._char in self.STRING_ESCAPES
                ):
                    escaped_sequence = self.dialect.ESCAPE_SEQUENCES.get(self._char + self._peek)
                    if escaped_sequence:
                        self._advance(2)
                        text += escaped_sequence
                        continue

                current = self._current - 1
                self._advance(alnum=True)
                text += self.sql[current : self._current - 1]

        return text

    def tokenize_rs(self, sql: str) -> t.List[Token]:
        if not self._RS_TOKENIZER:
            raise SqlglotError("Rust tokenizer is not available")

        try:
            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
            for token in tokens:
                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
            return tokens
        except Exception as e:
            raise TokenError(str(e))
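One consequence of the COMMANDS handling in _add above: when a statement begins with a command keyword, everything that follows it (up to a semicolon) is collapsed into a single STRING token. A sketch, assuming the default dialect:

>>> from sqlglot.tokens import Tokenizer
>>> [(t.token_type.name, t.text) for t in Tokenizer().tokenize("EXPLAIN SELECT 1")]
[('COMMAND', 'EXPLAIN'), ('STRING', 'SELECT 1')]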
Tokenizer(dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None)
def __init__(self, dialect: DialectType = None) -> None:
    from sqlglot.dialects import Dialect

    self.dialect = Dialect.get_or_raise(dialect)

    if USE_RS_TOKENIZER:
        self._rs_dialect_settings = RsTokenizerDialectSettings(
            escape_sequences=self.dialect.ESCAPE_SEQUENCES,
            identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
        )

    self.reset()
SINGLE_TOKENS =
{'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>}
KEYWORDS: Dict[str, TokenType] =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 
'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 
'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 
'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
WHITE_SPACE: Dict[Optional[str], TokenType] =
{' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>}
COMMANDS =
{<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.SHOW: 'SHOW'>}
COMMAND_PREFIX_TOKENS =
{<TokenType.SEMICOLON: 'SEMICOLON'>, <TokenType.BEGIN: 'BEGIN'>}
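These class-level tables are the extension points that dialect tokenizers override; the _Tokenizer metaclass rebuilds the derived lookup structures (the keyword trie and friends) for each subclass. A minimal sketch of adding one keyword (MyTokenizer is a hypothetical subclass, not part of sqlglot):

>>> from sqlglot.tokens import Tokenizer, TokenType
>>> class MyTokenizer(Tokenizer):
...     KEYWORDS = {**Tokenizer.KEYWORDS, "ANALYSE": TokenType.COMMAND}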
def reset(self) -> None:
def reset(self) -> None:
    self.sql = ""
    self.size = 0
    self.tokens: t.List[Token] = []
    self._start = 0
    self._current = 0
    self._line = 1
    self._col = 0
    self._comments: t.List[str] = []

    self._char = ""
    self._end = False
    self._peek = ""
    self._prev_token_line = -1
def tokenize(self, sql: str) -> t.List[Token]:
    """Returns a list of tokens corresponding to the SQL string `sql`."""
    if USE_RS_TOKENIZER:
        return self.tokenize_rs(sql)

    self.reset()
    self.sql = sql
    self.size = len(sql)

    try:
        self._scan()
    except Exception as e:
        start = max(self._current - 50, 0)
        end = min(self._current + 50, self.size - 1)
        context = self.sql[start:end]
        raise TokenError(f"Error tokenizing '{context}'") from e

    return self.tokens
Returns a list of tokens corresponding to the SQL string `sql`.
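For example, with the default dialect:

>>> from sqlglot.tokens import Tokenizer
>>> [(t.token_type.name, t.text) for t in Tokenizer().tokenize("SELECT 1 AS x")]
[('SELECT', 'SELECT'), ('NUMBER', '1'), ('ALIAS', 'AS'), ('VAR', 'x')]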
def tokenize_rs(self, sql: str) -> t.List[Token]:
    if not self._RS_TOKENIZER:
        raise SqlglotError("Rust tokenizer is not available")

    try:
        tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
        return tokens
    except Exception as e:
        raise TokenError(str(e))
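tokenize_rs is only reachable when the sqlglotrs extension is importable, in which case it is used by default; setting the SQLGLOTRS_TOKENIZER environment variable to anything other than "1" before sqlglot is first imported forces the pure-Python scanner. A sketch:

>>> import os
>>> os.environ["SQLGLOTRS_TOKENIZER"] = "0"  # must be set before importing sqlglot
>>> from sqlglot.tokens import USE_RS_TOKENIZER
>>> USE_RS_TOKENIZER
False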