
sqlglot.tokens

   1from __future__ import annotations
   2
   3import os
   4import typing as t
   5from enum import auto
   6
   7from sqlglot.errors import SqlglotError, TokenError
   8from sqlglot.helper import AutoName
   9from sqlglot.trie import TrieResult, in_trie, new_trie
  10
  11if t.TYPE_CHECKING:
  12    from sqlglot.dialects.dialect import DialectType
  13
  14
  15try:
  16    from sqlglotrs import (  # type: ignore
  17        Tokenizer as RsTokenizer,
  18        TokenizerDialectSettings as RsTokenizerDialectSettings,
  19        TokenizerSettings as RsTokenizerSettings,
  20        TokenTypeSettings as RsTokenTypeSettings,
  21    )
  22
  23    USE_RS_TOKENIZER = os.environ.get("SQLGLOTRS_TOKENIZER", "1") == "1"
  24except ImportError:
  25    USE_RS_TOKENIZER = False
  26
  27
  28class TokenType(AutoName):
  29    L_PAREN = auto()
  30    R_PAREN = auto()
  31    L_BRACKET = auto()
  32    R_BRACKET = auto()
  33    L_BRACE = auto()
  34    R_BRACE = auto()
  35    COMMA = auto()
  36    DOT = auto()
  37    DASH = auto()
  38    PLUS = auto()
  39    COLON = auto()
  40    DCOLON = auto()
  41    DQMARK = auto()
  42    SEMICOLON = auto()
  43    STAR = auto()
  44    BACKSLASH = auto()
  45    SLASH = auto()
  46    LT = auto()
  47    LTE = auto()
  48    GT = auto()
  49    GTE = auto()
  50    NOT = auto()
  51    EQ = auto()
  52    NEQ = auto()
  53    NULLSAFE_EQ = auto()
  54    COLON_EQ = auto()
  55    AND = auto()
  56    OR = auto()
  57    AMP = auto()
  58    DPIPE = auto()
  59    PIPE = auto()
  60    CARET = auto()
  61    TILDA = auto()
  62    ARROW = auto()
  63    DARROW = auto()
  64    FARROW = auto()
  65    HASH = auto()
  66    HASH_ARROW = auto()
  67    DHASH_ARROW = auto()
  68    LR_ARROW = auto()
  69    DAT = auto()
  70    LT_AT = auto()
  71    AT_GT = auto()
  72    DOLLAR = auto()
  73    PARAMETER = auto()
  74    SESSION_PARAMETER = auto()
  75    DAMP = auto()
  76    XOR = auto()
  77    DSTAR = auto()
  78
  79    BLOCK_START = auto()
  80    BLOCK_END = auto()
  81
  82    SPACE = auto()
  83    BREAK = auto()
  84
  85    STRING = auto()
  86    NUMBER = auto()
  87    IDENTIFIER = auto()
  88    DATABASE = auto()
  89    COLUMN = auto()
  90    COLUMN_DEF = auto()
  91    SCHEMA = auto()
  92    TABLE = auto()
  93    VAR = auto()
  94    BIT_STRING = auto()
  95    HEX_STRING = auto()
  96    BYTE_STRING = auto()
  97    NATIONAL_STRING = auto()
  98    RAW_STRING = auto()
  99    HEREDOC_STRING = auto()
 100    UNICODE_STRING = auto()
 101
 102    # types
 103    BIT = auto()
 104    BOOLEAN = auto()
 105    TINYINT = auto()
 106    UTINYINT = auto()
 107    SMALLINT = auto()
 108    USMALLINT = auto()
 109    MEDIUMINT = auto()
 110    UMEDIUMINT = auto()
 111    INT = auto()
 112    UINT = auto()
 113    BIGINT = auto()
 114    UBIGINT = auto()
 115    INT128 = auto()
 116    UINT128 = auto()
 117    INT256 = auto()
 118    UINT256 = auto()
 119    FLOAT = auto()
 120    DOUBLE = auto()
 121    DECIMAL = auto()
 122    UDECIMAL = auto()
 123    BIGDECIMAL = auto()
 124    CHAR = auto()
 125    NCHAR = auto()
 126    VARCHAR = auto()
 127    NVARCHAR = auto()
 128    TEXT = auto()
 129    MEDIUMTEXT = auto()
 130    LONGTEXT = auto()
 131    MEDIUMBLOB = auto()
 132    LONGBLOB = auto()
 133    TINYBLOB = auto()
 134    TINYTEXT = auto()
 135    BINARY = auto()
 136    VARBINARY = auto()
 137    JSON = auto()
 138    JSONB = auto()
 139    TIME = auto()
 140    TIMETZ = auto()
 141    TIMESTAMP = auto()
 142    TIMESTAMPTZ = auto()
 143    TIMESTAMPLTZ = auto()
 144    TIMESTAMP_S = auto()
 145    TIMESTAMP_MS = auto()
 146    TIMESTAMP_NS = auto()
 147    DATETIME = auto()
 148    DATETIME64 = auto()
 149    DATE = auto()
 150    INT4RANGE = auto()
 151    INT4MULTIRANGE = auto()
 152    INT8RANGE = auto()
 153    INT8MULTIRANGE = auto()
 154    NUMRANGE = auto()
 155    NUMMULTIRANGE = auto()
 156    TSRANGE = auto()
 157    TSMULTIRANGE = auto()
 158    TSTZRANGE = auto()
 159    TSTZMULTIRANGE = auto()
 160    DATERANGE = auto()
 161    DATEMULTIRANGE = auto()
 162    UUID = auto()
 163    GEOGRAPHY = auto()
 164    NULLABLE = auto()
 165    GEOMETRY = auto()
 166    HLLSKETCH = auto()
 167    HSTORE = auto()
 168    SUPER = auto()
 169    SERIAL = auto()
 170    SMALLSERIAL = auto()
 171    BIGSERIAL = auto()
 172    XML = auto()
 173    YEAR = auto()
 174    UNIQUEIDENTIFIER = auto()
 175    USERDEFINED = auto()
 176    MONEY = auto()
 177    SMALLMONEY = auto()
 178    ROWVERSION = auto()
 179    IMAGE = auto()
 180    VARIANT = auto()
 181    OBJECT = auto()
 182    INET = auto()
 183    IPADDRESS = auto()
 184    IPPREFIX = auto()
 185    ENUM = auto()
 186    ENUM8 = auto()
 187    ENUM16 = auto()
 188    FIXEDSTRING = auto()
 189    LOWCARDINALITY = auto()
 190    NESTED = auto()
 191    UNKNOWN = auto()
 192
 193    # keywords
 194    ALIAS = auto()
 195    ALTER = auto()
 196    ALWAYS = auto()
 197    ALL = auto()
 198    ANTI = auto()
 199    ANY = auto()
 200    APPLY = auto()
 201    ARRAY = auto()
 202    ASC = auto()
 203    ASOF = auto()
 204    AUTO_INCREMENT = auto()
 205    BEGIN = auto()
 206    BETWEEN = auto()
 207    CACHE = auto()
 208    CASE = auto()
 209    CHARACTER_SET = auto()
 210    CLUSTER_BY = auto()
 211    COLLATE = auto()
 212    COMMAND = auto()
 213    COMMENT = auto()
 214    COMMIT = auto()
 215    CONNECT_BY = auto()
 216    CONSTRAINT = auto()
 217    CREATE = auto()
 218    CROSS = auto()
 219    CUBE = auto()
 220    CURRENT_DATE = auto()
 221    CURRENT_DATETIME = auto()
 222    CURRENT_TIME = auto()
 223    CURRENT_TIMESTAMP = auto()
 224    CURRENT_USER = auto()
 225    DEFAULT = auto()
 226    DELETE = auto()
 227    DESC = auto()
 228    DESCRIBE = auto()
 229    DICTIONARY = auto()
 230    DISTINCT = auto()
 231    DISTRIBUTE_BY = auto()
 232    DIV = auto()
 233    DROP = auto()
 234    ELSE = auto()
 235    END = auto()
 236    ESCAPE = auto()
 237    EXCEPT = auto()
 238    EXECUTE = auto()
 239    EXISTS = auto()
 240    FALSE = auto()
 241    FETCH = auto()
 242    FILTER = auto()
 243    FINAL = auto()
 244    FIRST = auto()
 245    FOR = auto()
 246    FORCE = auto()
 247    FOREIGN_KEY = auto()
 248    FORMAT = auto()
 249    FROM = auto()
 250    FULL = auto()
 251    FUNCTION = auto()
 252    GLOB = auto()
 253    GLOBAL = auto()
 254    GROUP_BY = auto()
 255    GROUPING_SETS = auto()
 256    HAVING = auto()
 257    HINT = auto()
 258    IGNORE = auto()
 259    ILIKE = auto()
 260    ILIKE_ANY = auto()
 261    IN = auto()
 262    INDEX = auto()
 263    INNER = auto()
 264    INSERT = auto()
 265    INTERSECT = auto()
 266    INTERVAL = auto()
 267    INTO = auto()
 268    INTRODUCER = auto()
 269    IRLIKE = auto()
 270    IS = auto()
 271    ISNULL = auto()
 272    JOIN = auto()
 273    JOIN_MARKER = auto()
 274    KEEP = auto()
 275    KILL = auto()
 276    LANGUAGE = auto()
 277    LATERAL = auto()
 278    LEFT = auto()
 279    LIKE = auto()
 280    LIKE_ANY = auto()
 281    LIMIT = auto()
 282    LOAD = auto()
 283    LOCK = auto()
 284    MAP = auto()
 285    MATCH_RECOGNIZE = auto()
 286    MEMBER_OF = auto()
 287    MERGE = auto()
 288    MOD = auto()
 289    MODEL = auto()
 290    NATURAL = auto()
 291    NEXT = auto()
 292    NOTNULL = auto()
 293    NULL = auto()
 294    OBJECT_IDENTIFIER = auto()
 295    OFFSET = auto()
 296    ON = auto()
 297    OPERATOR = auto()
 298    ORDER_BY = auto()
 299    ORDERED = auto()
 300    ORDINALITY = auto()
 301    OUTER = auto()
 302    OVER = auto()
 303    OVERLAPS = auto()
 304    OVERWRITE = auto()
 305    PARTITION = auto()
 306    PARTITION_BY = auto()
 307    PERCENT = auto()
 308    PIVOT = auto()
 309    PLACEHOLDER = auto()
 310    PRAGMA = auto()
 311    PRIMARY_KEY = auto()
 312    PROCEDURE = auto()
 313    PROPERTIES = auto()
 314    PSEUDO_TYPE = auto()
 315    QUALIFY = auto()
 316    QUOTE = auto()
 317    RANGE = auto()
 318    RECURSIVE = auto()
 319    REFRESH = auto()
 320    REPLACE = auto()
 321    RETURNING = auto()
 322    REFERENCES = auto()
 323    RIGHT = auto()
 324    RLIKE = auto()
 325    ROLLBACK = auto()
 326    ROLLUP = auto()
 327    ROW = auto()
 328    ROWS = auto()
 329    SELECT = auto()
 330    SEMI = auto()
 331    SEPARATOR = auto()
 332    SERDE_PROPERTIES = auto()
 333    SET = auto()
 334    SETTINGS = auto()
 335    SHOW = auto()
 336    SIMILAR_TO = auto()
 337    SOME = auto()
 338    SORT_BY = auto()
 339    START_WITH = auto()
 340    STRUCT = auto()
 341    TABLE_SAMPLE = auto()
 342    TEMPORARY = auto()
 343    TOP = auto()
 344    THEN = auto()
 345    TRUE = auto()
 346    UNCACHE = auto()
 347    UNION = auto()
 348    UNNEST = auto()
 349    UNPIVOT = auto()
 350    UPDATE = auto()
 351    USE = auto()
 352    USING = auto()
 353    VALUES = auto()
 354    VIEW = auto()
 355    VOLATILE = auto()
 356    WHEN = auto()
 357    WHERE = auto()
 358    WINDOW = auto()
 359    WITH = auto()
 360    UNIQUE = auto()
 361    VERSION_SNAPSHOT = auto()
 362    TIMESTAMP_SNAPSHOT = auto()
 363
 364
 365_ALL_TOKEN_TYPES = list(TokenType)
 366_TOKEN_TYPE_TO_INDEX = {token_type: i for i, token_type in enumerate(_ALL_TOKEN_TYPES)}
 367
 368
 369class Token:
 370    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
 371
 372    @classmethod
 373    def number(cls, number: int) -> Token:
 374        """Returns a NUMBER token with `number` as its text."""
 375        return cls(TokenType.NUMBER, str(number))
 376
 377    @classmethod
 378    def string(cls, string: str) -> Token:
 379        """Returns a STRING token with `string` as its text."""
 380        return cls(TokenType.STRING, string)
 381
 382    @classmethod
 383    def identifier(cls, identifier: str) -> Token:
 384        """Returns an IDENTIFIER token with `identifier` as its text."""
 385        return cls(TokenType.IDENTIFIER, identifier)
 386
 387    @classmethod
 388    def var(cls, var: str) -> Token:
 389        """Returns an VAR token with `var` as its text."""
 390        return cls(TokenType.VAR, var)
 391
 392    def __init__(
 393        self,
 394        token_type: TokenType,
 395        text: str,
 396        line: int = 1,
 397        col: int = 1,
 398        start: int = 0,
 399        end: int = 0,
 400        comments: t.Optional[t.List[str]] = None,
 401    ) -> None:
 402        """Token initializer.
 403
 404        Args:
 405            token_type: The TokenType Enum.
 406            text: The text of the token.
 407            line: The line that the token ends on.
 408            col: The column that the token ends on.
 409            start: The start index of the token.
 410            end: The ending index of the token.
 411            comments: The comments to attach to the token.
 412        """
 413        self.token_type = token_type
 414        self.text = text
 415        self.line = line
 416        self.col = col
 417        self.start = start
 418        self.end = end
 419        self.comments = [] if comments is None else comments
 420
 421    def __repr__(self) -> str:
 422        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 423        return f"<Token {attributes}>"
 424
 425
 426class _Tokenizer(type):
 427    def __new__(cls, clsname, bases, attrs):
 428        klass = super().__new__(cls, clsname, bases, attrs)
 429
 430        def _convert_quotes(arr: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 431            return dict(
 432                (item, item) if isinstance(item, str) else (item[0], item[1]) for item in arr
 433            )
 434
 435        def _quotes_to_format(
 436            token_type: TokenType, arr: t.List[str | t.Tuple[str, str]]
 437        ) -> t.Dict[str, t.Tuple[str, TokenType]]:
 438            return {k: (v, token_type) for k, v in _convert_quotes(arr).items()}
 439
 440        klass._QUOTES = _convert_quotes(klass.QUOTES)
 441        klass._IDENTIFIERS = _convert_quotes(klass.IDENTIFIERS)
 442
 443        klass._FORMAT_STRINGS = {
 444            **{
 445                p + s: (e, TokenType.NATIONAL_STRING)
 446                for s, e in klass._QUOTES.items()
 447                for p in ("n", "N")
 448            },
 449            **_quotes_to_format(TokenType.BIT_STRING, klass.BIT_STRINGS),
 450            **_quotes_to_format(TokenType.BYTE_STRING, klass.BYTE_STRINGS),
 451            **_quotes_to_format(TokenType.HEX_STRING, klass.HEX_STRINGS),
 452            **_quotes_to_format(TokenType.RAW_STRING, klass.RAW_STRINGS),
 453            **_quotes_to_format(TokenType.HEREDOC_STRING, klass.HEREDOC_STRINGS),
 454            **_quotes_to_format(TokenType.UNICODE_STRING, klass.UNICODE_STRINGS),
 455        }
 456
 457        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 458        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 459        klass._COMMENTS = {
 460            **dict(
 461                (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 462                for comment in klass.COMMENTS
 463            ),
 464            "{#": "#}",  # Ensure Jinja comments are tokenized correctly in all dialects
 465        }
 466
 467        klass._KEYWORD_TRIE = new_trie(
 468            key.upper()
 469            for key in (
 470                *klass.KEYWORDS,
 471                *klass._COMMENTS,
 472                *klass._QUOTES,
 473                *klass._FORMAT_STRINGS,
 474            )
 475            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 476        )
 477
 478        if USE_RS_TOKENIZER:
 479            settings = RsTokenizerSettings(
 480                white_space={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.WHITE_SPACE.items()},
 481                single_tokens={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.SINGLE_TOKENS.items()},
 482                keywords={k: _TOKEN_TYPE_TO_INDEX[v] for k, v in klass.KEYWORDS.items()},
 483                numeric_literals=klass.NUMERIC_LITERALS,
 484                identifiers=klass._IDENTIFIERS,
 485                identifier_escapes=klass._IDENTIFIER_ESCAPES,
 486                string_escapes=klass._STRING_ESCAPES,
 487                quotes=klass._QUOTES,
 488                format_strings={
 489                    k: (v1, _TOKEN_TYPE_TO_INDEX[v2])
 490                    for k, (v1, v2) in klass._FORMAT_STRINGS.items()
 491                },
 492                has_bit_strings=bool(klass.BIT_STRINGS),
 493                has_hex_strings=bool(klass.HEX_STRINGS),
 494                comments=klass._COMMENTS,
 495                var_single_tokens=klass.VAR_SINGLE_TOKENS,
 496                commands={_TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMANDS},
 497                command_prefix_tokens={
 498                    _TOKEN_TYPE_TO_INDEX[v] for v in klass.COMMAND_PREFIX_TOKENS
 499                },
 500            )
 501            token_types = RsTokenTypeSettings(
 502                bit_string=_TOKEN_TYPE_TO_INDEX[TokenType.BIT_STRING],
 503                break_=_TOKEN_TYPE_TO_INDEX[TokenType.BREAK],
 504                dcolon=_TOKEN_TYPE_TO_INDEX[TokenType.DCOLON],
 505                heredoc_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEREDOC_STRING],
 506                hex_string=_TOKEN_TYPE_TO_INDEX[TokenType.HEX_STRING],
 507                identifier=_TOKEN_TYPE_TO_INDEX[TokenType.IDENTIFIER],
 508                number=_TOKEN_TYPE_TO_INDEX[TokenType.NUMBER],
 509                parameter=_TOKEN_TYPE_TO_INDEX[TokenType.PARAMETER],
 510                semicolon=_TOKEN_TYPE_TO_INDEX[TokenType.SEMICOLON],
 511                string=_TOKEN_TYPE_TO_INDEX[TokenType.STRING],
 512                var=_TOKEN_TYPE_TO_INDEX[TokenType.VAR],
 513            )
 514            klass._RS_TOKENIZER = RsTokenizer(settings, token_types)
 515        else:
 516            klass._RS_TOKENIZER = None
 517
 518        return klass
 519
 520
 521class Tokenizer(metaclass=_Tokenizer):
 522    SINGLE_TOKENS = {
 523        "(": TokenType.L_PAREN,
 524        ")": TokenType.R_PAREN,
 525        "[": TokenType.L_BRACKET,
 526        "]": TokenType.R_BRACKET,
 527        "{": TokenType.L_BRACE,
 528        "}": TokenType.R_BRACE,
 529        "&": TokenType.AMP,
 530        "^": TokenType.CARET,
 531        ":": TokenType.COLON,
 532        ",": TokenType.COMMA,
 533        ".": TokenType.DOT,
 534        "-": TokenType.DASH,
 535        "=": TokenType.EQ,
 536        ">": TokenType.GT,
 537        "<": TokenType.LT,
 538        "%": TokenType.MOD,
 539        "!": TokenType.NOT,
 540        "|": TokenType.PIPE,
 541        "+": TokenType.PLUS,
 542        ";": TokenType.SEMICOLON,
 543        "/": TokenType.SLASH,
 544        "\\": TokenType.BACKSLASH,
 545        "*": TokenType.STAR,
 546        "~": TokenType.TILDA,
 547        "?": TokenType.PLACEHOLDER,
 548        "@": TokenType.PARAMETER,
 549        # used for breaking a var like x'y' but nothing else
 550        # the token type doesn't matter
 551        "'": TokenType.QUOTE,
 552        "`": TokenType.IDENTIFIER,
 553        '"': TokenType.IDENTIFIER,
 554        "#": TokenType.HASH,
 555    }
 556
 557    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 558    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 559    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 560    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 561    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 562    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 563    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 564    IDENTIFIER_ESCAPES = ['"']
 565    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 566    STRING_ESCAPES = ["'"]
 567    VAR_SINGLE_TOKENS: t.Set[str] = set()
 568
 569    # Autofilled
 570    _COMMENTS: t.Dict[str, str] = {}
 571    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 572    _IDENTIFIERS: t.Dict[str, str] = {}
 573    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 574    _QUOTES: t.Dict[str, str] = {}
 575    _STRING_ESCAPES: t.Set[str] = set()
 576    _KEYWORD_TRIE: t.Dict = {}
 577    _RS_TOKENIZER: t.Optional[t.Any] = None
 578
 579    KEYWORDS: t.Dict[str, TokenType] = {
 580        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 581        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 582        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 583        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 584        "/*+": TokenType.HINT,
 585        "==": TokenType.EQ,
 586        "::": TokenType.DCOLON,
 587        "||": TokenType.DPIPE,
 588        ">=": TokenType.GTE,
 589        "<=": TokenType.LTE,
 590        "<>": TokenType.NEQ,
 591        "!=": TokenType.NEQ,
 592        ":=": TokenType.COLON_EQ,
 593        "<=>": TokenType.NULLSAFE_EQ,
 594        "->": TokenType.ARROW,
 595        "->>": TokenType.DARROW,
 596        "=>": TokenType.FARROW,
 597        "#>": TokenType.HASH_ARROW,
 598        "#>>": TokenType.DHASH_ARROW,
 599        "<->": TokenType.LR_ARROW,
 600        "&&": TokenType.DAMP,
 601        "??": TokenType.DQMARK,
 602        "ALL": TokenType.ALL,
 603        "ALWAYS": TokenType.ALWAYS,
 604        "AND": TokenType.AND,
 605        "ANTI": TokenType.ANTI,
 606        "ANY": TokenType.ANY,
 607        "ASC": TokenType.ASC,
 608        "AS": TokenType.ALIAS,
 609        "ASOF": TokenType.ASOF,
 610        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 611        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 612        "BEGIN": TokenType.BEGIN,
 613        "BETWEEN": TokenType.BETWEEN,
 614        "CACHE": TokenType.CACHE,
 615        "UNCACHE": TokenType.UNCACHE,
 616        "CASE": TokenType.CASE,
 617        "CHARACTER SET": TokenType.CHARACTER_SET,
 618        "CLUSTER BY": TokenType.CLUSTER_BY,
 619        "COLLATE": TokenType.COLLATE,
 620        "COLUMN": TokenType.COLUMN,
 621        "COMMIT": TokenType.COMMIT,
 622        "CONNECT BY": TokenType.CONNECT_BY,
 623        "CONSTRAINT": TokenType.CONSTRAINT,
 624        "CREATE": TokenType.CREATE,
 625        "CROSS": TokenType.CROSS,
 626        "CUBE": TokenType.CUBE,
 627        "CURRENT_DATE": TokenType.CURRENT_DATE,
 628        "CURRENT_TIME": TokenType.CURRENT_TIME,
 629        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 630        "CURRENT_USER": TokenType.CURRENT_USER,
 631        "DATABASE": TokenType.DATABASE,
 632        "DEFAULT": TokenType.DEFAULT,
 633        "DELETE": TokenType.DELETE,
 634        "DESC": TokenType.DESC,
 635        "DESCRIBE": TokenType.DESCRIBE,
 636        "DISTINCT": TokenType.DISTINCT,
 637        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 638        "DIV": TokenType.DIV,
 639        "DROP": TokenType.DROP,
 640        "ELSE": TokenType.ELSE,
 641        "END": TokenType.END,
 642        "ESCAPE": TokenType.ESCAPE,
 643        "EXCEPT": TokenType.EXCEPT,
 644        "EXECUTE": TokenType.EXECUTE,
 645        "EXISTS": TokenType.EXISTS,
 646        "FALSE": TokenType.FALSE,
 647        "FETCH": TokenType.FETCH,
 648        "FILTER": TokenType.FILTER,
 649        "FIRST": TokenType.FIRST,
 650        "FULL": TokenType.FULL,
 651        "FUNCTION": TokenType.FUNCTION,
 652        "FOR": TokenType.FOR,
 653        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 654        "FORMAT": TokenType.FORMAT,
 655        "FROM": TokenType.FROM,
 656        "GEOGRAPHY": TokenType.GEOGRAPHY,
 657        "GEOMETRY": TokenType.GEOMETRY,
 658        "GLOB": TokenType.GLOB,
 659        "GROUP BY": TokenType.GROUP_BY,
 660        "GROUPING SETS": TokenType.GROUPING_SETS,
 661        "HAVING": TokenType.HAVING,
 662        "ILIKE": TokenType.ILIKE,
 663        "IN": TokenType.IN,
 664        "INDEX": TokenType.INDEX,
 665        "INET": TokenType.INET,
 666        "INNER": TokenType.INNER,
 667        "INSERT": TokenType.INSERT,
 668        "INTERVAL": TokenType.INTERVAL,
 669        "INTERSECT": TokenType.INTERSECT,
 670        "INTO": TokenType.INTO,
 671        "IS": TokenType.IS,
 672        "ISNULL": TokenType.ISNULL,
 673        "JOIN": TokenType.JOIN,
 674        "KEEP": TokenType.KEEP,
 675        "KILL": TokenType.KILL,
 676        "LATERAL": TokenType.LATERAL,
 677        "LEFT": TokenType.LEFT,
 678        "LIKE": TokenType.LIKE,
 679        "LIMIT": TokenType.LIMIT,
 680        "LOAD": TokenType.LOAD,
 681        "LOCK": TokenType.LOCK,
 682        "MERGE": TokenType.MERGE,
 683        "NATURAL": TokenType.NATURAL,
 684        "NEXT": TokenType.NEXT,
 685        "NOT": TokenType.NOT,
 686        "NOTNULL": TokenType.NOTNULL,
 687        "NULL": TokenType.NULL,
 688        "OBJECT": TokenType.OBJECT,
 689        "OFFSET": TokenType.OFFSET,
 690        "ON": TokenType.ON,
 691        "OR": TokenType.OR,
 692        "XOR": TokenType.XOR,
 693        "ORDER BY": TokenType.ORDER_BY,
 694        "ORDINALITY": TokenType.ORDINALITY,
 695        "OUTER": TokenType.OUTER,
 696        "OVER": TokenType.OVER,
 697        "OVERLAPS": TokenType.OVERLAPS,
 698        "OVERWRITE": TokenType.OVERWRITE,
 699        "PARTITION": TokenType.PARTITION,
 700        "PARTITION BY": TokenType.PARTITION_BY,
 701        "PARTITIONED BY": TokenType.PARTITION_BY,
 702        "PARTITIONED_BY": TokenType.PARTITION_BY,
 703        "PERCENT": TokenType.PERCENT,
 704        "PIVOT": TokenType.PIVOT,
 705        "PRAGMA": TokenType.PRAGMA,
 706        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 707        "PROCEDURE": TokenType.PROCEDURE,
 708        "QUALIFY": TokenType.QUALIFY,
 709        "RANGE": TokenType.RANGE,
 710        "RECURSIVE": TokenType.RECURSIVE,
 711        "REGEXP": TokenType.RLIKE,
 712        "REPLACE": TokenType.REPLACE,
 713        "RETURNING": TokenType.RETURNING,
 714        "REFERENCES": TokenType.REFERENCES,
 715        "RIGHT": TokenType.RIGHT,
 716        "RLIKE": TokenType.RLIKE,
 717        "ROLLBACK": TokenType.ROLLBACK,
 718        "ROLLUP": TokenType.ROLLUP,
 719        "ROW": TokenType.ROW,
 720        "ROWS": TokenType.ROWS,
 721        "SCHEMA": TokenType.SCHEMA,
 722        "SELECT": TokenType.SELECT,
 723        "SEMI": TokenType.SEMI,
 724        "SET": TokenType.SET,
 725        "SETTINGS": TokenType.SETTINGS,
 726        "SHOW": TokenType.SHOW,
 727        "SIMILAR TO": TokenType.SIMILAR_TO,
 728        "SOME": TokenType.SOME,
 729        "SORT BY": TokenType.SORT_BY,
 730        "START WITH": TokenType.START_WITH,
 731        "TABLE": TokenType.TABLE,
 732        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 733        "TEMP": TokenType.TEMPORARY,
 734        "TEMPORARY": TokenType.TEMPORARY,
 735        "THEN": TokenType.THEN,
 736        "TRUE": TokenType.TRUE,
 737        "UNION": TokenType.UNION,
 738        "UNKNOWN": TokenType.UNKNOWN,
 739        "UNNEST": TokenType.UNNEST,
 740        "UNPIVOT": TokenType.UNPIVOT,
 741        "UPDATE": TokenType.UPDATE,
 742        "USE": TokenType.USE,
 743        "USING": TokenType.USING,
 744        "UUID": TokenType.UUID,
 745        "VALUES": TokenType.VALUES,
 746        "VIEW": TokenType.VIEW,
 747        "VOLATILE": TokenType.VOLATILE,
 748        "WHEN": TokenType.WHEN,
 749        "WHERE": TokenType.WHERE,
 750        "WINDOW": TokenType.WINDOW,
 751        "WITH": TokenType.WITH,
 752        "APPLY": TokenType.APPLY,
 753        "ARRAY": TokenType.ARRAY,
 754        "BIT": TokenType.BIT,
 755        "BOOL": TokenType.BOOLEAN,
 756        "BOOLEAN": TokenType.BOOLEAN,
 757        "BYTE": TokenType.TINYINT,
 758        "MEDIUMINT": TokenType.MEDIUMINT,
 759        "INT1": TokenType.TINYINT,
 760        "TINYINT": TokenType.TINYINT,
 761        "INT16": TokenType.SMALLINT,
 762        "SHORT": TokenType.SMALLINT,
 763        "SMALLINT": TokenType.SMALLINT,
 764        "INT128": TokenType.INT128,
 765        "HUGEINT": TokenType.INT128,
 766        "INT2": TokenType.SMALLINT,
 767        "INTEGER": TokenType.INT,
 768        "INT": TokenType.INT,
 769        "INT4": TokenType.INT,
 770        "INT32": TokenType.INT,
 771        "INT64": TokenType.BIGINT,
 772        "LONG": TokenType.BIGINT,
 773        "BIGINT": TokenType.BIGINT,
 774        "INT8": TokenType.TINYINT,
 775        "DEC": TokenType.DECIMAL,
 776        "DECIMAL": TokenType.DECIMAL,
 777        "BIGDECIMAL": TokenType.BIGDECIMAL,
 778        "BIGNUMERIC": TokenType.BIGDECIMAL,
 779        "MAP": TokenType.MAP,
 780        "NULLABLE": TokenType.NULLABLE,
 781        "NUMBER": TokenType.DECIMAL,
 782        "NUMERIC": TokenType.DECIMAL,
 783        "FIXED": TokenType.DECIMAL,
 784        "REAL": TokenType.FLOAT,
 785        "FLOAT": TokenType.FLOAT,
 786        "FLOAT4": TokenType.FLOAT,
 787        "FLOAT8": TokenType.DOUBLE,
 788        "DOUBLE": TokenType.DOUBLE,
 789        "DOUBLE PRECISION": TokenType.DOUBLE,
 790        "JSON": TokenType.JSON,
 791        "CHAR": TokenType.CHAR,
 792        "CHARACTER": TokenType.CHAR,
 793        "NCHAR": TokenType.NCHAR,
 794        "VARCHAR": TokenType.VARCHAR,
 795        "VARCHAR2": TokenType.VARCHAR,
 796        "NVARCHAR": TokenType.NVARCHAR,
 797        "NVARCHAR2": TokenType.NVARCHAR,
 798        "STR": TokenType.TEXT,
 799        "STRING": TokenType.TEXT,
 800        "TEXT": TokenType.TEXT,
 801        "LONGTEXT": TokenType.LONGTEXT,
 802        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 803        "TINYTEXT": TokenType.TINYTEXT,
 804        "CLOB": TokenType.TEXT,
 805        "LONGVARCHAR": TokenType.TEXT,
 806        "BINARY": TokenType.BINARY,
 807        "BLOB": TokenType.VARBINARY,
 808        "LONGBLOB": TokenType.LONGBLOB,
 809        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 810        "TINYBLOB": TokenType.TINYBLOB,
 811        "BYTEA": TokenType.VARBINARY,
 812        "VARBINARY": TokenType.VARBINARY,
 813        "TIME": TokenType.TIME,
 814        "TIMETZ": TokenType.TIMETZ,
 815        "TIMESTAMP": TokenType.TIMESTAMP,
 816        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 817        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 818        "DATE": TokenType.DATE,
 819        "DATETIME": TokenType.DATETIME,
 820        "INT4RANGE": TokenType.INT4RANGE,
 821        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 822        "INT8RANGE": TokenType.INT8RANGE,
 823        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 824        "NUMRANGE": TokenType.NUMRANGE,
 825        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 826        "TSRANGE": TokenType.TSRANGE,
 827        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 828        "TSTZRANGE": TokenType.TSTZRANGE,
 829        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 830        "DATERANGE": TokenType.DATERANGE,
 831        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 832        "UNIQUE": TokenType.UNIQUE,
 833        "STRUCT": TokenType.STRUCT,
 834        "VARIANT": TokenType.VARIANT,
 835        "ALTER": TokenType.ALTER,
 836        "ANALYZE": TokenType.COMMAND,
 837        "CALL": TokenType.COMMAND,
 838        "COMMENT": TokenType.COMMENT,
 839        "COPY": TokenType.COMMAND,
 840        "EXPLAIN": TokenType.COMMAND,
 841        "GRANT": TokenType.COMMAND,
 842        "OPTIMIZE": TokenType.COMMAND,
 843        "PREPARE": TokenType.COMMAND,
 844        "TRUNCATE": TokenType.COMMAND,
 845        "VACUUM": TokenType.COMMAND,
 846        "USER-DEFINED": TokenType.USERDEFINED,
 847        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
 848        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
 849    }
 850
 851    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 852        " ": TokenType.SPACE,
 853        "\t": TokenType.SPACE,
 854        "\n": TokenType.BREAK,
 855        "\r": TokenType.BREAK,
 856    }
 857
 858    COMMANDS = {
 859        TokenType.COMMAND,
 860        TokenType.EXECUTE,
 861        TokenType.FETCH,
 862        TokenType.SHOW,
 863    }
 864
 865    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 866
 867    # handle numeric literals like in Hive (3L = BIGINT)
 868    NUMERIC_LITERALS: t.Dict[str, str] = {}
 869
 870    COMMENTS = ["--", ("/*", "*/")]
 871
 872    __slots__ = (
 873        "sql",
 874        "size",
 875        "tokens",
 876        "dialect",
 877        "_start",
 878        "_current",
 879        "_line",
 880        "_col",
 881        "_comments",
 882        "_char",
 883        "_end",
 884        "_peek",
 885        "_prev_token_line",
 886        "_rs_dialect_settings",
 887    )
 888
 889    def __init__(self, dialect: DialectType = None) -> None:
 890        from sqlglot.dialects import Dialect
 891
 892        self.dialect = Dialect.get_or_raise(dialect)
 893
 894        if USE_RS_TOKENIZER:
 895            self._rs_dialect_settings = RsTokenizerDialectSettings(
 896                escape_sequences=self.dialect.ESCAPE_SEQUENCES,
 897                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
 898            )
 899
 900        self.reset()
 901
 902    def reset(self) -> None:
 903        self.sql = ""
 904        self.size = 0
 905        self.tokens: t.List[Token] = []
 906        self._start = 0
 907        self._current = 0
 908        self._line = 1
 909        self._col = 0
 910        self._comments: t.List[str] = []
 911
 912        self._char = ""
 913        self._end = False
 914        self._peek = ""
 915        self._prev_token_line = -1
 916
 917    def tokenize(self, sql: str) -> t.List[Token]:
 918        """Returns a list of tokens corresponding to the SQL string `sql`."""
 919        if USE_RS_TOKENIZER:
 920            return self.tokenize_rs(sql)
 921
 922        self.reset()
 923        self.sql = sql
 924        self.size = len(sql)
 925
 926        try:
 927            self._scan()
 928        except Exception as e:
 929            start = max(self._current - 50, 0)
 930            end = min(self._current + 50, self.size - 1)
 931            context = self.sql[start:end]
 932            raise TokenError(f"Error tokenizing '{context}'") from e
 933
 934        return self.tokens
 935
 936    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 937        while self.size and not self._end:
 938            current = self._current
 939
 940            # skip spaces inline rather than iteratively calling _advance(),
 941            # for performance reasons
 942            while current < self.size:
 943                char = self.sql[current]
 944
 945                if char.isspace() and (char == " " or char == "\t"):
 946                    current += 1
 947                else:
 948                    break
 949
 950            n = current - self._current
 951            self._start = current
 952            self._advance(n if n > 1 else 1)
 953
 954            if self._char is None:
 955                break
 956
 957            if not self._char.isspace():
 958                if self._char.isdigit():
 959                    self._scan_number()
 960                elif self._char in self._IDENTIFIERS:
 961                    self._scan_identifier(self._IDENTIFIERS[self._char])
 962                else:
 963                    self._scan_keywords()
 964
 965            if until and until():
 966                break
 967
 968        if self.tokens and self._comments:
 969            self.tokens[-1].comments.extend(self._comments)
 970
 971    def _chars(self, size: int) -> str:
 972        if size == 1:
 973            return self._char
 974
 975        start = self._current - 1
 976        end = start + size
 977
 978        return self.sql[start:end] if end <= self.size else ""
 979
 980    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 981        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 982            # Ensures we don't count an extra line if we get a \r\n line break sequence
 983            if self._char == "\r" and self._peek == "\n":
 984                i = 2
 985                self._start += 1
 986
 987            self._col = 1
 988            self._line += 1
 989        else:
 990            self._col += i
 991
 992        self._current += i
 993        self._end = self._current >= self.size
 994        self._char = self.sql[self._current - 1]
 995        self._peek = "" if self._end else self.sql[self._current]
 996
 997        if alnum and self._char.isalnum():
 998            # Here we use local variables instead of attributes for better performance
 999            _col = self._col
1000            _current = self._current
1001            _end = self._end
1002            _peek = self._peek
1003
1004            while _peek.isalnum():
1005                _col += 1
1006                _current += 1
1007                _end = _current >= self.size
1008                _peek = "" if _end else self.sql[_current]
1009
1010            self._col = _col
1011            self._current = _current
1012            self._end = _end
1013            self._peek = _peek
1014            self._char = self.sql[_current - 1]
1015
1016    @property
1017    def _text(self) -> str:
1018        return self.sql[self._start : self._current]
1019
1020    def peek(self, i: int = 0) -> str:
1021        i = self._current + i
1022        if i < self.size:
1023            return self.sql[i]
1024        return ""
1025
1026    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
1027        self._prev_token_line = self._line
1028
1029        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
1030            self.tokens[-1].comments.extend(self._comments)
1031            self._comments = []
1032
1033        self.tokens.append(
1034            Token(
1035                token_type,
1036                text=self._text if text is None else text,
1037                line=self._line,
1038                col=self._col,
1039                start=self._start,
1040                end=self._current - 1,
1041                comments=self._comments,
1042            )
1043        )
1044        self._comments = []
1045
1046        # If we have either a semicolon or a begin token before the command's token, we'll parse
1047        # whatever follows the command's token as a string
1048        if (
1049            token_type in self.COMMANDS
1050            and self._peek != ";"
1051            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
1052        ):
1053            start = self._current
1054            tokens = len(self.tokens)
1055            self._scan(lambda: self._peek == ";")
1056            self.tokens = self.tokens[:tokens]
1057            text = self.sql[start : self._current].strip()
1058            if text:
1059                self._add(TokenType.STRING, text)
1060
1061    def _scan_keywords(self) -> None:
1062        size = 0
1063        word = None
1064        chars = self._text
1065        char = chars
1066        prev_space = False
1067        skip = False
1068        trie = self._KEYWORD_TRIE
1069        single_token = char in self.SINGLE_TOKENS
1070
1071        while chars:
1072            if skip:
1073                result = TrieResult.PREFIX
1074            else:
1075                result, trie = in_trie(trie, char.upper())
1076
1077            if result == TrieResult.FAILED:
1078                break
1079            if result == TrieResult.EXISTS:
1080                word = chars
1081
1082            end = self._current + size
1083            size += 1
1084
1085            if end < self.size:
1086                char = self.sql[end]
1087                single_token = single_token or char in self.SINGLE_TOKENS
1088                is_space = char.isspace()
1089
1090                if not is_space or not prev_space:
1091                    if is_space:
1092                        char = " "
1093                    chars += char
1094                    prev_space = is_space
1095                    skip = False
1096                else:
1097                    skip = True
1098            else:
1099                char = ""
1100                break
1101
1102        if word:
1103            if self._scan_string(word):
1104                return
1105            if self._scan_comment(word):
1106                return
1107            if prev_space or single_token or not char:
1108                self._advance(size - 1)
1109                word = word.upper()
1110                self._add(self.KEYWORDS[word], text=word)
1111                return
1112
1113        if self._char in self.SINGLE_TOKENS:
1114            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1115            return
1116
1117        self._scan_var()
1118
1119    def _scan_comment(self, comment_start: str) -> bool:
1120        if comment_start not in self._COMMENTS:
1121            return False
1122
1123        comment_start_line = self._line
1124        comment_start_size = len(comment_start)
1125        comment_end = self._COMMENTS[comment_start]
1126
1127        if comment_end:
1128            # Skip the comment's start delimiter
1129            self._advance(comment_start_size)
1130
1131            comment_end_size = len(comment_end)
1132            while not self._end and self._chars(comment_end_size) != comment_end:
1133                self._advance(alnum=True)
1134
1135            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1136            self._advance(comment_end_size - 1)
1137        else:
1138            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1139                self._advance(alnum=True)
1140            self._comments.append(self._text[comment_start_size:])
1141
1142        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
1143        # Multiple consecutive comments are preserved by appending them to the current comments list.
1144        if comment_start_line == self._prev_token_line:
1145            self.tokens[-1].comments.extend(self._comments)
1146            self._comments = []
1147            self._prev_token_line = self._line
1148
1149        return True
1150
1151    def _scan_number(self) -> None:
1152        if self._char == "0":
1153            peek = self._peek.upper()
1154            if peek == "B":
1155                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1156            elif peek == "X":
1157                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1158
1159        decimal = False
1160        scientific = 0
1161
1162        while True:
1163            if self._peek.isdigit():
1164                self._advance()
1165            elif self._peek == "." and not decimal:
1166                after = self.peek(1)
1167                if after.isdigit() or not after.isalpha():
1168                    decimal = True
1169                    self._advance()
1170                else:
1171                    return self._add(TokenType.VAR)
1172            elif self._peek in ("-", "+") and scientific == 1:
1173                scientific += 1
1174                self._advance()
1175            elif self._peek.upper() == "E" and not scientific:
1176                scientific += 1
1177                self._advance()
1178            elif self._peek.isidentifier():
1179                number_text = self._text
1180                literal = ""
1181
1182                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1183                    literal += self._peek
1184                    self._advance()
1185
1186                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))
1187
1188                if token_type:
1189                    self._add(TokenType.NUMBER, number_text)
1190                    self._add(TokenType.DCOLON, "::")
1191                    return self._add(token_type, literal)
1192                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
1193                    return self._add(TokenType.VAR)
1194
1195                self._advance(-len(literal))
1196                return self._add(TokenType.NUMBER, number_text)
1197            else:
1198                return self._add(TokenType.NUMBER)
1199
1200    def _scan_bits(self) -> None:
1201        self._advance()
1202        value = self._extract_value()
1203        try:
1204            # If `value` can't be interpreted as a binary number, fall back to tokenizing it as an identifier
1205            int(value, 2)
1206            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1207        except ValueError:
1208            self._add(TokenType.IDENTIFIER)
1209
1210    def _scan_hex(self) -> None:
1211        self._advance()
1212        value = self._extract_value()
1213        try:
1214            # If `value` can't be interpreted as a hex number, fall back to tokenizing it as an identifier
1215            int(value, 16)
1216            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1217        except ValueError:
1218            self._add(TokenType.IDENTIFIER)
1219
1220    def _extract_value(self) -> str:
1221        while True:
1222            char = self._peek.strip()
1223            if char and char not in self.SINGLE_TOKENS:
1224                self._advance(alnum=True)
1225            else:
1226                break
1227
1228        return self._text
1229
1230    def _scan_string(self, start: str) -> bool:
1231        base = None
1232        token_type = TokenType.STRING
1233
1234        if start in self._QUOTES:
1235            end = self._QUOTES[start]
1236        elif start in self._FORMAT_STRINGS:
1237            end, token_type = self._FORMAT_STRINGS[start]
1238
1239            if token_type == TokenType.HEX_STRING:
1240                base = 16
1241            elif token_type == TokenType.BIT_STRING:
1242                base = 2
1243            elif token_type == TokenType.HEREDOC_STRING:
1244                self._advance()
1245                tag = "" if self._char == end else self._extract_string(end)
1246                end = f"{start}{tag}{end}"
1247        else:
1248            return False
1249
1250        self._advance(len(start))
1251        text = self._extract_string(end)
1252
1253        if base:
1254            try:
1255                int(text, base)
1256            except ValueError:
1257                raise TokenError(
1258                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1259                )
1260
1261        self._add(token_type, text)
1262        return True
1263
1264    def _scan_identifier(self, identifier_end: str) -> None:
1265        self._advance()
1266        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1267        self._add(TokenType.IDENTIFIER, text)
1268
1269    def _scan_var(self) -> None:
1270        while True:
1271            char = self._peek.strip()
1272            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1273                self._advance(alnum=True)
1274            else:
1275                break
1276
1277        self._add(
1278            TokenType.VAR
1279            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1280            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1281        )
1282
1283    def _extract_string(self, delimiter: str, escapes=None) -> str:
1284        text = ""
1285        delim_size = len(delimiter)
1286        escapes = self._STRING_ESCAPES if escapes is None else escapes
1287
1288        while True:
1289            if (
1290                self._char in escapes
1291                and (self._peek == delimiter or self._peek in escapes)
1292                and (self._char not in self._QUOTES or self._char == self._peek)
1293            ):
1294                if self._peek == delimiter:
1295                    text += self._peek
1296                else:
1297                    text += self._char + self._peek
1298
1299                if self._current + 1 < self.size:
1300                    self._advance(2)
1301                else:
1302                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1303            else:
1304                if self._chars(delim_size) == delimiter:
1305                    if delim_size > 1:
1306                        self._advance(delim_size - 1)
1307                    break
1308
1309                if self._end:
1310                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1311
1312                if (
1313                    self.dialect.ESCAPE_SEQUENCES
1314                    and self._peek
1315                    and self._char in self.STRING_ESCAPES
1316                ):
1317                    escaped_sequence = self.dialect.ESCAPE_SEQUENCES.get(self._char + self._peek)
1318                    if escaped_sequence:
1319                        self._advance(2)
1320                        text += escaped_sequence
1321                        continue
1322
1323                current = self._current - 1
1324                self._advance(alnum=True)
1325                text += self.sql[current : self._current - 1]
1326
1327        return text
1328
1329    def tokenize_rs(self, sql: str) -> t.List[Token]:
1330        if not self._RS_TOKENIZER:
1331            raise SqlglotError("Rust tokenizer is not available")
1332
1333        try:
1334            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
1335            for token in tokens:
1336                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
1337            return tokens
1338        except Exception as e:
1339            raise TokenError(str(e))
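
The typical entry point is Tokenizer.tokenize, which turns a SQL string into a list of Token objects. A minimal usage sketch (the query text is only illustrative):

    from sqlglot.tokens import Tokenizer, TokenType

    # Default dialect settings; a dialect name or Dialect instance can be
    # passed to pick up dialect-level settings such as escape sequences.
    tokenizer = Tokenizer()
    tokens = tokenizer.tokenize("SELECT a FROM b")

    # Each Token records its type, raw text and position in the input.
    for token in tokens:
        print(token.token_type, token.text, token.line, token.col)

    assert tokens[0].token_type is TokenType.SELECT

When the optional sqlglotrs package is importable, tokenize dispatches to the Rust tokenizer; setting the environment variable SQLGLOTRS_TOKENIZER=0 before importing sqlglot forces the pure-Python scanner instead.
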
class TokenType(sqlglot.helper.AutoName):

An enumeration of the token types emitted by the tokenizer.

INT8RANGE = <TokenType.INT8RANGE: 'INT8RANGE'>
INT8MULTIRANGE = <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>
NUMRANGE = <TokenType.NUMRANGE: 'NUMRANGE'>
NUMMULTIRANGE = <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>
TSRANGE = <TokenType.TSRANGE: 'TSRANGE'>
TSMULTIRANGE = <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>
TSTZRANGE = <TokenType.TSTZRANGE: 'TSTZRANGE'>
TSTZMULTIRANGE = <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>
DATERANGE = <TokenType.DATERANGE: 'DATERANGE'>
DATEMULTIRANGE = <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>
UUID = <TokenType.UUID: 'UUID'>
GEOGRAPHY = <TokenType.GEOGRAPHY: 'GEOGRAPHY'>
NULLABLE = <TokenType.NULLABLE: 'NULLABLE'>
GEOMETRY = <TokenType.GEOMETRY: 'GEOMETRY'>
HLLSKETCH = <TokenType.HLLSKETCH: 'HLLSKETCH'>
HSTORE = <TokenType.HSTORE: 'HSTORE'>
SUPER = <TokenType.SUPER: 'SUPER'>
SERIAL = <TokenType.SERIAL: 'SERIAL'>
SMALLSERIAL = <TokenType.SMALLSERIAL: 'SMALLSERIAL'>
BIGSERIAL = <TokenType.BIGSERIAL: 'BIGSERIAL'>
XML = <TokenType.XML: 'XML'>
YEAR = <TokenType.YEAR: 'YEAR'>
UNIQUEIDENTIFIER = <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>
USERDEFINED = <TokenType.USERDEFINED: 'USERDEFINED'>
MONEY = <TokenType.MONEY: 'MONEY'>
SMALLMONEY = <TokenType.SMALLMONEY: 'SMALLMONEY'>
ROWVERSION = <TokenType.ROWVERSION: 'ROWVERSION'>
IMAGE = <TokenType.IMAGE: 'IMAGE'>
VARIANT = <TokenType.VARIANT: 'VARIANT'>
OBJECT = <TokenType.OBJECT: 'OBJECT'>
INET = <TokenType.INET: 'INET'>
IPADDRESS = <TokenType.IPADDRESS: 'IPADDRESS'>
IPPREFIX = <TokenType.IPPREFIX: 'IPPREFIX'>
ENUM = <TokenType.ENUM: 'ENUM'>
ENUM8 = <TokenType.ENUM8: 'ENUM8'>
ENUM16 = <TokenType.ENUM16: 'ENUM16'>
FIXEDSTRING = <TokenType.FIXEDSTRING: 'FIXEDSTRING'>
LOWCARDINALITY = <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>
NESTED = <TokenType.NESTED: 'NESTED'>
UNKNOWN = <TokenType.UNKNOWN: 'UNKNOWN'>
ALIAS = <TokenType.ALIAS: 'ALIAS'>
ALTER = <TokenType.ALTER: 'ALTER'>
ALWAYS = <TokenType.ALWAYS: 'ALWAYS'>
ALL = <TokenType.ALL: 'ALL'>
ANTI = <TokenType.ANTI: 'ANTI'>
ANY = <TokenType.ANY: 'ANY'>
APPLY = <TokenType.APPLY: 'APPLY'>
ARRAY = <TokenType.ARRAY: 'ARRAY'>
ASC = <TokenType.ASC: 'ASC'>
ASOF = <TokenType.ASOF: 'ASOF'>
AUTO_INCREMENT = <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>
BEGIN = <TokenType.BEGIN: 'BEGIN'>
BETWEEN = <TokenType.BETWEEN: 'BETWEEN'>
CACHE = <TokenType.CACHE: 'CACHE'>
CASE = <TokenType.CASE: 'CASE'>
CHARACTER_SET = <TokenType.CHARACTER_SET: 'CHARACTER_SET'>
CLUSTER_BY = <TokenType.CLUSTER_BY: 'CLUSTER_BY'>
COLLATE = <TokenType.COLLATE: 'COLLATE'>
COMMAND = <TokenType.COMMAND: 'COMMAND'>
COMMENT = <TokenType.COMMENT: 'COMMENT'>
COMMIT = <TokenType.COMMIT: 'COMMIT'>
CONNECT_BY = <TokenType.CONNECT_BY: 'CONNECT_BY'>
CONSTRAINT = <TokenType.CONSTRAINT: 'CONSTRAINT'>
CREATE = <TokenType.CREATE: 'CREATE'>
CROSS = <TokenType.CROSS: 'CROSS'>
CUBE = <TokenType.CUBE: 'CUBE'>
CURRENT_DATE = <TokenType.CURRENT_DATE: 'CURRENT_DATE'>
CURRENT_DATETIME = <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>
CURRENT_TIME = <TokenType.CURRENT_TIME: 'CURRENT_TIME'>
CURRENT_TIMESTAMP = <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>
CURRENT_USER = <TokenType.CURRENT_USER: 'CURRENT_USER'>
DEFAULT = <TokenType.DEFAULT: 'DEFAULT'>
DELETE = <TokenType.DELETE: 'DELETE'>
DESC = <TokenType.DESC: 'DESC'>
DESCRIBE = <TokenType.DESCRIBE: 'DESCRIBE'>
DICTIONARY = <TokenType.DICTIONARY: 'DICTIONARY'>
DISTINCT = <TokenType.DISTINCT: 'DISTINCT'>
DISTRIBUTE_BY = <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>
DIV = <TokenType.DIV: 'DIV'>
DROP = <TokenType.DROP: 'DROP'>
ELSE = <TokenType.ELSE: 'ELSE'>
END = <TokenType.END: 'END'>
ESCAPE = <TokenType.ESCAPE: 'ESCAPE'>
EXCEPT = <TokenType.EXCEPT: 'EXCEPT'>
EXECUTE = <TokenType.EXECUTE: 'EXECUTE'>
EXISTS = <TokenType.EXISTS: 'EXISTS'>
FALSE = <TokenType.FALSE: 'FALSE'>
FETCH = <TokenType.FETCH: 'FETCH'>
FILTER = <TokenType.FILTER: 'FILTER'>
FINAL = <TokenType.FINAL: 'FINAL'>
FIRST = <TokenType.FIRST: 'FIRST'>
FOR = <TokenType.FOR: 'FOR'>
FORCE = <TokenType.FORCE: 'FORCE'>
FOREIGN_KEY = <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>
FORMAT = <TokenType.FORMAT: 'FORMAT'>
FROM = <TokenType.FROM: 'FROM'>
FULL = <TokenType.FULL: 'FULL'>
FUNCTION = <TokenType.FUNCTION: 'FUNCTION'>
GLOB = <TokenType.GLOB: 'GLOB'>
GLOBAL = <TokenType.GLOBAL: 'GLOBAL'>
GROUP_BY = <TokenType.GROUP_BY: 'GROUP_BY'>
GROUPING_SETS = <TokenType.GROUPING_SETS: 'GROUPING_SETS'>
HAVING = <TokenType.HAVING: 'HAVING'>
HINT = <TokenType.HINT: 'HINT'>
IGNORE = <TokenType.IGNORE: 'IGNORE'>
ILIKE = <TokenType.ILIKE: 'ILIKE'>
ILIKE_ANY = <TokenType.ILIKE_ANY: 'ILIKE_ANY'>
IN = <TokenType.IN: 'IN'>
INDEX = <TokenType.INDEX: 'INDEX'>
INNER = <TokenType.INNER: 'INNER'>
INSERT = <TokenType.INSERT: 'INSERT'>
INTERSECT = <TokenType.INTERSECT: 'INTERSECT'>
INTERVAL = <TokenType.INTERVAL: 'INTERVAL'>
INTO = <TokenType.INTO: 'INTO'>
INTRODUCER = <TokenType.INTRODUCER: 'INTRODUCER'>
IRLIKE = <TokenType.IRLIKE: 'IRLIKE'>
IS = <TokenType.IS: 'IS'>
ISNULL = <TokenType.ISNULL: 'ISNULL'>
JOIN = <TokenType.JOIN: 'JOIN'>
JOIN_MARKER = <TokenType.JOIN_MARKER: 'JOIN_MARKER'>
KEEP = <TokenType.KEEP: 'KEEP'>
KILL = <TokenType.KILL: 'KILL'>
LANGUAGE = <TokenType.LANGUAGE: 'LANGUAGE'>
LATERAL = <TokenType.LATERAL: 'LATERAL'>
LEFT = <TokenType.LEFT: 'LEFT'>
LIKE = <TokenType.LIKE: 'LIKE'>
LIKE_ANY = <TokenType.LIKE_ANY: 'LIKE_ANY'>
LIMIT = <TokenType.LIMIT: 'LIMIT'>
LOAD = <TokenType.LOAD: 'LOAD'>
LOCK = <TokenType.LOCK: 'LOCK'>
MAP = <TokenType.MAP: 'MAP'>
MATCH_RECOGNIZE = <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>
MEMBER_OF = <TokenType.MEMBER_OF: 'MEMBER_OF'>
MERGE = <TokenType.MERGE: 'MERGE'>
MOD = <TokenType.MOD: 'MOD'>
MODEL = <TokenType.MODEL: 'MODEL'>
NATURAL = <TokenType.NATURAL: 'NATURAL'>
NEXT = <TokenType.NEXT: 'NEXT'>
NOTNULL = <TokenType.NOTNULL: 'NOTNULL'>
NULL = <TokenType.NULL: 'NULL'>
OBJECT_IDENTIFIER = <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>
OFFSET = <TokenType.OFFSET: 'OFFSET'>
ON = <TokenType.ON: 'ON'>
OPERATOR = <TokenType.OPERATOR: 'OPERATOR'>
ORDER_BY = <TokenType.ORDER_BY: 'ORDER_BY'>
ORDERED = <TokenType.ORDERED: 'ORDERED'>
ORDINALITY = <TokenType.ORDINALITY: 'ORDINALITY'>
OUTER = <TokenType.OUTER: 'OUTER'>
OVER = <TokenType.OVER: 'OVER'>
OVERLAPS = <TokenType.OVERLAPS: 'OVERLAPS'>
OVERWRITE = <TokenType.OVERWRITE: 'OVERWRITE'>
PARTITION = <TokenType.PARTITION: 'PARTITION'>
PARTITION_BY = <TokenType.PARTITION_BY: 'PARTITION_BY'>
PERCENT = <TokenType.PERCENT: 'PERCENT'>
PIVOT = <TokenType.PIVOT: 'PIVOT'>
PLACEHOLDER = <TokenType.PLACEHOLDER: 'PLACEHOLDER'>
PRAGMA = <TokenType.PRAGMA: 'PRAGMA'>
PRIMARY_KEY = <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>
PROCEDURE = <TokenType.PROCEDURE: 'PROCEDURE'>
PROPERTIES = <TokenType.PROPERTIES: 'PROPERTIES'>
PSEUDO_TYPE = <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>
QUALIFY = <TokenType.QUALIFY: 'QUALIFY'>
QUOTE = <TokenType.QUOTE: 'QUOTE'>
RANGE = <TokenType.RANGE: 'RANGE'>
RECURSIVE = <TokenType.RECURSIVE: 'RECURSIVE'>
REFRESH = <TokenType.REFRESH: 'REFRESH'>
REPLACE = <TokenType.REPLACE: 'REPLACE'>
RETURNING = <TokenType.RETURNING: 'RETURNING'>
REFERENCES = <TokenType.REFERENCES: 'REFERENCES'>
RIGHT = <TokenType.RIGHT: 'RIGHT'>
RLIKE = <TokenType.RLIKE: 'RLIKE'>
ROLLBACK = <TokenType.ROLLBACK: 'ROLLBACK'>
ROLLUP = <TokenType.ROLLUP: 'ROLLUP'>
ROW = <TokenType.ROW: 'ROW'>
ROWS = <TokenType.ROWS: 'ROWS'>
SELECT = <TokenType.SELECT: 'SELECT'>
SEMI = <TokenType.SEMI: 'SEMI'>
SEPARATOR = <TokenType.SEPARATOR: 'SEPARATOR'>
SERDE_PROPERTIES = <TokenType.SERDE_PROPERTIES: 'SERDE_PROPERTIES'>
SET = <TokenType.SET: 'SET'>
SETTINGS = <TokenType.SETTINGS: 'SETTINGS'>
SHOW = <TokenType.SHOW: 'SHOW'>
SIMILAR_TO = <TokenType.SIMILAR_TO: 'SIMILAR_TO'>
SOME = <TokenType.SOME: 'SOME'>
SORT_BY = <TokenType.SORT_BY: 'SORT_BY'>
START_WITH = <TokenType.START_WITH: 'START_WITH'>
STRUCT = <TokenType.STRUCT: 'STRUCT'>
TABLE_SAMPLE = <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>
TEMPORARY = <TokenType.TEMPORARY: 'TEMPORARY'>
TOP = <TokenType.TOP: 'TOP'>
THEN = <TokenType.THEN: 'THEN'>
TRUE = <TokenType.TRUE: 'TRUE'>
UNCACHE = <TokenType.UNCACHE: 'UNCACHE'>
UNION = <TokenType.UNION: 'UNION'>
UNNEST = <TokenType.UNNEST: 'UNNEST'>
UNPIVOT = <TokenType.UNPIVOT: 'UNPIVOT'>
UPDATE = <TokenType.UPDATE: 'UPDATE'>
USE = <TokenType.USE: 'USE'>
USING = <TokenType.USING: 'USING'>
VALUES = <TokenType.VALUES: 'VALUES'>
VIEW = <TokenType.VIEW: 'VIEW'>
VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
VERSION_SNAPSHOT = <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>
TIMESTAMP_SNAPSHOT = <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>
Inherited Members
enum.Enum
name
value
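
Because these are ordinary Enum members whose value is auto-generated from the member name (via AutoName), name and value coincide. A minimal sketch:

from sqlglot.tokens import TokenType

# each member's auto() value is its own name
assert TokenType.SELECT.name == "SELECT"
assert TokenType.SELECT.value == "SELECT"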
class Token:
370class Token:
371    __slots__ = ("token_type", "text", "line", "col", "start", "end", "comments")
372
373    @classmethod
374    def number(cls, number: int) -> Token:
375        """Returns a NUMBER token with `number` as its text."""
376        return cls(TokenType.NUMBER, str(number))
377
378    @classmethod
379    def string(cls, string: str) -> Token:
380        """Returns a STRING token with `string` as its text."""
381        return cls(TokenType.STRING, string)
382
383    @classmethod
384    def identifier(cls, identifier: str) -> Token:
385        """Returns an IDENTIFIER token with `identifier` as its text."""
386        return cls(TokenType.IDENTIFIER, identifier)
387
388    @classmethod
389    def var(cls, var: str) -> Token:
390        """Returns a VAR token with `var` as its text."""
391        return cls(TokenType.VAR, var)
392
393    def __init__(
394        self,
395        token_type: TokenType,
396        text: str,
397        line: int = 1,
398        col: int = 1,
399        start: int = 0,
400        end: int = 0,
401        comments: t.Optional[t.List[str]] = None,
402    ) -> None:
403        """Token initializer.
404
405        Args:
406            token_type: The TokenType Enum.
407            text: The text of the token.
408            line: The line that the token ends on.
409            col: The column that the token ends on.
410            start: The start index of the token.
411            end: The ending index of the token.
412            comments: The comments to attach to the token.
413        """
414        self.token_type = token_type
415        self.text = text
416        self.line = line
417        self.col = col
418        self.start = start
419        self.end = end
420        self.comments = [] if comments is None else comments
421
422    def __repr__(self) -> str:
423        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
424        return f"<Token {attributes}>"
Token( token_type: TokenType, text: str, line: int = 1, col: int = 1, start: int = 0, end: int = 0, comments: Optional[List[str]] = None)
393    def __init__(
394        self,
395        token_type: TokenType,
396        text: str,
397        line: int = 1,
398        col: int = 1,
399        start: int = 0,
400        end: int = 0,
401        comments: t.Optional[t.List[str]] = None,
402    ) -> None:
403        """Token initializer.
404
405        Args:
406            token_type: The TokenType Enum.
407            text: The text of the token.
408            line: The line that the token ends on.
409            col: The column that the token ends on.
410            start: The start index of the token.
411            end: The ending index of the token.
412            comments: The comments to attach to the token.
413        """
414        self.token_type = token_type
415        self.text = text
416        self.line = line
417        self.col = col
418        self.start = start
419        self.end = end
420        self.comments = [] if comments is None else comments

Token initializer.

Arguments:
  • token_type: The TokenType Enum.
  • text: The text of the token.
  • line: The line that the token ends on.
  • col: The column that the token ends on.
  • start: The start index of the token.
  • end: The ending index of the token.
  • comments: The comments to attach to the token.
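
For instance, a Token can be constructed directly and inspected via its repr (a small sketch; the position values are illustrative):

from sqlglot.tokens import Token, TokenType

tok = Token(TokenType.SELECT, "SELECT", line=1, col=6, start=0, end=5)
print(tok)
# <Token token_type: TokenType.SELECT, text: SELECT, line: 1, col: 6, start: 0, end: 5, comments: []>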
@classmethod
def number(cls, number: int) -> Token:
373    @classmethod
374    def number(cls, number: int) -> Token:
375        """Returns a NUMBER token with `number` as its text."""
376        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> Token:
378    @classmethod
379    def string(cls, string: str) -> Token:
380        """Returns a STRING token with `string` as its text."""
381        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> Token:
383    @classmethod
384    def identifier(cls, identifier: str) -> Token:
385        """Returns an IDENTIFIER token with `identifier` as its text."""
386        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> Token:
388    @classmethod
389    def var(cls, var: str) -> Token:
390        """Returns a VAR token with `var` as its text."""
391        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.
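
The four classmethods above are convenience constructors: they fill in only token_type and text, leaving the positional fields at their defaults. A quick sketch:

from sqlglot.tokens import Token, TokenType

assert Token.number(42).text == "42"  # a NUMBER token; the int is stringified
assert Token.string("hi").token_type is TokenType.STRING
assert Token.identifier("col").token_type is TokenType.IDENTIFIER
assert Token.var("x").token_type is TokenType.VAR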

token_type
text
line
col
start
end
comments
class Tokenizer:
 522class Tokenizer(metaclass=_Tokenizer):
 523    SINGLE_TOKENS = {
 524        "(": TokenType.L_PAREN,
 525        ")": TokenType.R_PAREN,
 526        "[": TokenType.L_BRACKET,
 527        "]": TokenType.R_BRACKET,
 528        "{": TokenType.L_BRACE,
 529        "}": TokenType.R_BRACE,
 530        "&": TokenType.AMP,
 531        "^": TokenType.CARET,
 532        ":": TokenType.COLON,
 533        ",": TokenType.COMMA,
 534        ".": TokenType.DOT,
 535        "-": TokenType.DASH,
 536        "=": TokenType.EQ,
 537        ">": TokenType.GT,
 538        "<": TokenType.LT,
 539        "%": TokenType.MOD,
 540        "!": TokenType.NOT,
 541        "|": TokenType.PIPE,
 542        "+": TokenType.PLUS,
 543        ";": TokenType.SEMICOLON,
 544        "/": TokenType.SLASH,
 545        "\\": TokenType.BACKSLASH,
 546        "*": TokenType.STAR,
 547        "~": TokenType.TILDA,
 548        "?": TokenType.PLACEHOLDER,
 549        "@": TokenType.PARAMETER,
 550        # used for breaking a var like x'y' but nothing else
 551        # the token type doesn't matter
 552        "'": TokenType.QUOTE,
 553        "`": TokenType.IDENTIFIER,
 554        '"': TokenType.IDENTIFIER,
 555        "#": TokenType.HASH,
 556    }
 557
 558    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 559    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 560    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 561    RAW_STRINGS: t.List[str | t.Tuple[str, str]] = []
 562    HEREDOC_STRINGS: t.List[str | t.Tuple[str, str]] = []
 563    UNICODE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 564    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 565    IDENTIFIER_ESCAPES = ['"']
 566    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 567    STRING_ESCAPES = ["'"]
 568    VAR_SINGLE_TOKENS: t.Set[str] = set()
 569
 570    # Autofilled
 571    _COMMENTS: t.Dict[str, str] = {}
 572    _FORMAT_STRINGS: t.Dict[str, t.Tuple[str, TokenType]] = {}
 573    _IDENTIFIERS: t.Dict[str, str] = {}
 574    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 575    _QUOTES: t.Dict[str, str] = {}
 576    _STRING_ESCAPES: t.Set[str] = set()
 577    _KEYWORD_TRIE: t.Dict = {}
 578    _RS_TOKENIZER: t.Optional[t.Any] = None
 579
 580    KEYWORDS: t.Dict[str, TokenType] = {
 581        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 582        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 583        **{f"{{{{{postfix}": TokenType.BLOCK_START for postfix in ("+", "-")},
 584        **{f"{prefix}}}}}": TokenType.BLOCK_END for prefix in ("+", "-")},
 585        "/*+": TokenType.HINT,
 586        "==": TokenType.EQ,
 587        "::": TokenType.DCOLON,
 588        "||": TokenType.DPIPE,
 589        ">=": TokenType.GTE,
 590        "<=": TokenType.LTE,
 591        "<>": TokenType.NEQ,
 592        "!=": TokenType.NEQ,
 593        ":=": TokenType.COLON_EQ,
 594        "<=>": TokenType.NULLSAFE_EQ,
 595        "->": TokenType.ARROW,
 596        "->>": TokenType.DARROW,
 597        "=>": TokenType.FARROW,
 598        "#>": TokenType.HASH_ARROW,
 599        "#>>": TokenType.DHASH_ARROW,
 600        "<->": TokenType.LR_ARROW,
 601        "&&": TokenType.DAMP,
 602        "??": TokenType.DQMARK,
 603        "ALL": TokenType.ALL,
 604        "ALWAYS": TokenType.ALWAYS,
 605        "AND": TokenType.AND,
 606        "ANTI": TokenType.ANTI,
 607        "ANY": TokenType.ANY,
 608        "ASC": TokenType.ASC,
 609        "AS": TokenType.ALIAS,
 610        "ASOF": TokenType.ASOF,
 611        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 612        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 613        "BEGIN": TokenType.BEGIN,
 614        "BETWEEN": TokenType.BETWEEN,
 615        "CACHE": TokenType.CACHE,
 616        "UNCACHE": TokenType.UNCACHE,
 617        "CASE": TokenType.CASE,
 618        "CHARACTER SET": TokenType.CHARACTER_SET,
 619        "CLUSTER BY": TokenType.CLUSTER_BY,
 620        "COLLATE": TokenType.COLLATE,
 621        "COLUMN": TokenType.COLUMN,
 622        "COMMIT": TokenType.COMMIT,
 623        "CONNECT BY": TokenType.CONNECT_BY,
 624        "CONSTRAINT": TokenType.CONSTRAINT,
 625        "CREATE": TokenType.CREATE,
 626        "CROSS": TokenType.CROSS,
 627        "CUBE": TokenType.CUBE,
 628        "CURRENT_DATE": TokenType.CURRENT_DATE,
 629        "CURRENT_TIME": TokenType.CURRENT_TIME,
 630        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 631        "CURRENT_USER": TokenType.CURRENT_USER,
 632        "DATABASE": TokenType.DATABASE,
 633        "DEFAULT": TokenType.DEFAULT,
 634        "DELETE": TokenType.DELETE,
 635        "DESC": TokenType.DESC,
 636        "DESCRIBE": TokenType.DESCRIBE,
 637        "DISTINCT": TokenType.DISTINCT,
 638        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 639        "DIV": TokenType.DIV,
 640        "DROP": TokenType.DROP,
 641        "ELSE": TokenType.ELSE,
 642        "END": TokenType.END,
 643        "ESCAPE": TokenType.ESCAPE,
 644        "EXCEPT": TokenType.EXCEPT,
 645        "EXECUTE": TokenType.EXECUTE,
 646        "EXISTS": TokenType.EXISTS,
 647        "FALSE": TokenType.FALSE,
 648        "FETCH": TokenType.FETCH,
 649        "FILTER": TokenType.FILTER,
 650        "FIRST": TokenType.FIRST,
 651        "FULL": TokenType.FULL,
 652        "FUNCTION": TokenType.FUNCTION,
 653        "FOR": TokenType.FOR,
 654        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 655        "FORMAT": TokenType.FORMAT,
 656        "FROM": TokenType.FROM,
 657        "GEOGRAPHY": TokenType.GEOGRAPHY,
 658        "GEOMETRY": TokenType.GEOMETRY,
 659        "GLOB": TokenType.GLOB,
 660        "GROUP BY": TokenType.GROUP_BY,
 661        "GROUPING SETS": TokenType.GROUPING_SETS,
 662        "HAVING": TokenType.HAVING,
 663        "ILIKE": TokenType.ILIKE,
 664        "IN": TokenType.IN,
 665        "INDEX": TokenType.INDEX,
 666        "INET": TokenType.INET,
 667        "INNER": TokenType.INNER,
 668        "INSERT": TokenType.INSERT,
 669        "INTERVAL": TokenType.INTERVAL,
 670        "INTERSECT": TokenType.INTERSECT,
 671        "INTO": TokenType.INTO,
 672        "IS": TokenType.IS,
 673        "ISNULL": TokenType.ISNULL,
 674        "JOIN": TokenType.JOIN,
 675        "KEEP": TokenType.KEEP,
 676        "KILL": TokenType.KILL,
 677        "LATERAL": TokenType.LATERAL,
 678        "LEFT": TokenType.LEFT,
 679        "LIKE": TokenType.LIKE,
 680        "LIMIT": TokenType.LIMIT,
 681        "LOAD": TokenType.LOAD,
 682        "LOCK": TokenType.LOCK,
 683        "MERGE": TokenType.MERGE,
 684        "NATURAL": TokenType.NATURAL,
 685        "NEXT": TokenType.NEXT,
 686        "NOT": TokenType.NOT,
 687        "NOTNULL": TokenType.NOTNULL,
 688        "NULL": TokenType.NULL,
 689        "OBJECT": TokenType.OBJECT,
 690        "OFFSET": TokenType.OFFSET,
 691        "ON": TokenType.ON,
 692        "OR": TokenType.OR,
 693        "XOR": TokenType.XOR,
 694        "ORDER BY": TokenType.ORDER_BY,
 695        "ORDINALITY": TokenType.ORDINALITY,
 696        "OUTER": TokenType.OUTER,
 697        "OVER": TokenType.OVER,
 698        "OVERLAPS": TokenType.OVERLAPS,
 699        "OVERWRITE": TokenType.OVERWRITE,
 700        "PARTITION": TokenType.PARTITION,
 701        "PARTITION BY": TokenType.PARTITION_BY,
 702        "PARTITIONED BY": TokenType.PARTITION_BY,
 703        "PARTITIONED_BY": TokenType.PARTITION_BY,
 704        "PERCENT": TokenType.PERCENT,
 705        "PIVOT": TokenType.PIVOT,
 706        "PRAGMA": TokenType.PRAGMA,
 707        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 708        "PROCEDURE": TokenType.PROCEDURE,
 709        "QUALIFY": TokenType.QUALIFY,
 710        "RANGE": TokenType.RANGE,
 711        "RECURSIVE": TokenType.RECURSIVE,
 712        "REGEXP": TokenType.RLIKE,
 713        "REPLACE": TokenType.REPLACE,
 714        "RETURNING": TokenType.RETURNING,
 715        "REFERENCES": TokenType.REFERENCES,
 716        "RIGHT": TokenType.RIGHT,
 717        "RLIKE": TokenType.RLIKE,
 718        "ROLLBACK": TokenType.ROLLBACK,
 719        "ROLLUP": TokenType.ROLLUP,
 720        "ROW": TokenType.ROW,
 721        "ROWS": TokenType.ROWS,
 722        "SCHEMA": TokenType.SCHEMA,
 723        "SELECT": TokenType.SELECT,
 724        "SEMI": TokenType.SEMI,
 725        "SET": TokenType.SET,
 726        "SETTINGS": TokenType.SETTINGS,
 727        "SHOW": TokenType.SHOW,
 728        "SIMILAR TO": TokenType.SIMILAR_TO,
 729        "SOME": TokenType.SOME,
 730        "SORT BY": TokenType.SORT_BY,
 731        "START WITH": TokenType.START_WITH,
 732        "TABLE": TokenType.TABLE,
 733        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 734        "TEMP": TokenType.TEMPORARY,
 735        "TEMPORARY": TokenType.TEMPORARY,
 736        "THEN": TokenType.THEN,
 737        "TRUE": TokenType.TRUE,
 738        "UNION": TokenType.UNION,
 739        "UNKNOWN": TokenType.UNKNOWN,
 740        "UNNEST": TokenType.UNNEST,
 741        "UNPIVOT": TokenType.UNPIVOT,
 742        "UPDATE": TokenType.UPDATE,
 743        "USE": TokenType.USE,
 744        "USING": TokenType.USING,
 745        "UUID": TokenType.UUID,
 746        "VALUES": TokenType.VALUES,
 747        "VIEW": TokenType.VIEW,
 748        "VOLATILE": TokenType.VOLATILE,
 749        "WHEN": TokenType.WHEN,
 750        "WHERE": TokenType.WHERE,
 751        "WINDOW": TokenType.WINDOW,
 752        "WITH": TokenType.WITH,
 753        "APPLY": TokenType.APPLY,
 754        "ARRAY": TokenType.ARRAY,
 755        "BIT": TokenType.BIT,
 756        "BOOL": TokenType.BOOLEAN,
 757        "BOOLEAN": TokenType.BOOLEAN,
 758        "BYTE": TokenType.TINYINT,
 759        "MEDIUMINT": TokenType.MEDIUMINT,
 760        "INT1": TokenType.TINYINT,
 761        "TINYINT": TokenType.TINYINT,
 762        "INT16": TokenType.SMALLINT,
 763        "SHORT": TokenType.SMALLINT,
 764        "SMALLINT": TokenType.SMALLINT,
 765        "INT128": TokenType.INT128,
 766        "HUGEINT": TokenType.INT128,
 767        "INT2": TokenType.SMALLINT,
 768        "INTEGER": TokenType.INT,
 769        "INT": TokenType.INT,
 770        "INT4": TokenType.INT,
 771        "INT32": TokenType.INT,
 772        "INT64": TokenType.BIGINT,
 773        "LONG": TokenType.BIGINT,
 774        "BIGINT": TokenType.BIGINT,
 775        "INT8": TokenType.TINYINT,
 776        "DEC": TokenType.DECIMAL,
 777        "DECIMAL": TokenType.DECIMAL,
 778        "BIGDECIMAL": TokenType.BIGDECIMAL,
 779        "BIGNUMERIC": TokenType.BIGDECIMAL,
 780        "MAP": TokenType.MAP,
 781        "NULLABLE": TokenType.NULLABLE,
 782        "NUMBER": TokenType.DECIMAL,
 783        "NUMERIC": TokenType.DECIMAL,
 784        "FIXED": TokenType.DECIMAL,
 785        "REAL": TokenType.FLOAT,
 786        "FLOAT": TokenType.FLOAT,
 787        "FLOAT4": TokenType.FLOAT,
 788        "FLOAT8": TokenType.DOUBLE,
 789        "DOUBLE": TokenType.DOUBLE,
 790        "DOUBLE PRECISION": TokenType.DOUBLE,
 791        "JSON": TokenType.JSON,
 792        "CHAR": TokenType.CHAR,
 793        "CHARACTER": TokenType.CHAR,
 794        "NCHAR": TokenType.NCHAR,
 795        "VARCHAR": TokenType.VARCHAR,
 796        "VARCHAR2": TokenType.VARCHAR,
 797        "NVARCHAR": TokenType.NVARCHAR,
 798        "NVARCHAR2": TokenType.NVARCHAR,
 799        "STR": TokenType.TEXT,
 800        "STRING": TokenType.TEXT,
 801        "TEXT": TokenType.TEXT,
 802        "LONGTEXT": TokenType.LONGTEXT,
 803        "MEDIUMTEXT": TokenType.MEDIUMTEXT,
 804        "TINYTEXT": TokenType.TINYTEXT,
 805        "CLOB": TokenType.TEXT,
 806        "LONGVARCHAR": TokenType.TEXT,
 807        "BINARY": TokenType.BINARY,
 808        "BLOB": TokenType.VARBINARY,
 809        "LONGBLOB": TokenType.LONGBLOB,
 810        "MEDIUMBLOB": TokenType.MEDIUMBLOB,
 811        "TINYBLOB": TokenType.TINYBLOB,
 812        "BYTEA": TokenType.VARBINARY,
 813        "VARBINARY": TokenType.VARBINARY,
 814        "TIME": TokenType.TIME,
 815        "TIMETZ": TokenType.TIMETZ,
 816        "TIMESTAMP": TokenType.TIMESTAMP,
 817        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 818        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 819        "DATE": TokenType.DATE,
 820        "DATETIME": TokenType.DATETIME,
 821        "INT4RANGE": TokenType.INT4RANGE,
 822        "INT4MULTIRANGE": TokenType.INT4MULTIRANGE,
 823        "INT8RANGE": TokenType.INT8RANGE,
 824        "INT8MULTIRANGE": TokenType.INT8MULTIRANGE,
 825        "NUMRANGE": TokenType.NUMRANGE,
 826        "NUMMULTIRANGE": TokenType.NUMMULTIRANGE,
 827        "TSRANGE": TokenType.TSRANGE,
 828        "TSMULTIRANGE": TokenType.TSMULTIRANGE,
 829        "TSTZRANGE": TokenType.TSTZRANGE,
 830        "TSTZMULTIRANGE": TokenType.TSTZMULTIRANGE,
 831        "DATERANGE": TokenType.DATERANGE,
 832        "DATEMULTIRANGE": TokenType.DATEMULTIRANGE,
 833        "UNIQUE": TokenType.UNIQUE,
 834        "STRUCT": TokenType.STRUCT,
 835        "VARIANT": TokenType.VARIANT,
 836        "ALTER": TokenType.ALTER,
 837        "ANALYZE": TokenType.COMMAND,
 838        "CALL": TokenType.COMMAND,
 839        "COMMENT": TokenType.COMMENT,
 840        "COPY": TokenType.COMMAND,
 841        "EXPLAIN": TokenType.COMMAND,
 842        "GRANT": TokenType.COMMAND,
 843        "OPTIMIZE": TokenType.COMMAND,
 844        "PREPARE": TokenType.COMMAND,
 845        "TRUNCATE": TokenType.COMMAND,
 846        "VACUUM": TokenType.COMMAND,
 847        "USER-DEFINED": TokenType.USERDEFINED,
 848        "FOR VERSION": TokenType.VERSION_SNAPSHOT,
 849        "FOR TIMESTAMP": TokenType.TIMESTAMP_SNAPSHOT,
 850    }
 851
 852    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 853        " ": TokenType.SPACE,
 854        "\t": TokenType.SPACE,
 855        "\n": TokenType.BREAK,
 856        "\r": TokenType.BREAK,
 857    }
 858
 859    COMMANDS = {
 860        TokenType.COMMAND,
 861        TokenType.EXECUTE,
 862        TokenType.FETCH,
 863        TokenType.SHOW,
 864    }
 865
 866    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 867
 868    # handle numeric literals like in hive (3L = BIGINT)
 869    NUMERIC_LITERALS: t.Dict[str, str] = {}
 870
 871    COMMENTS = ["--", ("/*", "*/")]
 872
 873    __slots__ = (
 874        "sql",
 875        "size",
 876        "tokens",
 877        "dialect",
 878        "_start",
 879        "_current",
 880        "_line",
 881        "_col",
 882        "_comments",
 883        "_char",
 884        "_end",
 885        "_peek",
 886        "_prev_token_line",
 887        "_rs_dialect_settings",
 888    )
 889
 890    def __init__(self, dialect: DialectType = None) -> None:
 891        from sqlglot.dialects import Dialect
 892
 893        self.dialect = Dialect.get_or_raise(dialect)
 894
 895        if USE_RS_TOKENIZER:
 896            self._rs_dialect_settings = RsTokenizerDialectSettings(
 897                escape_sequences=self.dialect.ESCAPE_SEQUENCES,
 898                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
 899            )
 900
 901        self.reset()
 902
 903    def reset(self) -> None:
 904        self.sql = ""
 905        self.size = 0
 906        self.tokens: t.List[Token] = []
 907        self._start = 0
 908        self._current = 0
 909        self._line = 1
 910        self._col = 0
 911        self._comments: t.List[str] = []
 912
 913        self._char = ""
 914        self._end = False
 915        self._peek = ""
 916        self._prev_token_line = -1
 917
 918    def tokenize(self, sql: str) -> t.List[Token]:
 919        """Returns a list of tokens corresponding to the SQL string `sql`."""
 920        if USE_RS_TOKENIZER:
 921            return self.tokenize_rs(sql)
 922
 923        self.reset()
 924        self.sql = sql
 925        self.size = len(sql)
 926
 927        try:
 928            self._scan()
 929        except Exception as e:
 930            start = max(self._current - 50, 0)
 931            end = min(self._current + 50, self.size - 1)
 932            context = self.sql[start:end]
 933            raise TokenError(f"Error tokenizing '{context}'") from e
 934
 935        return self.tokens
 936
 937    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 938        while self.size and not self._end:
 939            current = self._current
 940
 941            # skip spaces inline rather than iteratively call advance()
 942            # for performance reasons
 943            while current < self.size:
 944                char = self.sql[current]
 945
 946                if char == " " or char == "\t":
 947                    current += 1
 948                else:
 949                    break
 950
 951            n = current - self._current
 952            self._start = current
 953            self._advance(n if n > 1 else 1)
 954
 955            if self._char is None:
 956                break
 957
 958            if not self._char.isspace():
 959                if self._char.isdigit():
 960                    self._scan_number()
 961                elif self._char in self._IDENTIFIERS:
 962                    self._scan_identifier(self._IDENTIFIERS[self._char])
 963                else:
 964                    self._scan_keywords()
 965
 966            if until and until():
 967                break
 968
 969        if self.tokens and self._comments:
 970            self.tokens[-1].comments.extend(self._comments)
 971
 972    def _chars(self, size: int) -> str:
 973        if size == 1:
 974            return self._char
 975
 976        start = self._current - 1
 977        end = start + size
 978
 979        return self.sql[start:end] if end <= self.size else ""
 980
 981    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 982        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 983            # Ensures we don't count an extra line if we get a \r\n line break sequence
 984            if self._char == "\r" and self._peek == "\n":
 985                i = 2
 986                self._start += 1
 987
 988            self._col = 1
 989            self._line += 1
 990        else:
 991            self._col += i
 992
 993        self._current += i
 994        self._end = self._current >= self.size
 995        self._char = self.sql[self._current - 1]
 996        self._peek = "" if self._end else self.sql[self._current]
 997
 998        if alnum and self._char.isalnum():
 999            # Here we use local variables instead of attributes for better performance
1000            _col = self._col
1001            _current = self._current
1002            _end = self._end
1003            _peek = self._peek
1004
1005            while _peek.isalnum():
1006                _col += 1
1007                _current += 1
1008                _end = _current >= self.size
1009                _peek = "" if _end else self.sql[_current]
1010
1011            self._col = _col
1012            self._current = _current
1013            self._end = _end
1014            self._peek = _peek
1015            self._char = self.sql[_current - 1]
1016
1017    @property
1018    def _text(self) -> str:
1019        return self.sql[self._start : self._current]
1020
1021    def peek(self, i: int = 0) -> str:
1022        i = self._current + i
1023        if i < self.size:
1024            return self.sql[i]
1025        return ""
1026
1027    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
1028        self._prev_token_line = self._line
1029
1030        if self._comments and token_type == TokenType.SEMICOLON and self.tokens:
1031            self.tokens[-1].comments.extend(self._comments)
1032            self._comments = []
1033
1034        self.tokens.append(
1035            Token(
1036                token_type,
1037                text=self._text if text is None else text,
1038                line=self._line,
1039                col=self._col,
1040                start=self._start,
1041                end=self._current - 1,
1042                comments=self._comments,
1043            )
1044        )
1045        self._comments = []
1046
1047        # If we have either a semicolon or a begin token before the command's token, we'll parse
1048        # whatever follows the command's token as a string
1049        if (
1050            token_type in self.COMMANDS
1051            and self._peek != ";"
1052            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
1053        ):
1054            start = self._current
1055            tokens = len(self.tokens)
1056            self._scan(lambda: self._peek == ";")
1057            self.tokens = self.tokens[:tokens]
1058            text = self.sql[start : self._current].strip()
1059            if text:
1060                self._add(TokenType.STRING, text)
1061
1062    def _scan_keywords(self) -> None:
1063        size = 0
1064        word = None
1065        chars = self._text
1066        char = chars
1067        prev_space = False
1068        skip = False
1069        trie = self._KEYWORD_TRIE
1070        single_token = char in self.SINGLE_TOKENS
1071
1072        while chars:
1073            if skip:
1074                result = TrieResult.PREFIX
1075            else:
1076                result, trie = in_trie(trie, char.upper())
1077
1078            if result == TrieResult.FAILED:
1079                break
1080            if result == TrieResult.EXISTS:
1081                word = chars
1082
1083            end = self._current + size
1084            size += 1
1085
1086            if end < self.size:
1087                char = self.sql[end]
1088                single_token = single_token or char in self.SINGLE_TOKENS
1089                is_space = char.isspace()
1090
1091                if not is_space or not prev_space:
1092                    if is_space:
1093                        char = " "
1094                    chars += char
1095                    prev_space = is_space
1096                    skip = False
1097                else:
1098                    skip = True
1099            else:
1100                char = ""
1101                break
1102
1103        if word:
1104            if self._scan_string(word):
1105                return
1106            if self._scan_comment(word):
1107                return
1108            if prev_space or single_token or not char:
1109                self._advance(size - 1)
1110                word = word.upper()
1111                self._add(self.KEYWORDS[word], text=word)
1112                return
1113
1114        if self._char in self.SINGLE_TOKENS:
1115            self._add(self.SINGLE_TOKENS[self._char], text=self._char)
1116            return
1117
1118        self._scan_var()
1119
1120    def _scan_comment(self, comment_start: str) -> bool:
1121        if comment_start not in self._COMMENTS:
1122            return False
1123
1124        comment_start_line = self._line
1125        comment_start_size = len(comment_start)
1126        comment_end = self._COMMENTS[comment_start]
1127
1128        if comment_end:
1129            # Skip the comment's start delimiter
1130            self._advance(comment_start_size)
1131
1132            comment_end_size = len(comment_end)
1133            while not self._end and self._chars(comment_end_size) != comment_end:
1134                self._advance(alnum=True)
1135
1136            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
1137            self._advance(comment_end_size - 1)
1138        else:
1139        while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1140                self._advance(alnum=True)
1141            self._comments.append(self._text[comment_start_size:])
1142
1143        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
1144        # Multiple consecutive comments are preserved by appending them to the current comments list.
1145        if comment_start_line == self._prev_token_line:
1146            self.tokens[-1].comments.extend(self._comments)
1147            self._comments = []
1148            self._prev_token_line = self._line
1149
1150        return True
1151
1152    def _scan_number(self) -> None:
1153        if self._char == "0":
1154            peek = self._peek.upper()
1155            if peek == "B":
1156                return self._scan_bits() if self.BIT_STRINGS else self._add(TokenType.NUMBER)
1157            elif peek == "X":
1158                return self._scan_hex() if self.HEX_STRINGS else self._add(TokenType.NUMBER)
1159
1160        decimal = False
1161        scientific = 0
1162
1163        while True:
1164            if self._peek.isdigit():
1165                self._advance()
1166            elif self._peek == "." and not decimal:
1167                after = self.peek(1)
1168                if after.isdigit() or not after.isalpha():
1169                    decimal = True
1170                    self._advance()
1171                else:
1172                    return self._add(TokenType.VAR)
1173            elif self._peek in ("-", "+") and scientific == 1:
1174                scientific += 1
1175                self._advance()
1176            elif self._peek.upper() == "E" and not scientific:
1177                scientific += 1
1178                self._advance()
1179            elif self._peek.isidentifier():
1180                number_text = self._text
1181                literal = ""
1182
1183                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1184                    literal += self._peek
1185                    self._advance()
1186
1187                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal.upper(), ""))
1188
1189                if token_type:
1190                    self._add(TokenType.NUMBER, number_text)
1191                    self._add(TokenType.DCOLON, "::")
1192                    return self._add(token_type, literal)
1193                elif self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT:
1194                    return self._add(TokenType.VAR)
1195
1196                self._advance(-len(literal))
1197                return self._add(TokenType.NUMBER, number_text)
1198            else:
1199                return self._add(TokenType.NUMBER)
1200
1201    def _scan_bits(self) -> None:
1202        self._advance()
1203        value = self._extract_value()
1204        try:
1205            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
1206            int(value, 2)
1207            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1208        except ValueError:
1209            self._add(TokenType.IDENTIFIER)
1210
1211    def _scan_hex(self) -> None:
1212        self._advance()
1213        value = self._extract_value()
1214        try:
1215            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
1216            int(value, 16)
1217            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1218        except ValueError:
1219            self._add(TokenType.IDENTIFIER)
1220
1221    def _extract_value(self) -> str:
1222        while True:
1223            char = self._peek.strip()
1224            if char and char not in self.SINGLE_TOKENS:
1225                self._advance(alnum=True)
1226            else:
1227                break
1228
1229        return self._text
1230
1231    def _scan_string(self, start: str) -> bool:
1232        base = None
1233        token_type = TokenType.STRING
1234
1235        if start in self._QUOTES:
1236            end = self._QUOTES[start]
1237        elif start in self._FORMAT_STRINGS:
1238            end, token_type = self._FORMAT_STRINGS[start]
1239
1240            if token_type == TokenType.HEX_STRING:
1241                base = 16
1242            elif token_type == TokenType.BIT_STRING:
1243                base = 2
1244            elif token_type == TokenType.HEREDOC_STRING:
1245                self._advance()
1246                tag = "" if self._char == end else self._extract_string(end)
1247                end = f"{start}{tag}{end}"
1248        else:
1249            return False
1250
1251        self._advance(len(start))
1252        text = self._extract_string(end)
1253
1254        if base:
1255            try:
1256                int(text, base)
1257            except ValueError:
1258                raise TokenError(
1259                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1260                )
1261
1262        self._add(token_type, text)
1263        return True
1264
1265    def _scan_identifier(self, identifier_end: str) -> None:
1266        self._advance()
1267        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1268        self._add(TokenType.IDENTIFIER, text)
1269
1270    def _scan_var(self) -> None:
1271        while True:
1272            char = self._peek.strip()
1273            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1274                self._advance(alnum=True)
1275            else:
1276                break
1277
1278        self._add(
1279            TokenType.VAR
1280            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1281            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1282        )
1283
1284    def _extract_string(self, delimiter: str, escapes=None) -> str:
1285        text = ""
1286        delim_size = len(delimiter)
1287        escapes = self._STRING_ESCAPES if escapes is None else escapes
1288
1289        while True:
1290            if (
1291                self._char in escapes
1292                and (self._peek == delimiter or self._peek in escapes)
1293                and (self._char not in self._QUOTES or self._char == self._peek)
1294            ):
1295                if self._peek == delimiter:
1296                    text += self._peek
1297                else:
1298                    text += self._char + self._peek
1299
1300                if self._current + 1 < self.size:
1301                    self._advance(2)
1302                else:
1303                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._current}")
1304            else:
1305                if self._chars(delim_size) == delimiter:
1306                    if delim_size > 1:
1307                        self._advance(delim_size - 1)
1308                    break
1309
1310                if self._end:
1311                    raise TokenError(f"Missing {delimiter} from {self._line}:{self._start}")
1312
1313                if (
1314                    self.dialect.ESCAPE_SEQUENCES
1315                    and self._peek
1316                    and self._char in self.STRING_ESCAPES
1317                ):
1318                    escaped_sequence = self.dialect.ESCAPE_SEQUENCES.get(self._char + self._peek)
1319                    if escaped_sequence:
1320                        self._advance(2)
1321                        text += escaped_sequence
1322                        continue
1323
1324                current = self._current - 1
1325                self._advance(alnum=True)
1326                text += self.sql[current : self._current - 1]
1327
1328        return text
1329
1330    def tokenize_rs(self, sql: str) -> t.List[Token]:
1331        if not self._RS_TOKENIZER:
1332            raise SqlglotError("Rust tokenizer is not available")
1333
1334        try:
1335            tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
1336            for token in tokens:
1337                token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
1338            return tokens
1339        except Exception as e:
1340            raise TokenError(str(e))
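
As tokenize shows, the Rust tokenizer from sqlglotrs is preferred when available: USE_RS_TOKENIZER is resolved once at import time from the SQLGLOTRS_TOKENIZER environment variable, so the pure-Python scanner can be forced by setting that variable first (a sketch, assuming sqlglotrs is installed):

import os
os.environ["SQLGLOTRS_TOKENIZER"] = "0"  # must run before the first sqlglot import

from sqlglot.tokens import Tokenizer  # tokenize() now uses the Python scanner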
Tokenizer( dialect: Union[str, sqlglot.dialects.dialect.Dialect, Type[sqlglot.dialects.dialect.Dialect], NoneType] = None)
890    def __init__(self, dialect: DialectType = None) -> None:
891        from sqlglot.dialects import Dialect
892
893        self.dialect = Dialect.get_or_raise(dialect)
894
895        if USE_RS_TOKENIZER:
896            self._rs_dialect_settings = RsTokenizerDialectSettings(
897                escape_sequences=self.dialect.ESCAPE_SEQUENCES,
898                identifiers_can_start_with_digit=self.dialect.IDENTIFIERS_CAN_START_WITH_DIGIT,
899            )
900
901        self.reset()
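
Typical usage is to instantiate the tokenizer (optionally with a dialect) and call tokenize. A minimal sketch using the default dialect:

from sqlglot.tokens import Tokenizer

tokens = Tokenizer().tokenize("SELECT 1 FROM t")
print([(tok.token_type.name, tok.text) for tok in tokens])
# [('SELECT', 'SELECT'), ('NUMBER', '1'), ('FROM', 'FROM'), ('VAR', 't')]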
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>}
BIT_STRINGS: List[Union[str, Tuple[str, str]]] = []
BYTE_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEX_STRINGS: List[Union[str, Tuple[str, str]]] = []
RAW_STRINGS: List[Union[str, Tuple[str, str]]] = []
HEREDOC_STRINGS: List[Union[str, Tuple[str, str]]] = []
UNICODE_STRINGS: List[Union[str, Tuple[str, str]]] = []
IDENTIFIERS: List[Union[str, Tuple[str, str]]] = ['"']
IDENTIFIER_ESCAPES = ['"']
QUOTES: List[Union[str, Tuple[str, str]]] = ["'"]
STRING_ESCAPES = ["'"]
VAR_SINGLE_TOKENS: Set[str] = set()
KEYWORDS: Dict[str, TokenType] = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': 
<TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': 
<TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 
'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
WHITE_SPACE: Dict[Optional[str], TokenType] = {' ': <TokenType.SPACE: 'SPACE'>, '\t': <TokenType.SPACE: 'SPACE'>, '\n': <TokenType.BREAK: 'BREAK'>, '\r': <TokenType.BREAK: 'BREAK'>}
COMMANDS = {<TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.COMMAND: 'COMMAND'>}
COMMAND_PREFIX_TOKENS = {<TokenType.BEGIN: 'BEGIN'>, <TokenType.SEMICOLON: 'SEMICOLON'>}
NUMERIC_LITERALS: Dict[str, str] = {}
COMMENTS = ['--', ('/*', '*/')]
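These class-level tables drive the scanner: KEYWORDS maps (possibly multi-word) keyword spellings to TokenTypes, and COMMENTS defines the line and block comment delimiters. A quick illustration, assuming the default Tokenizer:

    from sqlglot.tokens import Tokenizer, TokenType

    # 'GROUP BY' is registered in KEYWORDS as a multi-word keyword, so it
    # scans as a single GROUP_BY token rather than two separate words
    tokens = Tokenizer().tokenize("SELECT a FROM t GROUP BY a")
    assert any(token.token_type == TokenType.GROUP_BY for token in tokens)

    # Synonyms collapse to one token type: both 'BOOL' and 'BOOLEAN'
    # map to TokenType.BOOLEAN in KEYWORDS
    tokens = Tokenizer().tokenize("CAST(x AS BOOL)")
    assert any(token.token_type == TokenType.BOOLEAN for token in tokens)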
dialect
def reset(self) -> None:
    # Clear the previous scan's input and output
    self.sql = ""
    self.size = 0
    self.tokens: t.List[Token] = []

    # Reset the scanner's cursor and position tracking
    self._start = 0
    self._current = 0
    self._line = 1
    self._col = 0
    self._comments: t.List[str] = []

    self._char = ""
    self._end = False
    self._peek = ""
    self._prev_token_line = -1
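reset() reinitializes every piece of per-scan state, which is what allows a single Tokenizer instance to be reused; tokenize() calls it before each pure-Python scan. A small sketch:

    from sqlglot.tokens import Tokenizer

    tokenizer = Tokenizer()
    tokenizer.sql = "SELECT 1"
    tokenizer.size = len(tokenizer.sql)

    # reset() clears all per-scan state, so the same instance can be
    # reused across inputs
    tokenizer.reset()
    assert tokenizer.sql == "" and tokenizer.size == 0 and tokenizer.tokens == []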
def tokenize(self, sql: str) -> t.List[Token]:
    """Returns a list of tokens corresponding to the SQL string `sql`."""
    if USE_RS_TOKENIZER:
        return self.tokenize_rs(sql)

    self.reset()
    self.sql = sql
    self.size = len(sql)

    try:
        self._scan()
    except Exception as e:
        # Surface ~50 characters of context on either side of the
        # position where scanning stopped
        start = max(self._current - 50, 0)
        end = min(self._current + 50, self.size - 1)
        context = self.sql[start:end]
        raise TokenError(f"Error tokenizing '{context}'") from e

    return self.tokens

Returns a list of tokens corresponding to the SQL string `sql`.
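A typical call looks like this; each Token carries its type, raw text, and position:

    from sqlglot.errors import TokenError
    from sqlglot.tokens import Tokenizer

    for token in Tokenizer().tokenize("SELECT x FROM y"):
        print(token.token_type, repr(token.text), token.line, token.col)

    # Scan failures are wrapped in TokenError with context around the
    # position where scanning stopped (see the source above)
    try:
        Tokenizer().tokenize("'unterminated string")
    except TokenError as e:
        print(e)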

def peek(self, i: int = 0) -> str:
    # Return the character `i` positions ahead of the cursor, or an
    # empty string when the lookahead runs past the end of the input
    i = self._current + i
    if i < self.size:
        return self.sql[i]
    return ""
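peek is an internal helper used by the scanner. A minimal sketch that primes the public sql/size fields by hand, purely to illustrate the lookahead behavior (not how the API is normally used):

    from sqlglot.tokens import Tokenizer

    tokenizer = Tokenizer()
    tokenizer.reset()
    tokenizer.sql = "SELECT"
    tokenizer.size = len(tokenizer.sql)

    assert tokenizer.peek() == "S"    # character at the cursor (offset 0)
    assert tokenizer.peek(5) == "T"   # lookahead within bounds
    assert tokenizer.peek(99) == ""   # past the end: empty string, no error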
def tokenize_rs(self, sql: str) -> t.List[Token]:
    if not self._RS_TOKENIZER:
        raise SqlglotError("Rust tokenizer is not available")

    try:
        tokens = self._RS_TOKENIZER.tokenize(sql, self._rs_dialect_settings)
        # Map the Rust tokens' type indices back to Python TokenType members
        for token in tokens:
            token.token_type = _ALL_TOKEN_TYPES[token.token_type_index]
        return tokens
    except Exception as e:
        raise TokenError(str(e))
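Whether tokenize() dispatches to the Rust scanner is decided once, at import time, from the SQLGLOTRS_TOKENIZER environment variable. A sketch, assuming the flag is set before sqlglot is first imported:

    import os

    # Must happen before sqlglot is imported: USE_RS_TOKENIZER is
    # computed once at module import time
    os.environ["SQLGLOTRS_TOKENIZER"] = "0"

    from sqlglot.tokens import USE_RS_TOKENIZER, Tokenizer

    assert not USE_RS_TOKENIZER          # force the pure-Python scanner
    tokens = Tokenizer().tokenize("SELECT 1")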
size: int
    The length of the SQL string currently loaded; set by the pure-Python tokenize() path, cleared to 0 by reset().
sql: str
    The SQL string currently loaded into the scanner; cleared to "" by reset().
tokens: t.List[Token]
    The tokens produced by the most recent scan; cleared to [] by reset().