sqlglot.tokens
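
sqlglot's tokenizer lives here: the TokenType enumeration names every kind of lexeme, Token carries a lexeme's type, text and position, and Tokenizer turns a SQL string into a list of Tokens. A minimal usage sketch (the exact token stream can vary across sqlglot versions and dialects):

    from sqlglot.tokens import Tokenizer

    # tokenize() returns a list of Token objects, one per lexeme.
    for token in Tokenizer().tokenize("SELECT a FROM b"):
        print(token.token_type, repr(token.text))
    # TokenType.SELECT 'SELECT'
    # TokenType.VAR 'a'
    # TokenType.FROM 'FROM'
    # TokenType.VAR 'b'

Dialect tokenizers subclass Tokenizer and override class-level settings such as QUOTES, IDENTIFIERS and KEYWORDS; the _Tokenizer metaclass compiles those settings into the lookup tables and keyword trie used while scanning.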

   1from __future__ import annotations
   2
   3import typing as t
   4from enum import auto
   5
   6from sqlglot.helper import AutoName
   7from sqlglot.trie import in_trie, new_trie
   8
   9
  10class TokenType(AutoName):
  11    L_PAREN = auto()
  12    R_PAREN = auto()
  13    L_BRACKET = auto()
  14    R_BRACKET = auto()
  15    L_BRACE = auto()
  16    R_BRACE = auto()
  17    COMMA = auto()
  18    DOT = auto()
  19    DASH = auto()
  20    PLUS = auto()
  21    COLON = auto()
  22    DCOLON = auto()
  23    SEMICOLON = auto()
  24    STAR = auto()
  25    BACKSLASH = auto()
  26    SLASH = auto()
  27    LT = auto()
  28    LTE = auto()
  29    GT = auto()
  30    GTE = auto()
  31    NOT = auto()
  32    EQ = auto()
  33    NEQ = auto()
  34    NULLSAFE_EQ = auto()
  35    AND = auto()
  36    OR = auto()
  37    AMP = auto()
  38    DPIPE = auto()
  39    PIPE = auto()
  40    CARET = auto()
  41    TILDA = auto()
  42    ARROW = auto()
  43    DARROW = auto()
  44    FARROW = auto()
  45    HASH = auto()
  46    HASH_ARROW = auto()
  47    DHASH_ARROW = auto()
  48    LR_ARROW = auto()
  49    LT_AT = auto()
  50    AT_GT = auto()
  51    DOLLAR = auto()
  52    PARAMETER = auto()
  53    SESSION_PARAMETER = auto()
  54    NATIONAL = auto()
  55    DAMP = auto()
  56
  57    BLOCK_START = auto()
  58    BLOCK_END = auto()
  59
  60    SPACE = auto()
  61    BREAK = auto()
  62
  63    STRING = auto()
  64    NUMBER = auto()
  65    IDENTIFIER = auto()
  66    DATABASE = auto()
  67    COLUMN = auto()
  68    COLUMN_DEF = auto()
  69    SCHEMA = auto()
  70    TABLE = auto()
  71    VAR = auto()
  72    BIT_STRING = auto()
  73    HEX_STRING = auto()
  74    BYTE_STRING = auto()
  75
  76    # types
  77    BIT = auto()
  78    BOOLEAN = auto()
  79    TINYINT = auto()
  80    UTINYINT = auto()
  81    SMALLINT = auto()
  82    USMALLINT = auto()
  83    INT = auto()
  84    UINT = auto()
  85    BIGINT = auto()
  86    UBIGINT = auto()
  87    INT128 = auto()
  88    UINT128 = auto()
  89    INT256 = auto()
  90    UINT256 = auto()
  91    FLOAT = auto()
  92    DOUBLE = auto()
  93    DECIMAL = auto()
  94    BIGDECIMAL = auto()
  95    CHAR = auto()
  96    NCHAR = auto()
  97    VARCHAR = auto()
  98    NVARCHAR = auto()
  99    TEXT = auto()
 100    MEDIUMTEXT = auto()
 101    LONGTEXT = auto()
 102    MEDIUMBLOB = auto()
 103    LONGBLOB = auto()
 104    BINARY = auto()
 105    VARBINARY = auto()
 106    JSON = auto()
 107    JSONB = auto()
 108    TIME = auto()
 109    TIMESTAMP = auto()
 110    TIMESTAMPTZ = auto()
 111    TIMESTAMPLTZ = auto()
 112    DATETIME = auto()
 113    DATETIME64 = auto()
 114    DATE = auto()
 115    UUID = auto()
 116    GEOGRAPHY = auto()
 117    NULLABLE = auto()
 118    GEOMETRY = auto()
 119    HLLSKETCH = auto()
 120    HSTORE = auto()
 121    SUPER = auto()
 122    SERIAL = auto()
 123    SMALLSERIAL = auto()
 124    BIGSERIAL = auto()
 125    XML = auto()
 126    UNIQUEIDENTIFIER = auto()
 127    MONEY = auto()
 128    SMALLMONEY = auto()
 129    ROWVERSION = auto()
 130    IMAGE = auto()
 131    VARIANT = auto()
 132    OBJECT = auto()
 133    INET = auto()
 134
 135    # keywords
 136    ALIAS = auto()
 137    ALTER = auto()
 138    ALWAYS = auto()
 139    ALL = auto()
 140    ANTI = auto()
 141    ANY = auto()
 142    APPLY = auto()
 143    ARRAY = auto()
 144    ASC = auto()
 145    ASOF = auto()
 146    AT_TIME_ZONE = auto()
 147    AUTO_INCREMENT = auto()
 148    BEGIN = auto()
 149    BETWEEN = auto()
 150    BOTH = auto()
 151    BUCKET = auto()
 152    BY_DEFAULT = auto()
 153    CACHE = auto()
 154    CASCADE = auto()
 155    CASE = auto()
 156    CHARACTER_SET = auto()
 157    CLUSTER_BY = auto()
 158    COLLATE = auto()
 159    COMMAND = auto()
 160    COMMENT = auto()
 161    COMMIT = auto()
 162    COMPOUND = auto()
 163    CONSTRAINT = auto()
 164    CREATE = auto()
 165    CROSS = auto()
 166    CUBE = auto()
 167    CURRENT_DATE = auto()
 168    CURRENT_DATETIME = auto()
 169    CURRENT_ROW = auto()
 170    CURRENT_TIME = auto()
 171    CURRENT_TIMESTAMP = auto()
 172    CURRENT_USER = auto()
 173    DEFAULT = auto()
 174    DELETE = auto()
 175    DESC = auto()
 176    DESCRIBE = auto()
 177    DISTINCT = auto()
 178    DISTINCT_FROM = auto()
 179    DISTRIBUTE_BY = auto()
 180    DIV = auto()
 181    DROP = auto()
 182    ELSE = auto()
 183    END = auto()
 184    ESCAPE = auto()
 185    EXCEPT = auto()
 186    EXECUTE = auto()
 187    EXISTS = auto()
 188    FALSE = auto()
 189    FETCH = auto()
 190    FILTER = auto()
 191    FINAL = auto()
 192    FIRST = auto()
 193    FOLLOWING = auto()
 194    FOR = auto()
 195    FOREIGN_KEY = auto()
 196    FORMAT = auto()
 197    FROM = auto()
 198    FULL = auto()
 199    FUNCTION = auto()
 200    GLOB = auto()
 201    GLOBAL = auto()
 202    GROUP_BY = auto()
 203    GROUPING_SETS = auto()
 204    HAVING = auto()
 205    HINT = auto()
 206    IF = auto()
 207    IGNORE_NULLS = auto()
 208    ILIKE = auto()
 209    ILIKE_ANY = auto()
 210    IN = auto()
 211    INDEX = auto()
 212    INNER = auto()
 213    INSERT = auto()
 214    INTERSECT = auto()
 215    INTERVAL = auto()
 216    INTO = auto()
 217    INTRODUCER = auto()
 218    IRLIKE = auto()
 219    IS = auto()
 220    ISNULL = auto()
 221    JOIN = auto()
 222    JOIN_MARKER = auto()
 223    KEEP = auto()
 224    LANGUAGE = auto()
 225    LATERAL = auto()
 226    LAZY = auto()
 227    LEADING = auto()
 228    LEFT = auto()
 229    LIKE = auto()
 230    LIKE_ANY = auto()
 231    LIMIT = auto()
 232    LOAD_DATA = auto()
 233    LOCAL = auto()
 234    LOCK = auto()
 235    MAP = auto()
 236    MATCH_RECOGNIZE = auto()
 237    MATERIALIZED = auto()
 238    MERGE = auto()
 239    MOD = auto()
 240    NATURAL = auto()
 241    NEXT = auto()
 242    NEXT_VALUE_FOR = auto()
 243    NO_ACTION = auto()
 244    NOTNULL = auto()
 245    NULL = auto()
 246    NULLS_FIRST = auto()
 247    NULLS_LAST = auto()
 248    OFFSET = auto()
 249    ON = auto()
 250    ONLY = auto()
 251    OPTIONS = auto()
 252    ORDER_BY = auto()
 253    ORDERED = auto()
 254    ORDINALITY = auto()
 255    OUTER = auto()
 256    OUT_OF = auto()
 257    OVER = auto()
 258    OVERLAPS = auto()
 259    OVERWRITE = auto()
 260    PARTITION = auto()
 261    PARTITION_BY = auto()
 262    PERCENT = auto()
 263    PIVOT = auto()
 264    PLACEHOLDER = auto()
 265    PRAGMA = auto()
 266    PRECEDING = auto()
 267    PRIMARY_KEY = auto()
 268    PROCEDURE = auto()
 269    PROPERTIES = auto()
 270    PSEUDO_TYPE = auto()
 271    QUALIFY = auto()
 272    QUOTE = auto()
 273    RANGE = auto()
 274    RECURSIVE = auto()
 275    REPLACE = auto()
 276    RESPECT_NULLS = auto()
 277    RETURNING = auto()
 278    REFERENCES = auto()
 279    RIGHT = auto()
 280    RLIKE = auto()
 281    ROLLBACK = auto()
 282    ROLLUP = auto()
 283    ROW = auto()
 284    ROWS = auto()
 285    SEED = auto()
 286    SELECT = auto()
 287    SEMI = auto()
 288    SEPARATOR = auto()
 289    SERDE_PROPERTIES = auto()
 290    SET = auto()
 291    SETTINGS = auto()
 292    SHOW = auto()
 293    SIMILAR_TO = auto()
 294    SOME = auto()
 295    SORTKEY = auto()
 296    SORT_BY = auto()
 297    STRUCT = auto()
 298    TABLE_SAMPLE = auto()
 299    TEMPORARY = auto()
 300    TOP = auto()
 301    THEN = auto()
 302    TRAILING = auto()
 303    TRUE = auto()
 304    UNBOUNDED = auto()
 305    UNCACHE = auto()
 306    UNION = auto()
 307    UNLOGGED = auto()
 308    UNNEST = auto()
 309    UNPIVOT = auto()
 310    UPDATE = auto()
 311    USE = auto()
 312    USING = auto()
 313    VALUES = auto()
 314    VIEW = auto()
 315    VOLATILE = auto()
 316    WHEN = auto()
 317    WHERE = auto()
 318    WINDOW = auto()
 319    WITH = auto()
 320    WITH_TIME_ZONE = auto()
 321    WITH_LOCAL_TIME_ZONE = auto()
 322    WITHIN_GROUP = auto()
 323    WITHOUT_TIME_ZONE = auto()
 324    UNIQUE = auto()
 325
 326
 327class Token:
 328    __slots__ = ("token_type", "text", "line", "col", "end", "comments")
 329
 330    @classmethod
 331    def number(cls, number: int) -> Token:
 332        """Returns a NUMBER token with `number` as its text."""
 333        return cls(TokenType.NUMBER, str(number))
 334
 335    @classmethod
 336    def string(cls, string: str) -> Token:
 337        """Returns a STRING token with `string` as its text."""
 338        return cls(TokenType.STRING, string)
 339
 340    @classmethod
 341    def identifier(cls, identifier: str) -> Token:
 342        """Returns an IDENTIFIER token with `identifier` as its text."""
 343        return cls(TokenType.IDENTIFIER, identifier)
 344
 345    @classmethod
 346    def var(cls, var: str) -> Token:
 347        """Returns an VAR token with `var` as its text."""
 348        return cls(TokenType.VAR, var)
 349
 350    def __init__(
 351        self,
 352        token_type: TokenType,
 353        text: str,
 354        line: int = 1,
 355        col: int = 1,
 356        end: int = 0,
 357        comments: t.Optional[t.List[str]] = None,
 358    ) -> None:
 359        self.token_type = token_type
 360        self.text = text
 361        self.line = line
 362        size = len(text)
 363        self.col = col
 364        self.end = end if end else size
 361        self.comments = [] if comments is None else comments
 366
 367    @property
 368    def start(self) -> int:
 369        """Returns the start of the token."""
 370        return self.end - len(self.text)
 371
 372    def __repr__(self) -> str:
 373        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
 374        return f"<Token {attributes}>"
 375
 376
 377class _Tokenizer(type):
 378    def __new__(cls, clsname, bases, attrs):
 379        klass = super().__new__(cls, clsname, bases, attrs)
 380
 381        klass._QUOTES = {
 382            f"{prefix}{s}": e
 383            for s, e in cls._delimiter_list_to_dict(klass.QUOTES).items()
 384            for prefix in (("",) if s[0].isalpha() else ("", "n", "N"))
 385        }
 386        klass._BIT_STRINGS = cls._delimiter_list_to_dict(klass.BIT_STRINGS)
 387        klass._HEX_STRINGS = cls._delimiter_list_to_dict(klass.HEX_STRINGS)
 388        klass._BYTE_STRINGS = cls._delimiter_list_to_dict(klass.BYTE_STRINGS)
 389        klass._IDENTIFIERS = cls._delimiter_list_to_dict(klass.IDENTIFIERS)
 390        klass._STRING_ESCAPES = set(klass.STRING_ESCAPES)
 391        klass._IDENTIFIER_ESCAPES = set(klass.IDENTIFIER_ESCAPES)
 392        klass._COMMENTS = dict(
 393            (comment, None) if isinstance(comment, str) else (comment[0], comment[1])
 394            for comment in klass.COMMENTS
 395        )
 396
 397        klass.KEYWORD_TRIE = new_trie(
 398            key.upper()
 399            for key in {
 400                **klass.KEYWORDS,
 401                **{comment: TokenType.COMMENT for comment in klass._COMMENTS},
 402                **{quote: TokenType.QUOTE for quote in klass._QUOTES},
 403                **{bit_string: TokenType.BIT_STRING for bit_string in klass._BIT_STRINGS},
 404                **{hex_string: TokenType.HEX_STRING for hex_string in klass._HEX_STRINGS},
 405                **{byte_string: TokenType.BYTE_STRING for byte_string in klass._BYTE_STRINGS},
 406            }
 407            if " " in key or any(single in key for single in klass.SINGLE_TOKENS)
 408        )
 409
 410        return klass
 411
 412    @staticmethod
 413    def _delimiter_list_to_dict(items: t.List[str | t.Tuple[str, str]]) -> t.Dict[str, str]:
 414        return dict((item, item) if isinstance(item, str) else (item[0], item[1]) for item in items)
 415
 416
 417class Tokenizer(metaclass=_Tokenizer):
 418    SINGLE_TOKENS = {
 419        "(": TokenType.L_PAREN,
 420        ")": TokenType.R_PAREN,
 421        "[": TokenType.L_BRACKET,
 422        "]": TokenType.R_BRACKET,
 423        "{": TokenType.L_BRACE,
 424        "}": TokenType.R_BRACE,
 425        "&": TokenType.AMP,
 426        "^": TokenType.CARET,
 427        ":": TokenType.COLON,
 428        ",": TokenType.COMMA,
 429        ".": TokenType.DOT,
 430        "-": TokenType.DASH,
 431        "=": TokenType.EQ,
 432        ">": TokenType.GT,
 433        "<": TokenType.LT,
 434        "%": TokenType.MOD,
 435        "!": TokenType.NOT,
 436        "|": TokenType.PIPE,
 437        "+": TokenType.PLUS,
 438        ";": TokenType.SEMICOLON,
 439        "/": TokenType.SLASH,
 440        "\\": TokenType.BACKSLASH,
 441        "*": TokenType.STAR,
 442        "~": TokenType.TILDA,
 443        "?": TokenType.PLACEHOLDER,
 444        "@": TokenType.PARAMETER,
 445        # used for breaking a var like x'y' but nothing else
 446        # the token type doesn't matter
 447        "'": TokenType.QUOTE,
 448        "`": TokenType.IDENTIFIER,
 449        '"': TokenType.IDENTIFIER,
 450        "#": TokenType.HASH,
 451    }
 452
 453    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 454    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 455    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 456    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 457    IDENTIFIER_ESCAPES = ['"']
 458    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 459    STRING_ESCAPES = ["'"]
 460    VAR_SINGLE_TOKENS: t.Set[str] = set()
 461
 462    _COMMENTS: t.Dict[str, str] = {}
 463    _BIT_STRINGS: t.Dict[str, str] = {}
 464    _BYTE_STRINGS: t.Dict[str, str] = {}
 465    _HEX_STRINGS: t.Dict[str, str] = {}
 466    _IDENTIFIERS: t.Dict[str, str] = {}
 467    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 468    _QUOTES: t.Dict[str, str] = {}
 469    _STRING_ESCAPES: t.Set[str] = set()
 470
 471    KEYWORDS: t.Dict[t.Optional[str], TokenType] = {
 472        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 473        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 474        "{{+": TokenType.BLOCK_START,
 475        "{{-": TokenType.BLOCK_START,
 476        "+}}": TokenType.BLOCK_END,
 477        "-}}": TokenType.BLOCK_END,
 478        "/*+": TokenType.HINT,
 479        "==": TokenType.EQ,
 480        "::": TokenType.DCOLON,
 481        "||": TokenType.DPIPE,
 482        ">=": TokenType.GTE,
 483        "<=": TokenType.LTE,
 484        "<>": TokenType.NEQ,
 485        "!=": TokenType.NEQ,
 486        "<=>": TokenType.NULLSAFE_EQ,
 487        "->": TokenType.ARROW,
 488        "->>": TokenType.DARROW,
 489        "=>": TokenType.FARROW,
 490        "#>": TokenType.HASH_ARROW,
 491        "#>>": TokenType.DHASH_ARROW,
 492        "<->": TokenType.LR_ARROW,
 493        "&&": TokenType.DAMP,
 494        "ALL": TokenType.ALL,
 495        "ALWAYS": TokenType.ALWAYS,
 496        "AND": TokenType.AND,
 497        "ANTI": TokenType.ANTI,
 498        "ANY": TokenType.ANY,
 499        "ASC": TokenType.ASC,
 500        "AS": TokenType.ALIAS,
 501        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
 502        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 503        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 504        "BEGIN": TokenType.BEGIN,
 505        "BETWEEN": TokenType.BETWEEN,
 506        "BOTH": TokenType.BOTH,
 507        "BUCKET": TokenType.BUCKET,
 508        "BY DEFAULT": TokenType.BY_DEFAULT,
 509        "CACHE": TokenType.CACHE,
 510        "UNCACHE": TokenType.UNCACHE,
 511        "CASE": TokenType.CASE,
 512        "CASCADE": TokenType.CASCADE,
 513        "CHARACTER SET": TokenType.CHARACTER_SET,
 514        "CLUSTER BY": TokenType.CLUSTER_BY,
 515        "COLLATE": TokenType.COLLATE,
 516        "COLUMN": TokenType.COLUMN,
 517        "COMMIT": TokenType.COMMIT,
 518        "COMPOUND": TokenType.COMPOUND,
 519        "CONSTRAINT": TokenType.CONSTRAINT,
 520        "CREATE": TokenType.CREATE,
 521        "CROSS": TokenType.CROSS,
 522        "CUBE": TokenType.CUBE,
 523        "CURRENT_DATE": TokenType.CURRENT_DATE,
 524        "CURRENT ROW": TokenType.CURRENT_ROW,
 525        "CURRENT_TIME": TokenType.CURRENT_TIME,
 526        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 527        "CURRENT_USER": TokenType.CURRENT_USER,
 528        "DATABASE": TokenType.DATABASE,
 529        "DEFAULT": TokenType.DEFAULT,
 530        "DELETE": TokenType.DELETE,
 531        "DESC": TokenType.DESC,
 532        "DESCRIBE": TokenType.DESCRIBE,
 533        "DISTINCT": TokenType.DISTINCT,
 534        "DISTINCT FROM": TokenType.DISTINCT_FROM,
 535        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 536        "DIV": TokenType.DIV,
 537        "DROP": TokenType.DROP,
 538        "ELSE": TokenType.ELSE,
 539        "END": TokenType.END,
 540        "ESCAPE": TokenType.ESCAPE,
 541        "EXCEPT": TokenType.EXCEPT,
 542        "EXECUTE": TokenType.EXECUTE,
 543        "EXISTS": TokenType.EXISTS,
 544        "FALSE": TokenType.FALSE,
 545        "FETCH": TokenType.FETCH,
 546        "FILTER": TokenType.FILTER,
 547        "FIRST": TokenType.FIRST,
 548        "FULL": TokenType.FULL,
 549        "FUNCTION": TokenType.FUNCTION,
 550        "FOLLOWING": TokenType.FOLLOWING,
 551        "FOR": TokenType.FOR,
 552        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 553        "FORMAT": TokenType.FORMAT,
 554        "FROM": TokenType.FROM,
 555        "GEOGRAPHY": TokenType.GEOGRAPHY,
 556        "GEOMETRY": TokenType.GEOMETRY,
 557        "GLOB": TokenType.GLOB,
 558        "GROUP BY": TokenType.GROUP_BY,
 559        "GROUPING SETS": TokenType.GROUPING_SETS,
 560        "HAVING": TokenType.HAVING,
 561        "IF": TokenType.IF,
 562        "ILIKE": TokenType.ILIKE,
 563        "IGNORE NULLS": TokenType.IGNORE_NULLS,
 564        "IN": TokenType.IN,
 565        "INDEX": TokenType.INDEX,
 566        "INET": TokenType.INET,
 567        "INNER": TokenType.INNER,
 568        "INSERT": TokenType.INSERT,
 569        "INTERVAL": TokenType.INTERVAL,
 570        "INTERSECT": TokenType.INTERSECT,
 571        "INTO": TokenType.INTO,
 572        "IS": TokenType.IS,
 573        "ISNULL": TokenType.ISNULL,
 574        "JOIN": TokenType.JOIN,
 575        "KEEP": TokenType.KEEP,
 576        "LATERAL": TokenType.LATERAL,
 577        "LAZY": TokenType.LAZY,
 578        "LEADING": TokenType.LEADING,
 579        "LEFT": TokenType.LEFT,
 580        "LIKE": TokenType.LIKE,
 581        "LIMIT": TokenType.LIMIT,
 582        "LOAD DATA": TokenType.LOAD_DATA,
 583        "LOCAL": TokenType.LOCAL,
 584        "LOCK": TokenType.LOCK,
 585        "MATERIALIZED": TokenType.MATERIALIZED,
 586        "MERGE": TokenType.MERGE,
 587        "NATURAL": TokenType.NATURAL,
 588        "NEXT": TokenType.NEXT,
 589        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
 590        "NO ACTION": TokenType.NO_ACTION,
 591        "NOT": TokenType.NOT,
 592        "NOTNULL": TokenType.NOTNULL,
 593        "NULL": TokenType.NULL,
 594        "NULLS FIRST": TokenType.NULLS_FIRST,
 595        "NULLS LAST": TokenType.NULLS_LAST,
 596        "OBJECT": TokenType.OBJECT,
 597        "OFFSET": TokenType.OFFSET,
 598        "ON": TokenType.ON,
 599        "ONLY": TokenType.ONLY,
 600        "OPTIONS": TokenType.OPTIONS,
 601        "OR": TokenType.OR,
 602        "ORDER BY": TokenType.ORDER_BY,
 603        "ORDINALITY": TokenType.ORDINALITY,
 604        "OUTER": TokenType.OUTER,
 605        "OUT OF": TokenType.OUT_OF,
 606        "OVER": TokenType.OVER,
 607        "OVERLAPS": TokenType.OVERLAPS,
 608        "OVERWRITE": TokenType.OVERWRITE,
 609        "PARTITION": TokenType.PARTITION,
 610        "PARTITION BY": TokenType.PARTITION_BY,
 611        "PARTITIONED BY": TokenType.PARTITION_BY,
 612        "PARTITIONED_BY": TokenType.PARTITION_BY,
 613        "PERCENT": TokenType.PERCENT,
 614        "PIVOT": TokenType.PIVOT,
 615        "PRAGMA": TokenType.PRAGMA,
 616        "PRECEDING": TokenType.PRECEDING,
 617        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 618        "PROCEDURE": TokenType.PROCEDURE,
 619        "QUALIFY": TokenType.QUALIFY,
 620        "RANGE": TokenType.RANGE,
 621        "RECURSIVE": TokenType.RECURSIVE,
 622        "REGEXP": TokenType.RLIKE,
 623        "REPLACE": TokenType.REPLACE,
 624        "RESPECT NULLS": TokenType.RESPECT_NULLS,
 625        "REFERENCES": TokenType.REFERENCES,
 626        "RIGHT": TokenType.RIGHT,
 627        "RLIKE": TokenType.RLIKE,
 628        "ROLLBACK": TokenType.ROLLBACK,
 629        "ROLLUP": TokenType.ROLLUP,
 630        "ROW": TokenType.ROW,
 631        "ROWS": TokenType.ROWS,
 632        "SCHEMA": TokenType.SCHEMA,
 633        "SEED": TokenType.SEED,
 634        "SELECT": TokenType.SELECT,
 635        "SEMI": TokenType.SEMI,
 636        "SET": TokenType.SET,
 637        "SETTINGS": TokenType.SETTINGS,
 638        "SHOW": TokenType.SHOW,
 639        "SIMILAR TO": TokenType.SIMILAR_TO,
 640        "SOME": TokenType.SOME,
 641        "SORTKEY": TokenType.SORTKEY,
 642        "SORT BY": TokenType.SORT_BY,
 643        "TABLE": TokenType.TABLE,
 644        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 645        "TEMP": TokenType.TEMPORARY,
 646        "TEMPORARY": TokenType.TEMPORARY,
 647        "THEN": TokenType.THEN,
 648        "TRUE": TokenType.TRUE,
 649        "TRAILING": TokenType.TRAILING,
 650        "UNBOUNDED": TokenType.UNBOUNDED,
 651        "UNION": TokenType.UNION,
 652        "UNLOGGED": TokenType.UNLOGGED,
 653        "UNNEST": TokenType.UNNEST,
 654        "UNPIVOT": TokenType.UNPIVOT,
 655        "UPDATE": TokenType.UPDATE,
 656        "USE": TokenType.USE,
 657        "USING": TokenType.USING,
 658        "UUID": TokenType.UUID,
 659        "VALUES": TokenType.VALUES,
 660        "VIEW": TokenType.VIEW,
 661        "VOLATILE": TokenType.VOLATILE,
 662        "WHEN": TokenType.WHEN,
 663        "WHERE": TokenType.WHERE,
 664        "WINDOW": TokenType.WINDOW,
 665        "WITH": TokenType.WITH,
 666        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
 667        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
 668        "WITHIN GROUP": TokenType.WITHIN_GROUP,
 669        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
 670        "APPLY": TokenType.APPLY,
 671        "ARRAY": TokenType.ARRAY,
 672        "BIT": TokenType.BIT,
 673        "BOOL": TokenType.BOOLEAN,
 674        "BOOLEAN": TokenType.BOOLEAN,
 675        "BYTE": TokenType.TINYINT,
 676        "TINYINT": TokenType.TINYINT,
 677        "SHORT": TokenType.SMALLINT,
 678        "SMALLINT": TokenType.SMALLINT,
 679        "INT2": TokenType.SMALLINT,
 680        "INTEGER": TokenType.INT,
 681        "INT": TokenType.INT,
 682        "INT4": TokenType.INT,
 683        "LONG": TokenType.BIGINT,
 684        "BIGINT": TokenType.BIGINT,
 685        "INT8": TokenType.BIGINT,
 686        "DEC": TokenType.DECIMAL,
 687        "DECIMAL": TokenType.DECIMAL,
 688        "BIGDECIMAL": TokenType.BIGDECIMAL,
 689        "BIGNUMERIC": TokenType.BIGDECIMAL,
 690        "MAP": TokenType.MAP,
 691        "NULLABLE": TokenType.NULLABLE,
 692        "NUMBER": TokenType.DECIMAL,
 693        "NUMERIC": TokenType.DECIMAL,
 694        "FIXED": TokenType.DECIMAL,
 695        "REAL": TokenType.FLOAT,
 696        "FLOAT": TokenType.FLOAT,
 697        "FLOAT4": TokenType.FLOAT,
 698        "FLOAT8": TokenType.DOUBLE,
 699        "DOUBLE": TokenType.DOUBLE,
 700        "DOUBLE PRECISION": TokenType.DOUBLE,
 701        "JSON": TokenType.JSON,
 702        "CHAR": TokenType.CHAR,
 703        "CHARACTER": TokenType.CHAR,
 704        "NCHAR": TokenType.NCHAR,
 705        "VARCHAR": TokenType.VARCHAR,
 706        "VARCHAR2": TokenType.VARCHAR,
 707        "NVARCHAR": TokenType.NVARCHAR,
 708        "NVARCHAR2": TokenType.NVARCHAR,
 709        "STR": TokenType.TEXT,
 710        "STRING": TokenType.TEXT,
 711        "TEXT": TokenType.TEXT,
 712        "CLOB": TokenType.TEXT,
 713        "LONGVARCHAR": TokenType.TEXT,
 714        "BINARY": TokenType.BINARY,
 715        "BLOB": TokenType.VARBINARY,
 716        "BYTEA": TokenType.VARBINARY,
 717        "VARBINARY": TokenType.VARBINARY,
 718        "TIME": TokenType.TIME,
 719        "TIMESTAMP": TokenType.TIMESTAMP,
 720        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 721        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 722        "DATE": TokenType.DATE,
 723        "DATETIME": TokenType.DATETIME,
 724        "UNIQUE": TokenType.UNIQUE,
 725        "STRUCT": TokenType.STRUCT,
 726        "VARIANT": TokenType.VARIANT,
 727        "ALTER": TokenType.ALTER,
 728        "ALTER AGGREGATE": TokenType.COMMAND,
 729        "ALTER DEFAULT": TokenType.COMMAND,
 730        "ALTER DOMAIN": TokenType.COMMAND,
 731        "ALTER ROLE": TokenType.COMMAND,
 732        "ALTER RULE": TokenType.COMMAND,
 733        "ALTER SEQUENCE": TokenType.COMMAND,
 734        "ALTER TYPE": TokenType.COMMAND,
 735        "ALTER USER": TokenType.COMMAND,
 736        "ALTER VIEW": TokenType.COMMAND,
 737        "ANALYZE": TokenType.COMMAND,
 738        "CALL": TokenType.COMMAND,
 739        "COMMENT": TokenType.COMMENT,
 740        "COPY": TokenType.COMMAND,
 741        "EXPLAIN": TokenType.COMMAND,
 742        "GRANT": TokenType.COMMAND,
 743        "OPTIMIZE": TokenType.COMMAND,
 744        "PREPARE": TokenType.COMMAND,
 745        "TRUNCATE": TokenType.COMMAND,
 746        "VACUUM": TokenType.COMMAND,
 747    }
 748
 749    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 750        " ": TokenType.SPACE,
 751        "\t": TokenType.SPACE,
 752        "\n": TokenType.BREAK,
 753        "\r": TokenType.BREAK,
 754        "\r\n": TokenType.BREAK,
 755    }
 756
 757    COMMANDS = {
 758        TokenType.COMMAND,
 759        TokenType.EXECUTE,
 760        TokenType.FETCH,
 761        TokenType.SHOW,
 762    }
 763
 764    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 765
 766    # handle numeric literals like in Hive (3L = BIGINT)
 767    NUMERIC_LITERALS: t.Dict[str, str] = {}
 768    ENCODE: t.Optional[str] = None
 769
 770    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
 771    KEYWORD_TRIE: t.Dict = {}  # autofilled
 772
 773    IDENTIFIER_CAN_START_WITH_DIGIT = False
 774
 775    __slots__ = (
 776        "sql",
 777        "size",
 778        "tokens",
 779        "_start",
 780        "_current",
 781        "_line",
 782        "_col",
 783        "_comments",
 784        "_char",
 785        "_end",
 786        "_peek",
 787        "_prev_token_line",
 788    )
 789
 790    def __init__(self) -> None:
 791        self.reset()
 792
 793    def reset(self) -> None:
 794        self.sql = ""
 795        self.size = 0
 796        self.tokens: t.List[Token] = []
 797        self._start = 0
 798        self._current = 0
 799        self._line = 1
 800        self._col = 1
 801        self._comments: t.List[str] = []
 802
 803        self._char = ""
 804        self._end = False
 805        self._peek = ""
 806        self._prev_token_line = -1
 807
 808    def tokenize(self, sql: str) -> t.List[Token]:
 809        """Returns a list of tokens corresponding to the SQL string `sql`."""
 810        self.reset()
 811        self.sql = sql
 812        self.size = len(sql)
 813        try:
 814            self._scan()
 815        except Exception as e:
 816            start = self._current - 50
 817            end = self._current + 50
 818            start = start if start > 0 else 0
 819            end = end if end < self.size else self.size - 1
 820            context = self.sql[start:end]
 821            raise ValueError(f"Error tokenizing '{context}'") from e
 822
 823        return self.tokens
 824
 825    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 826        while self.size and not self._end:
 827            self._start = self._current
 828            self._advance()
 829
 830            if self._char is None:
 831                break
 832
 833            if self._char not in self.WHITE_SPACE:
 834                if self._char.isdigit():
 835                    self._scan_number()
 836                elif self._char in self._IDENTIFIERS:
 837                    self._scan_identifier(self._IDENTIFIERS[self._char])
 838                else:
 839                    self._scan_keywords()
 840
 841            if until and until():
 842                break
 843
 844        if self.tokens:
 845            self.tokens[-1].comments.extend(self._comments)
 846
 847    def _chars(self, size: int) -> str:
 848        if size == 1:
 849            return self._char
 850        start = self._current - 1
 851        end = start + size
 852        if end <= self.size:
 853            return self.sql[start:end]
 854        return ""
 855
 856    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 857        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 858            self._col = 1
 859            self._line += 1
 860        else:
 861            self._col += i
 862
 863        self._current += i
 864        self._end = self._current >= self.size
 865        self._char = self.sql[self._current - 1]
 866        self._peek = "" if self._end else self.sql[self._current]
 867
 868        if alnum and self._char.isalnum():
 869            _col = self._col
 870            _current = self._current
 871            _end = self._end
 872            _peek = self._peek
 873
 874            while _peek.isalnum():
 875                _col += 1
 876                _current += 1
 877                _end = _current >= self.size
 878                _peek = "" if _end else self.sql[_current]
 879
 880            self._col = _col
 881            self._current = _current
 882            self._end = _end
 883            self._peek = _peek
 884            self._char = self.sql[_current - 1]
 885
 886    @property
 887    def _text(self) -> str:
 888        return self.sql[self._start : self._current]
 889
 890    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 891        self._prev_token_line = self._line
 892        self.tokens.append(
 893            Token(
 894                token_type,
 895                self._text if text is None else text,
 896                self._line,
 897                self._col,
 898                self._current,
 899                self._comments,
 900            )
 901        )
 902        self._comments = []
 903
 904        # If we have either a semicolon or a begin token before the command's token, we'll parse
 905        # whatever follows the command's token as a string
 906        if (
 907            token_type in self.COMMANDS
 908            and self._peek != ";"
 909            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 910        ):
 911            start = self._current
 912            tokens = len(self.tokens)
 913            self._scan(lambda: self._peek == ";")
 914            self.tokens = self.tokens[:tokens]
 915            text = self.sql[start : self._current].strip()
 916            if text:
 917                self._add(TokenType.STRING, text)
 918
 919    def _scan_keywords(self) -> None:
 920        size = 0
 921        word = None
 922        chars = self._text
 923        char = chars
 924        prev_space = False
 925        skip = False
 926        trie = self.KEYWORD_TRIE
 927        single_token = char in self.SINGLE_TOKENS
 928
 929        while chars:
 930            if skip:
 931                result = 1
 932            else:
 933                result, trie = in_trie(trie, char.upper())
 934
 935            if result == 0:
 936                break
 937            if result == 2:
 938                word = chars
 939
 940            size += 1
 941            end = self._current - 1 + size
 942
 943            if end < self.size:
 944                char = self.sql[end]
 945                single_token = single_token or char in self.SINGLE_TOKENS
 946                is_space = char in self.WHITE_SPACE
 947
 948                if not is_space or not prev_space:
 949                    if is_space:
 950                        char = " "
 951                    chars += char
 952                    prev_space = is_space
 953                    skip = False
 954                else:
 955                    skip = True
 956            else:
 957                char = ""
 958                chars = " "
 959
 960        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 961
 962        if not word:
 963            if self._char in self.SINGLE_TOKENS:
 964                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
 965                return
 966            self._scan_var()
 967            return
 968
 969        if self._scan_string(word):
 970            return
 971        if self._scan_formatted_string(word):
 972            return
 973        if self._scan_comment(word):
 974            return
 975
 976        self._advance(size - 1)
 977        word = word.upper()
 978        self._add(self.KEYWORDS[word], text=word)
 979
 980    def _scan_comment(self, comment_start: str) -> bool:
 981        if comment_start not in self._COMMENTS:
 982            return False
 983
 984        comment_start_line = self._line
 985        comment_start_size = len(comment_start)
 986        comment_end = self._COMMENTS[comment_start]
 987
 988        if comment_end:
 989            # Skip the comment's start delimiter
 990            self._advance(comment_start_size)
 991
 992            comment_end_size = len(comment_end)
 993            while not self._end and self._chars(comment_end_size) != comment_end:
 994                self._advance(alnum=True)
 995
 996            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
 997            self._advance(comment_end_size - 1)
 998        else:
 999            while not self._end and self.WHITE_SPACE.get(self._peek) is not TokenType.BREAK:
1000                self._advance(alnum=True)
1001            self._comments.append(self._text[comment_start_size:])
1002
1003        # A leading comment attaches to the succeeding token; a trailing comment attaches to the preceding one.
1004        # Multiple consecutive comments are preserved by appending them to the current comments list.
1005        if comment_start_line == self._prev_token_line:
1006            self.tokens[-1].comments.extend(self._comments)
1007            self._comments = []
1008            self._prev_token_line = self._line
1009
1010        return True
1011
1012    def _scan_number(self) -> None:
1013        if self._char == "0":
1014            peek = self._peek.upper()
1015            if peek == "B":
1016                return self._scan_bits() if self._BIT_STRINGS else self._add(TokenType.NUMBER)
1017            elif peek == "X":
1018                return self._scan_hex() if self._HEX_STRINGS else self._add(TokenType.NUMBER)
1019
1020        decimal = False
1021        scientific = 0
1022
1023        while True:
1024            if self._peek.isdigit():
1025                self._advance()
1026            elif self._peek == "." and not decimal:
1027                decimal = True
1028                self._advance()
1029            elif self._peek in ("-", "+") and scientific == 1:
1030                scientific += 1
1031                self._advance()
1032            elif self._peek.upper() == "E" and not scientific:
1033                scientific += 1
1034                self._advance()
1035            elif self._peek.isidentifier():
1036                number_text = self._text
1037                literal = ""
1038
1039                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1040                    literal += self._peek.upper()
1041                    self._advance()
1042
1043                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))
1044
1045                if token_type:
1046                    self._add(TokenType.NUMBER, number_text)
1047                    self._add(TokenType.DCOLON, "::")
1048                    return self._add(token_type, literal)
1049                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
1050                    return self._add(TokenType.VAR)
1051
1052                self._add(TokenType.NUMBER, number_text)
1053                return self._advance(-len(literal))
1054            else:
1055                return self._add(TokenType.NUMBER)
1056
1057    def _scan_bits(self) -> None:
1058        self._advance()
1059        value = self._extract_value()
1060        try:
1061            # If `value` can't be converted to binary, fall back to tokenizing it as an identifier
1062            int(value, 2)
1063            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1064        except ValueError:
1065            self._add(TokenType.IDENTIFIER)
1066
1067    def _scan_hex(self) -> None:
1068        self._advance()
1069        value = self._extract_value()
1070        try:
1071            # If `value` can't be converted to hex, fall back to tokenizing it as an identifier
1072            int(value, 16)
1073            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1074        except ValueError:
1075            self._add(TokenType.IDENTIFIER)
1076
1077    def _extract_value(self) -> str:
1078        while True:
1079            char = self._peek.strip()
1080            if char and char not in self.SINGLE_TOKENS:
1081                self._advance(alnum=True)
1082            else:
1083                break
1084
1085        return self._text
1086
1087    def _scan_string(self, quote: str) -> bool:
1088        quote_end = self._QUOTES.get(quote)
1089        if quote_end is None:
1090            return False
1091
1092        self._advance(len(quote))
1093        text = self._extract_string(quote_end)
1094        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1095        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
1096        return True
1097
1098    # X'1234', b'0110', E'\\\\\' etc.
1099    def _scan_formatted_string(self, string_start: str) -> bool:
1100        if string_start in self._HEX_STRINGS:
1101            delimiters = self._HEX_STRINGS
1102            token_type = TokenType.HEX_STRING
1103            base = 16
1104        elif string_start in self._BIT_STRINGS:
1105            delimiters = self._BIT_STRINGS
1106            token_type = TokenType.BIT_STRING
1107            base = 2
1108        elif string_start in self._BYTE_STRINGS:
1109            delimiters = self._BYTE_STRINGS
1110            token_type = TokenType.BYTE_STRING
1111            base = None
1112        else:
1113            return False
1114
1115        self._advance(len(string_start))
1116        string_end = delimiters[string_start]
1117        text = self._extract_string(string_end)
1118
1119        if base:
1120            try:
1121                int(text, base)
1122            except ValueError:
1123                raise RuntimeError(
1124                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1125                )
1126
1127        self._add(token_type, text)
1128        return True
1129
1130    def _scan_identifier(self, identifier_end: str) -> None:
1131        self._advance()
1132        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1133        self._add(TokenType.IDENTIFIER, text)
1134
1135    def _scan_var(self) -> None:
1136        while True:
1137            char = self._peek.strip()
1138            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1139                self._advance(alnum=True)
1140            else:
1141                break
1142
1143        self._add(
1144            TokenType.VAR
1145            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1146            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1147        )
1148
1149    def _extract_string(self, delimiter: str, escapes=None) -> str:
1150        text = ""
1151        delim_size = len(delimiter)
1152        escapes = self._STRING_ESCAPES if escapes is None else escapes
1153
1154        while True:
1155            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
1156                if self._peek == delimiter:
1157                    text += self._peek
1158                else:
1159                    text += self._char + self._peek
1160
1161                if self._current + 1 < self.size:
1162                    self._advance(2)
1163                else:
1164                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1165            else:
1166                if self._chars(delim_size) == delimiter:
1167                    if delim_size > 1:
1168                        self._advance(delim_size - 1)
1169                    break
1170
1171                if self._end:
1172                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1173
1174                current = self._current - 1
1175                self._advance(alnum=True)
1176                text += self.sql[current : self._current - 1]
1177
1178        return text
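
To make the Token fields above concrete, here is a small hand-constructed sketch; start is derived as end minus the length of the token's text:

    from sqlglot.tokens import Token, TokenType

    # line/col/end locate the token inside the original SQL string.
    token = Token(TokenType.NUMBER, "42", line=1, col=10, end=10)
    assert token.start == 8               # end (10) - len("42")
    assert Token.number(42).text == "42"  # convenience constructor
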
class TokenType(sqlglot.helper.AutoName):

An enumeration.
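
Since TokenType builds on sqlglot.helper.AutoName, auto() assigns each member a value equal to its own name (for example, TokenType.L_PAREN renders as <TokenType.L_PAREN: 'L_PAREN'>). A quick sketch:

    from sqlglot.tokens import TokenType

    assert TokenType.SELECT.value == "SELECT"
    assert TokenType.L_PAREN.value == TokenType.L_PAREN.name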

VOLATILE = <TokenType.VOLATILE: 'VOLATILE'>
WHEN = <TokenType.WHEN: 'WHEN'>
WHERE = <TokenType.WHERE: 'WHERE'>
WINDOW = <TokenType.WINDOW: 'WINDOW'>
WITH = <TokenType.WITH: 'WITH'>
WITH_TIME_ZONE = <TokenType.WITH_TIME_ZONE: 'WITH_TIME_ZONE'>
WITH_LOCAL_TIME_ZONE = <TokenType.WITH_LOCAL_TIME_ZONE: 'WITH_LOCAL_TIME_ZONE'>
WITHIN_GROUP = <TokenType.WITHIN_GROUP: 'WITHIN_GROUP'>
WITHOUT_TIME_ZONE = <TokenType.WITHOUT_TIME_ZONE: 'WITHOUT_TIME_ZONE'>
UNIQUE = <TokenType.UNIQUE: 'UNIQUE'>
Inherited Members
enum.Enum
name
value
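
Because TokenType is built on AutoName, each member's value mirrors its name (as the listing above shows, e.g. <TokenType.ANTI: 'ANTI'>), so the two inherited attributes are interchangeable here. A quick sketch, assuming a standard sqlglot install:

>>> from sqlglot.tokens import TokenType
>>> TokenType.SELECT.name
'SELECT'
>>> TokenType.SELECT.value
'SELECT'
>>> TokenType.SELECT is TokenType['SELECT']
True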
class Token:
328class Token:
329    __slots__ = ("token_type", "text", "line", "col", "end", "comments")
330
331    @classmethod
332    def number(cls, number: int) -> Token:
333        """Returns a NUMBER token with `number` as its text."""
334        return cls(TokenType.NUMBER, str(number))
335
336    @classmethod
337    def string(cls, string: str) -> Token:
338        """Returns a STRING token with `string` as its text."""
339        return cls(TokenType.STRING, string)
340
341    @classmethod
342    def identifier(cls, identifier: str) -> Token:
343        """Returns an IDENTIFIER token with `identifier` as its text."""
344        return cls(TokenType.IDENTIFIER, identifier)
345
346    @classmethod
347    def var(cls, var: str) -> Token:
348        """Returns a VAR token with `var` as its text."""
349        return cls(TokenType.VAR, var)
350
351    def __init__(
352        self,
353        token_type: TokenType,
354        text: str,
355        line: int = 1,
356        col: int = 1,
357        end: int = 0,
358        comments: t.List[str] = [],
359    ) -> None:
360        self.token_type = token_type
361        self.text = text
362        self.line = line
363        size = len(text)
364        self.col = col
365        self.end = end if end else size
366        self.comments = comments
367
368    @property
369    def start(self) -> int:
370        """Returns the start of the token."""
371        return self.end - len(self.text)
372
373    def __repr__(self) -> str:
374        attributes = ", ".join(f"{k}: {getattr(self, k)}" for k in self.__slots__)
375        return f"<Token {attributes}>"
Token(token_type: sqlglot.tokens.TokenType, text: str, line: int = 1, col: int = 1, end: int = 0, comments: List[str] = [])
351    def __init__(
352        self,
353        token_type: TokenType,
354        text: str,
355        line: int = 1,
356        col: int = 1,
357        end: int = 0,
358        comments: t.List[str] = [],
359    ) -> None:
360        self.token_type = token_type
361        self.text = text
362        self.line = line
363        size = len(text)
364        self.col = col
365        self.end = end if end else size
366        self.comments = comments
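
Note that end falls back to len(text) when it isn't given, and start (below) is derived as end - len(text). A minimal sketch of constructing a token directly:

>>> from sqlglot.tokens import Token, TokenType
>>> tok = Token(TokenType.VAR, "foo", line=1, col=10, end=10)
>>> tok.start, tok.end
(7, 10)
>>> Token(TokenType.VAR, "foo").end  # end defaults to len(text)
3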
@classmethod
def number(cls, number: int) -> sqlglot.tokens.Token:
331    @classmethod
332    def number(cls, number: int) -> Token:
333        """Returns a NUMBER token with `number` as its text."""
334        return cls(TokenType.NUMBER, str(number))

Returns a NUMBER token with number as its text.

@classmethod
def string(cls, string: str) -> sqlglot.tokens.Token:
336    @classmethod
337    def string(cls, string: str) -> Token:
338        """Returns a STRING token with `string` as its text."""
339        return cls(TokenType.STRING, string)

Returns a STRING token with string as its text.

@classmethod
def identifier(cls, identifier: str) -> sqlglot.tokens.Token:
341    @classmethod
342    def identifier(cls, identifier: str) -> Token:
343        """Returns an IDENTIFIER token with `identifier` as its text."""
344        return cls(TokenType.IDENTIFIER, identifier)

Returns an IDENTIFIER token with identifier as its text.

@classmethod
def var(cls, var: str) -> sqlglot.tokens.Token:
346    @classmethod
347    def var(cls, var: str) -> Token:
348        """Returns a VAR token with `var` as its text."""
349        return cls(TokenType.VAR, var)

Returns a VAR token with var as its text.

start: int

Returns the start of the token.
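
The four classmethods above are convenience constructors: they set only token_type and text, leaving the positional fields at their defaults. For instance:

>>> from sqlglot.tokens import Token
>>> tok = Token.number(42)
>>> tok.token_type, tok.text
(<TokenType.NUMBER: 'NUMBER'>, '42')
>>> tok.start, tok.end
(0, 2)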

class Tokenizer:
 418class Tokenizer(metaclass=_Tokenizer):
 419    SINGLE_TOKENS = {
 420        "(": TokenType.L_PAREN,
 421        ")": TokenType.R_PAREN,
 422        "[": TokenType.L_BRACKET,
 423        "]": TokenType.R_BRACKET,
 424        "{": TokenType.L_BRACE,
 425        "}": TokenType.R_BRACE,
 426        "&": TokenType.AMP,
 427        "^": TokenType.CARET,
 428        ":": TokenType.COLON,
 429        ",": TokenType.COMMA,
 430        ".": TokenType.DOT,
 431        "-": TokenType.DASH,
 432        "=": TokenType.EQ,
 433        ">": TokenType.GT,
 434        "<": TokenType.LT,
 435        "%": TokenType.MOD,
 436        "!": TokenType.NOT,
 437        "|": TokenType.PIPE,
 438        "+": TokenType.PLUS,
 439        ";": TokenType.SEMICOLON,
 440        "/": TokenType.SLASH,
 441        "\\": TokenType.BACKSLASH,
 442        "*": TokenType.STAR,
 443        "~": TokenType.TILDA,
 444        "?": TokenType.PLACEHOLDER,
 445        "@": TokenType.PARAMETER,
 446        # used for breaking a var like x'y' but nothing else
 447        # the token type doesn't matter
 448        "'": TokenType.QUOTE,
 449        "`": TokenType.IDENTIFIER,
 450        '"': TokenType.IDENTIFIER,
 451        "#": TokenType.HASH,
 452    }
 453
 454    BIT_STRINGS: t.List[str | t.Tuple[str, str]] = []
 455    BYTE_STRINGS: t.List[str | t.Tuple[str, str]] = []
 456    HEX_STRINGS: t.List[str | t.Tuple[str, str]] = []
 457    IDENTIFIERS: t.List[str | t.Tuple[str, str]] = ['"']
 458    IDENTIFIER_ESCAPES = ['"']
 459    QUOTES: t.List[t.Tuple[str, str] | str] = ["'"]
 460    STRING_ESCAPES = ["'"]
 461    VAR_SINGLE_TOKENS: t.Set[str] = set()
 462
 463    _COMMENTS: t.Dict[str, str] = {}
 464    _BIT_STRINGS: t.Dict[str, str] = {}
 465    _BYTE_STRINGS: t.Dict[str, str] = {}
 466    _HEX_STRINGS: t.Dict[str, str] = {}
 467    _IDENTIFIERS: t.Dict[str, str] = {}
 468    _IDENTIFIER_ESCAPES: t.Set[str] = set()
 469    _QUOTES: t.Dict[str, str] = {}
 470    _STRING_ESCAPES: t.Set[str] = set()
 471
 472    KEYWORDS: t.Dict[t.Optional[str], TokenType] = {
 473        **{f"{{%{postfix}": TokenType.BLOCK_START for postfix in ("", "+", "-")},
 474        **{f"{prefix}%}}": TokenType.BLOCK_END for prefix in ("", "+", "-")},
 475        "{{+": TokenType.BLOCK_START,
 476        "{{-": TokenType.BLOCK_START,
 477        "+}}": TokenType.BLOCK_END,
 478        "-}}": TokenType.BLOCK_END,
 479        "/*+": TokenType.HINT,
 480        "==": TokenType.EQ,
 481        "::": TokenType.DCOLON,
 482        "||": TokenType.DPIPE,
 483        ">=": TokenType.GTE,
 484        "<=": TokenType.LTE,
 485        "<>": TokenType.NEQ,
 486        "!=": TokenType.NEQ,
 487        "<=>": TokenType.NULLSAFE_EQ,
 488        "->": TokenType.ARROW,
 489        "->>": TokenType.DARROW,
 490        "=>": TokenType.FARROW,
 491        "#>": TokenType.HASH_ARROW,
 492        "#>>": TokenType.DHASH_ARROW,
 493        "<->": TokenType.LR_ARROW,
 494        "&&": TokenType.DAMP,
 495        "ALL": TokenType.ALL,
 496        "ALWAYS": TokenType.ALWAYS,
 497        "AND": TokenType.AND,
 498        "ANTI": TokenType.ANTI,
 499        "ANY": TokenType.ANY,
 500        "ASC": TokenType.ASC,
 501        "AS": TokenType.ALIAS,
 502        "AT TIME ZONE": TokenType.AT_TIME_ZONE,
 503        "AUTOINCREMENT": TokenType.AUTO_INCREMENT,
 504        "AUTO_INCREMENT": TokenType.AUTO_INCREMENT,
 505        "BEGIN": TokenType.BEGIN,
 506        "BETWEEN": TokenType.BETWEEN,
 507        "BOTH": TokenType.BOTH,
 508        "BUCKET": TokenType.BUCKET,
 509        "BY DEFAULT": TokenType.BY_DEFAULT,
 510        "CACHE": TokenType.CACHE,
 511        "UNCACHE": TokenType.UNCACHE,
 512        "CASE": TokenType.CASE,
 513        "CASCADE": TokenType.CASCADE,
 514        "CHARACTER SET": TokenType.CHARACTER_SET,
 515        "CLUSTER BY": TokenType.CLUSTER_BY,
 516        "COLLATE": TokenType.COLLATE,
 517        "COLUMN": TokenType.COLUMN,
 518        "COMMIT": TokenType.COMMIT,
 519        "COMPOUND": TokenType.COMPOUND,
 520        "CONSTRAINT": TokenType.CONSTRAINT,
 521        "CREATE": TokenType.CREATE,
 522        "CROSS": TokenType.CROSS,
 523        "CUBE": TokenType.CUBE,
 524        "CURRENT_DATE": TokenType.CURRENT_DATE,
 525        "CURRENT ROW": TokenType.CURRENT_ROW,
 526        "CURRENT_TIME": TokenType.CURRENT_TIME,
 527        "CURRENT_TIMESTAMP": TokenType.CURRENT_TIMESTAMP,
 528        "CURRENT_USER": TokenType.CURRENT_USER,
 529        "DATABASE": TokenType.DATABASE,
 530        "DEFAULT": TokenType.DEFAULT,
 531        "DELETE": TokenType.DELETE,
 532        "DESC": TokenType.DESC,
 533        "DESCRIBE": TokenType.DESCRIBE,
 534        "DISTINCT": TokenType.DISTINCT,
 535        "DISTINCT FROM": TokenType.DISTINCT_FROM,
 536        "DISTRIBUTE BY": TokenType.DISTRIBUTE_BY,
 537        "DIV": TokenType.DIV,
 538        "DROP": TokenType.DROP,
 539        "ELSE": TokenType.ELSE,
 540        "END": TokenType.END,
 541        "ESCAPE": TokenType.ESCAPE,
 542        "EXCEPT": TokenType.EXCEPT,
 543        "EXECUTE": TokenType.EXECUTE,
 544        "EXISTS": TokenType.EXISTS,
 545        "FALSE": TokenType.FALSE,
 546        "FETCH": TokenType.FETCH,
 547        "FILTER": TokenType.FILTER,
 548        "FIRST": TokenType.FIRST,
 549        "FULL": TokenType.FULL,
 550        "FUNCTION": TokenType.FUNCTION,
 551        "FOLLOWING": TokenType.FOLLOWING,
 552        "FOR": TokenType.FOR,
 553        "FOREIGN KEY": TokenType.FOREIGN_KEY,
 554        "FORMAT": TokenType.FORMAT,
 555        "FROM": TokenType.FROM,
 556        "GEOGRAPHY": TokenType.GEOGRAPHY,
 557        "GEOMETRY": TokenType.GEOMETRY,
 558        "GLOB": TokenType.GLOB,
 559        "GROUP BY": TokenType.GROUP_BY,
 560        "GROUPING SETS": TokenType.GROUPING_SETS,
 561        "HAVING": TokenType.HAVING,
 562        "IF": TokenType.IF,
 563        "ILIKE": TokenType.ILIKE,
 564        "IGNORE NULLS": TokenType.IGNORE_NULLS,
 565        "IN": TokenType.IN,
 566        "INDEX": TokenType.INDEX,
 567        "INET": TokenType.INET,
 568        "INNER": TokenType.INNER,
 569        "INSERT": TokenType.INSERT,
 570        "INTERVAL": TokenType.INTERVAL,
 571        "INTERSECT": TokenType.INTERSECT,
 572        "INTO": TokenType.INTO,
 573        "IS": TokenType.IS,
 574        "ISNULL": TokenType.ISNULL,
 575        "JOIN": TokenType.JOIN,
 576        "KEEP": TokenType.KEEP,
 577        "LATERAL": TokenType.LATERAL,
 578        "LAZY": TokenType.LAZY,
 579        "LEADING": TokenType.LEADING,
 580        "LEFT": TokenType.LEFT,
 581        "LIKE": TokenType.LIKE,
 582        "LIMIT": TokenType.LIMIT,
 583        "LOAD DATA": TokenType.LOAD_DATA,
 584        "LOCAL": TokenType.LOCAL,
 585        "LOCK": TokenType.LOCK,
 586        "MATERIALIZED": TokenType.MATERIALIZED,
 587        "MERGE": TokenType.MERGE,
 588        "NATURAL": TokenType.NATURAL,
 589        "NEXT": TokenType.NEXT,
 590        "NEXT VALUE FOR": TokenType.NEXT_VALUE_FOR,
 591        "NO ACTION": TokenType.NO_ACTION,
 592        "NOT": TokenType.NOT,
 593        "NOTNULL": TokenType.NOTNULL,
 594        "NULL": TokenType.NULL,
 595        "NULLS FIRST": TokenType.NULLS_FIRST,
 596        "NULLS LAST": TokenType.NULLS_LAST,
 597        "OBJECT": TokenType.OBJECT,
 598        "OFFSET": TokenType.OFFSET,
 599        "ON": TokenType.ON,
 600        "ONLY": TokenType.ONLY,
 601        "OPTIONS": TokenType.OPTIONS,
 602        "OR": TokenType.OR,
 603        "ORDER BY": TokenType.ORDER_BY,
 604        "ORDINALITY": TokenType.ORDINALITY,
 605        "OUTER": TokenType.OUTER,
 606        "OUT OF": TokenType.OUT_OF,
 607        "OVER": TokenType.OVER,
 608        "OVERLAPS": TokenType.OVERLAPS,
 609        "OVERWRITE": TokenType.OVERWRITE,
 610        "PARTITION": TokenType.PARTITION,
 611        "PARTITION BY": TokenType.PARTITION_BY,
 612        "PARTITIONED BY": TokenType.PARTITION_BY,
 613        "PARTITIONED_BY": TokenType.PARTITION_BY,
 614        "PERCENT": TokenType.PERCENT,
 615        "PIVOT": TokenType.PIVOT,
 616        "PRAGMA": TokenType.PRAGMA,
 617        "PRECEDING": TokenType.PRECEDING,
 618        "PRIMARY KEY": TokenType.PRIMARY_KEY,
 619        "PROCEDURE": TokenType.PROCEDURE,
 620        "QUALIFY": TokenType.QUALIFY,
 621        "RANGE": TokenType.RANGE,
 622        "RECURSIVE": TokenType.RECURSIVE,
 623        "REGEXP": TokenType.RLIKE,
 624        "REPLACE": TokenType.REPLACE,
 625        "RESPECT NULLS": TokenType.RESPECT_NULLS,
 626        "REFERENCES": TokenType.REFERENCES,
 627        "RIGHT": TokenType.RIGHT,
 628        "RLIKE": TokenType.RLIKE,
 629        "ROLLBACK": TokenType.ROLLBACK,
 630        "ROLLUP": TokenType.ROLLUP,
 631        "ROW": TokenType.ROW,
 632        "ROWS": TokenType.ROWS,
 633        "SCHEMA": TokenType.SCHEMA,
 634        "SEED": TokenType.SEED,
 635        "SELECT": TokenType.SELECT,
 636        "SEMI": TokenType.SEMI,
 637        "SET": TokenType.SET,
 638        "SETTINGS": TokenType.SETTINGS,
 639        "SHOW": TokenType.SHOW,
 640        "SIMILAR TO": TokenType.SIMILAR_TO,
 641        "SOME": TokenType.SOME,
 642        "SORTKEY": TokenType.SORTKEY,
 643        "SORT BY": TokenType.SORT_BY,
 644        "TABLE": TokenType.TABLE,
 645        "TABLESAMPLE": TokenType.TABLE_SAMPLE,
 646        "TEMP": TokenType.TEMPORARY,
 647        "TEMPORARY": TokenType.TEMPORARY,
 648        "THEN": TokenType.THEN,
 649        "TRUE": TokenType.TRUE,
 650        "TRAILING": TokenType.TRAILING,
 651        "UNBOUNDED": TokenType.UNBOUNDED,
 652        "UNION": TokenType.UNION,
 653        "UNLOGGED": TokenType.UNLOGGED,
 654        "UNNEST": TokenType.UNNEST,
 655        "UNPIVOT": TokenType.UNPIVOT,
 656        "UPDATE": TokenType.UPDATE,
 657        "USE": TokenType.USE,
 658        "USING": TokenType.USING,
 659        "UUID": TokenType.UUID,
 660        "VALUES": TokenType.VALUES,
 661        "VIEW": TokenType.VIEW,
 662        "VOLATILE": TokenType.VOLATILE,
 663        "WHEN": TokenType.WHEN,
 664        "WHERE": TokenType.WHERE,
 665        "WINDOW": TokenType.WINDOW,
 666        "WITH": TokenType.WITH,
 667        "WITH TIME ZONE": TokenType.WITH_TIME_ZONE,
 668        "WITH LOCAL TIME ZONE": TokenType.WITH_LOCAL_TIME_ZONE,
 669        "WITHIN GROUP": TokenType.WITHIN_GROUP,
 670        "WITHOUT TIME ZONE": TokenType.WITHOUT_TIME_ZONE,
 671        "APPLY": TokenType.APPLY,
 672        "ARRAY": TokenType.ARRAY,
 673        "BIT": TokenType.BIT,
 674        "BOOL": TokenType.BOOLEAN,
 675        "BOOLEAN": TokenType.BOOLEAN,
 676        "BYTE": TokenType.TINYINT,
 677        "TINYINT": TokenType.TINYINT,
 678        "SHORT": TokenType.SMALLINT,
 679        "SMALLINT": TokenType.SMALLINT,
 680        "INT2": TokenType.SMALLINT,
 681        "INTEGER": TokenType.INT,
 682        "INT": TokenType.INT,
 683        "INT4": TokenType.INT,
 684        "LONG": TokenType.BIGINT,
 685        "BIGINT": TokenType.BIGINT,
 686        "INT8": TokenType.BIGINT,
 687        "DEC": TokenType.DECIMAL,
 688        "DECIMAL": TokenType.DECIMAL,
 689        "BIGDECIMAL": TokenType.BIGDECIMAL,
 690        "BIGNUMERIC": TokenType.BIGDECIMAL,
 691        "MAP": TokenType.MAP,
 692        "NULLABLE": TokenType.NULLABLE,
 693        "NUMBER": TokenType.DECIMAL,
 694        "NUMERIC": TokenType.DECIMAL,
 695        "FIXED": TokenType.DECIMAL,
 696        "REAL": TokenType.FLOAT,
 697        "FLOAT": TokenType.FLOAT,
 698        "FLOAT4": TokenType.FLOAT,
 699        "FLOAT8": TokenType.DOUBLE,
 700        "DOUBLE": TokenType.DOUBLE,
 701        "DOUBLE PRECISION": TokenType.DOUBLE,
 702        "JSON": TokenType.JSON,
 703        "CHAR": TokenType.CHAR,
 704        "CHARACTER": TokenType.CHAR,
 705        "NCHAR": TokenType.NCHAR,
 706        "VARCHAR": TokenType.VARCHAR,
 707        "VARCHAR2": TokenType.VARCHAR,
 708        "NVARCHAR": TokenType.NVARCHAR,
 709        "NVARCHAR2": TokenType.NVARCHAR,
 710        "STR": TokenType.TEXT,
 711        "STRING": TokenType.TEXT,
 712        "TEXT": TokenType.TEXT,
 713        "CLOB": TokenType.TEXT,
 714        "LONGVARCHAR": TokenType.TEXT,
 715        "BINARY": TokenType.BINARY,
 716        "BLOB": TokenType.VARBINARY,
 717        "BYTEA": TokenType.VARBINARY,
 718        "VARBINARY": TokenType.VARBINARY,
 719        "TIME": TokenType.TIME,
 720        "TIMESTAMP": TokenType.TIMESTAMP,
 721        "TIMESTAMPTZ": TokenType.TIMESTAMPTZ,
 722        "TIMESTAMPLTZ": TokenType.TIMESTAMPLTZ,
 723        "DATE": TokenType.DATE,
 724        "DATETIME": TokenType.DATETIME,
 725        "UNIQUE": TokenType.UNIQUE,
 726        "STRUCT": TokenType.STRUCT,
 727        "VARIANT": TokenType.VARIANT,
 728        "ALTER": TokenType.ALTER,
 729        "ALTER AGGREGATE": TokenType.COMMAND,
 730        "ALTER DEFAULT": TokenType.COMMAND,
 731        "ALTER DOMAIN": TokenType.COMMAND,
 732        "ALTER ROLE": TokenType.COMMAND,
 733        "ALTER RULE": TokenType.COMMAND,
 734        "ALTER SEQUENCE": TokenType.COMMAND,
 735        "ALTER TYPE": TokenType.COMMAND,
 736        "ALTER USER": TokenType.COMMAND,
 737        "ALTER VIEW": TokenType.COMMAND,
 738        "ANALYZE": TokenType.COMMAND,
 739        "CALL": TokenType.COMMAND,
 740        "COMMENT": TokenType.COMMENT,
 741        "COPY": TokenType.COMMAND,
 742        "EXPLAIN": TokenType.COMMAND,
 743        "GRANT": TokenType.COMMAND,
 744        "OPTIMIZE": TokenType.COMMAND,
 745        "PREPARE": TokenType.COMMAND,
 746        "TRUNCATE": TokenType.COMMAND,
 747        "VACUUM": TokenType.COMMAND,
 748    }
 749
 750    WHITE_SPACE: t.Dict[t.Optional[str], TokenType] = {
 751        " ": TokenType.SPACE,
 752        "\t": TokenType.SPACE,
 753        "\n": TokenType.BREAK,
 754        "\r": TokenType.BREAK,
 755        "\r\n": TokenType.BREAK,
 756    }
 757
 758    COMMANDS = {
 759        TokenType.COMMAND,
 760        TokenType.EXECUTE,
 761        TokenType.FETCH,
 762        TokenType.SHOW,
 763    }
 764
 765    COMMAND_PREFIX_TOKENS = {TokenType.SEMICOLON, TokenType.BEGIN}
 766
 767    # handle numeric literals like in hive (3L = BIGINT)
 768    NUMERIC_LITERALS: t.Dict[str, str] = {}
 769    ENCODE: t.Optional[str] = None
 770
 771    COMMENTS = ["--", ("/*", "*/"), ("{#", "#}")]
 772    KEYWORD_TRIE: t.Dict = {}  # autofilled
 773
 774    IDENTIFIER_CAN_START_WITH_DIGIT = False
 775
 776    __slots__ = (
 777        "sql",
 778        "size",
 779        "tokens",
 780        "_start",
 781        "_current",
 782        "_line",
 783        "_col",
 784        "_comments",
 785        "_char",
 786        "_end",
 787        "_peek",
 788        "_prev_token_line",
 789    )
 790
 791    def __init__(self) -> None:
 792        self.reset()
 793
 794    def reset(self) -> None:
 795        self.sql = ""
 796        self.size = 0
 797        self.tokens: t.List[Token] = []
 798        self._start = 0
 799        self._current = 0
 800        self._line = 1
 801        self._col = 1
 802        self._comments: t.List[str] = []
 803
 804        self._char = ""
 805        self._end = False
 806        self._peek = ""
 807        self._prev_token_line = -1
 808
 809    def tokenize(self, sql: str) -> t.List[Token]:
 810        """Returns a list of tokens corresponding to the SQL string `sql`."""
 811        self.reset()
 812        self.sql = sql
 813        self.size = len(sql)
 814        try:
 815            self._scan()
 816        except Exception as e:
 817            start = self._current - 50
 818            end = self._current + 50
 819            start = start if start > 0 else 0
 820            end = end if end < self.size else self.size - 1
 821            context = self.sql[start:end]
 822            raise ValueError(f"Error tokenizing '{context}'") from e
 823
 824        return self.tokens
 825
 826    def _scan(self, until: t.Optional[t.Callable] = None) -> None:
 827        while self.size and not self._end:
 828            self._start = self._current
 829            self._advance()
 830
 831            if self._char is None:
 832                break
 833
 834            if self._char not in self.WHITE_SPACE:
 835                if self._char.isdigit():
 836                    self._scan_number()
 837                elif self._char in self._IDENTIFIERS:
 838                    self._scan_identifier(self._IDENTIFIERS[self._char])
 839                else:
 840                    self._scan_keywords()
 841
 842            if until and until():
 843                break
 844
 845        if self.tokens:
 846            self.tokens[-1].comments.extend(self._comments)
 847
 848    def _chars(self, size: int) -> str:
 849        if size == 1:
 850            return self._char
 851        start = self._current - 1
 852        end = start + size
 853        if end <= self.size:
 854            return self.sql[start:end]
 855        return ""
 856
 857    def _advance(self, i: int = 1, alnum: bool = False) -> None:
 858        if self.WHITE_SPACE.get(self._char) is TokenType.BREAK:
 859            self._col = 1
 860            self._line += 1
 861        else:
 862            self._col += i
 863
 864        self._current += i
 865        self._end = self._current >= self.size
 866        self._char = self.sql[self._current - 1]
 867        self._peek = "" if self._end else self.sql[self._current]
 868
 869        if alnum and self._char.isalnum():
 870            _col = self._col
 871            _current = self._current
 872            _end = self._end
 873            _peek = self._peek
 874
 875            while _peek.isalnum():
 876                _col += 1
 877                _current += 1
 878                _end = _current >= self.size
 879                _peek = "" if _end else self.sql[_current]
 880
 881            self._col = _col
 882            self._current = _current
 883            self._end = _end
 884            self._peek = _peek
 885            self._char = self.sql[_current - 1]
 886
 887    @property
 888    def _text(self) -> str:
 889        return self.sql[self._start : self._current]
 890
 891    def _add(self, token_type: TokenType, text: t.Optional[str] = None) -> None:
 892        self._prev_token_line = self._line
 893        self.tokens.append(
 894            Token(
 895                token_type,
 896                self._text if text is None else text,
 897                self._line,
 898                self._col,
 899                self._current,
 900                self._comments,
 901            )
 902        )
 903        self._comments = []
 904
 905        # If we have either a semicolon or a begin token before the command's token, we'll parse
 906        # whatever follows the command's token as a string
 907        if (
 908            token_type in self.COMMANDS
 909            and self._peek != ";"
 910            and (len(self.tokens) == 1 or self.tokens[-2].token_type in self.COMMAND_PREFIX_TOKENS)
 911        ):
 912            start = self._current
 913            tokens = len(self.tokens)
 914            self._scan(lambda: self._peek == ";")
 915            self.tokens = self.tokens[:tokens]
 916            text = self.sql[start : self._current].strip()
 917            if text:
 918                self._add(TokenType.STRING, text)
 919
 920    def _scan_keywords(self) -> None:
 921        size = 0
 922        word = None
 923        chars = self._text
 924        char = chars
 925        prev_space = False
 926        skip = False
 927        trie = self.KEYWORD_TRIE
 928        single_token = char in self.SINGLE_TOKENS
 929
 930        while chars:
 931            if skip:
 932                result = 1
 933            else:
 934                result, trie = in_trie(trie, char.upper())
 935
 936            if result == 0:
 937                break
 938            if result == 2:
 939                word = chars
 940
 941            size += 1
 942            end = self._current - 1 + size
 943
 944            if end < self.size:
 945                char = self.sql[end]
 946                single_token = single_token or char in self.SINGLE_TOKENS
 947                is_space = char in self.WHITE_SPACE
 948
 949                if not is_space or not prev_space:
 950                    if is_space:
 951                        char = " "
 952                    chars += char
 953                    prev_space = is_space
 954                    skip = False
 955                else:
 956                    skip = True
 957            else:
 958                char = ""
 959                chars = " "
 960
 961        word = None if not single_token and chars[-1] not in self.WHITE_SPACE else word
 962
 963        if not word:
 964            if self._char in self.SINGLE_TOKENS:
 965                self._add(self.SINGLE_TOKENS[self._char], text=self._char)
 966                return
 967            self._scan_var()
 968            return
 969
 970        if self._scan_string(word):
 971            return
 972        if self._scan_formatted_string(word):
 973            return
 974        if self._scan_comment(word):
 975            return
 976
 977        self._advance(size - 1)
 978        word = word.upper()
 979        self._add(self.KEYWORDS[word], text=word)
 980
 981    def _scan_comment(self, comment_start: str) -> bool:
 982        if comment_start not in self._COMMENTS:
 983            return False
 984
 985        comment_start_line = self._line
 986        comment_start_size = len(comment_start)
 987        comment_end = self._COMMENTS[comment_start]
 988
 989        if comment_end:
 990            # Skip the comment's start delimiter
 991            self._advance(comment_start_size)
 992
 993            comment_end_size = len(comment_end)
 994            while not self._end and self._chars(comment_end_size) != comment_end:
 995                self._advance(alnum=True)
 996
 997            self._comments.append(self._text[comment_start_size : -comment_end_size + 1])
 998            self._advance(comment_end_size - 1)
 999        else:
1000            while not self._end and not self.WHITE_SPACE.get(self._peek) is TokenType.BREAK:
1001                self._advance(alnum=True)
1002            self._comments.append(self._text[comment_start_size:])
1003
1004        # Leading comment is attached to the succeeding token, whilst trailing comment to the preceding.
1005        # Multiple consecutive comments are preserved by appending them to the current comments list.
1006        if comment_start_line == self._prev_token_line:
1007            self.tokens[-1].comments.extend(self._comments)
1008            self._comments = []
1009            self._prev_token_line = self._line
1010
1011        return True
1012
1013    def _scan_number(self) -> None:
1014        if self._char == "0":
1015            peek = self._peek.upper()
1016            if peek == "B":
1017                return self._scan_bits() if self._BIT_STRINGS else self._add(TokenType.NUMBER)
1018            elif peek == "X":
1019                return self._scan_hex() if self._HEX_STRINGS else self._add(TokenType.NUMBER)
1020
1021        decimal = False
1022        scientific = 0
1023
1024        while True:
1025            if self._peek.isdigit():
1026                self._advance()
1027            elif self._peek == "." and not decimal:
1028                decimal = True
1029                self._advance()
1030            elif self._peek in ("-", "+") and scientific == 1:
1031                scientific += 1
1032                self._advance()
1033            elif self._peek.upper() == "E" and not scientific:
1034                scientific += 1
1035                self._advance()
1036            elif self._peek.isidentifier():
1037                number_text = self._text
1038                literal = ""
1039
1040                while self._peek.strip() and self._peek not in self.SINGLE_TOKENS:
1041                    literal += self._peek.upper()
1042                    self._advance()
1043
1044                token_type = self.KEYWORDS.get(self.NUMERIC_LITERALS.get(literal))
1045
1046                if token_type:
1047                    self._add(TokenType.NUMBER, number_text)
1048                    self._add(TokenType.DCOLON, "::")
1049                    return self._add(token_type, literal)
1050                elif self.IDENTIFIER_CAN_START_WITH_DIGIT:
1051                    return self._add(TokenType.VAR)
1052
1053                self._add(TokenType.NUMBER, number_text)
1054                return self._advance(-len(literal))
1055            else:
1056                return self._add(TokenType.NUMBER)
1057
1058    def _scan_bits(self) -> None:
1059        self._advance()
1060        value = self._extract_value()
1061        try:
1062            # If `value` can't be converted to a binary, fallback to tokenizing it as an identifier
1063            int(value, 2)
1064            self._add(TokenType.BIT_STRING, value[2:])  # Drop the 0b
1065        except ValueError:
1066            self._add(TokenType.IDENTIFIER)
1067
1068    def _scan_hex(self) -> None:
1069        self._advance()
1070        value = self._extract_value()
1071        try:
1072            # If `value` can't be converted to a hex, fallback to tokenizing it as an identifier
1073            int(value, 16)
1074            self._add(TokenType.HEX_STRING, value[2:])  # Drop the 0x
1075        except ValueError:
1076            self._add(TokenType.IDENTIFIER)
1077
1078    def _extract_value(self) -> str:
1079        while True:
1080            char = self._peek.strip()
1081            if char and char not in self.SINGLE_TOKENS:
1082                self._advance(alnum=True)
1083            else:
1084                break
1085
1086        return self._text
1087
1088    def _scan_string(self, quote: str) -> bool:
1089        quote_end = self._QUOTES.get(quote)
1090        if quote_end is None:
1091            return False
1092
1093        self._advance(len(quote))
1094        text = self._extract_string(quote_end)
1095        text = text.encode(self.ENCODE).decode(self.ENCODE) if self.ENCODE else text
1096        self._add(TokenType.NATIONAL if quote[0].upper() == "N" else TokenType.STRING, text)
1097        return True
1098
1099    # X'1234', b'0110', E'\\\\\' etc.
1100    def _scan_formatted_string(self, string_start: str) -> bool:
1101        if string_start in self._HEX_STRINGS:
1102            delimiters = self._HEX_STRINGS
1103            token_type = TokenType.HEX_STRING
1104            base = 16
1105        elif string_start in self._BIT_STRINGS:
1106            delimiters = self._BIT_STRINGS
1107            token_type = TokenType.BIT_STRING
1108            base = 2
1109        elif string_start in self._BYTE_STRINGS:
1110            delimiters = self._BYTE_STRINGS
1111            token_type = TokenType.BYTE_STRING
1112            base = None
1113        else:
1114            return False
1115
1116        self._advance(len(string_start))
1117        string_end = delimiters[string_start]
1118        text = self._extract_string(string_end)
1119
1120        if base:
1121            try:
1122                int(text, base)
1123            except:
1124                raise RuntimeError(
1125                    f"Numeric string contains invalid characters from {self._line}:{self._start}"
1126                )
1127
1128        self._add(token_type, text)
1129        return True
1130
1131    def _scan_identifier(self, identifier_end: str) -> None:
1132        self._advance()
1133        text = self._extract_string(identifier_end, self._IDENTIFIER_ESCAPES)
1134        self._add(TokenType.IDENTIFIER, text)
1135
1136    def _scan_var(self) -> None:
1137        while True:
1138            char = self._peek.strip()
1139            if char and (char in self.VAR_SINGLE_TOKENS or char not in self.SINGLE_TOKENS):
1140                self._advance(alnum=True)
1141            else:
1142                break
1143
1144        self._add(
1145            TokenType.VAR
1146            if self.tokens and self.tokens[-1].token_type == TokenType.PARAMETER
1147            else self.KEYWORDS.get(self._text.upper(), TokenType.VAR)
1148        )
1149
1150    def _extract_string(self, delimiter: str, escapes=None) -> str:
1151        text = ""
1152        delim_size = len(delimiter)
1153        escapes = self._STRING_ESCAPES if escapes is None else escapes
1154
1155        while True:
1156            if self._char in escapes and (self._peek == delimiter or self._peek in escapes):
1157                if self._peek == delimiter:
1158                    text += self._peek
1159                else:
1160                    text += self._char + self._peek
1161
1162                if self._current + 1 < self.size:
1163                    self._advance(2)
1164                else:
1165                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._current}")
1166            else:
1167                if self._chars(delim_size) == delimiter:
1168                    if delim_size > 1:
1169                        self._advance(delim_size - 1)
1170                    break
1171
1172                if self._end:
1173                    raise RuntimeError(f"Missing {delimiter} from {self._line}:{self._start}")
1174
1175                current = self._current - 1
1176                self._advance(alnum=True)
1177                text += self.sql[current : self._current - 1]
1178
1179        return text
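
The class-level attributes (SINGLE_TOKENS, IDENTIFIERS, QUOTES, KEYWORDS, and friends) are the knobs that dialect tokenizers override; judging from the listing above, the _Tokenizer metaclass then derives the private lookup tables (_IDENTIFIERS, _QUOTES, KEYWORD_TRIE, which is marked "autofilled") from them. A hedged sketch of a hypothetical subclass (MyTokenizer is illustrative, not part of sqlglot):

from sqlglot.tokens import Tokenizer, TokenType

class MyTokenizer(Tokenizer):
    # also treat backticks as identifier delimiters, MySQL-style
    IDENTIFIERS = ['"', "`"]

tokens = MyTokenizer().tokenize("SELECT `col` FROM t")
assert tokens[1].token_type is TokenType.IDENTIFIER
assert tokens[1].text == "col"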
def reset(self) -> None:
794    def reset(self) -> None:
795        self.sql = ""
796        self.size = 0
797        self.tokens: t.List[Token] = []
798        self._start = 0
799        self._current = 0
800        self._line = 1
801        self._col = 1
802        self._comments: t.List[str] = []
803
804        self._char = ""
805        self._end = False
806        self._peek = ""
807        self._prev_token_line = -1
def tokenize(self, sql: str) -> List[sqlglot.tokens.Token]:
809    def tokenize(self, sql: str) -> t.List[Token]:
810        """Returns a list of tokens corresponding to the SQL string `sql`."""
811        self.reset()
812        self.sql = sql
813        self.size = len(sql)
814        try:
815            self._scan()
816        except Exception as e:
817            start = self._current - 50
818            end = self._current + 50
819            start = start if start > 0 else 0
820            end = end if end < self.size else self.size - 1
821            context = self.sql[start:end]
822            raise ValueError(f"Error tokenizing '{context}'") from e
823
824        return self.tokens

Returns a list of tokens corresponding to the SQL string sql.
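
A minimal end-to-end sketch of the default tokenizer (keywords are uppercased, unquoted names become VAR tokens):

>>> from sqlglot.tokens import Tokenizer
>>> [(tok.token_type.name, tok.text) for tok in Tokenizer().tokenize("SELECT 1 FROM t")]
[('SELECT', 'SELECT'), ('NUMBER', '1'), ('FROM', 'FROM'), ('VAR', 't')]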