sqlglot.dialects.bigquery
from __future__ import annotations

import logging
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot._typing import E
from sqlglot.dialects.dialect import (
    Dialect,
    binary_from_function,
    date_add_interval_sql,
    datestrtodate_sql,
    format_time_lambda,
    if_sql,
    inline_array_sql,
    json_keyvalue_comma_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    parse_date_delta_with_interval,
    regexp_replace_sql,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType

logger = logging.getLogger("sqlglot")


def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
    if not expression.find_ancestor(exp.From, exp.Join):
        return self.values_sql(expression)

    alias = expression.args.get("alias")

    structs = [
        exp.Struct(
            expressions=[
                exp.alias_(value, column_name)
                for value, column_name in zip(
                    t.expressions,
                    alias.columns
                    if alias and alias.columns
                    else (f"_c{i}" for i in range(len(t.expressions))),
                )
            ]
        )
        for t in expression.find_all(exp.Tuple)
    ]

    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))


def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)

    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")

        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    from sqlglot.optimizer.scope import find_all_in_scope

    if isinstance(expression, exp.Select):
        unnest_aliases = {
            unnest.alias
            for unnest in find_all_in_scope(expression, exp.Unnest)
            if isinstance(unnest.parent, (exp.From, exp.Join))
        }
        if unnest_aliases:
            for column in expression.find_all(exp.Column):
                if column.table in unnest_aliases:
                    column.set("table", None)
                elif column.db in unnest_aliases:
                    column.set("db", None)

    return expression


# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            aliases = {
                select.this: select.args["alias"]
                for select in expression.selects
                if isinstance(select, exp.Alias)
            }

            for e in group.expressions:
                alias = aliases.get(e)

                if alias:
                    e.replace(exp.column(alias))

    return expression


def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression


def _parse_timestamp(args: t.List) -> exp.StrToTime:
    this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
    this.set("zone", seq_get(args, 2))
    return this


def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
    return expr_type.from_arg_list(args)


def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.Hex(this=arg)


class BigQuery(Dialect):
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    LOG_BASE_FIRST = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        if isinstance(expression, exp.Identifier):
            parent = expression.parent
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
            # The following check is essentially a heuristic to detect tables based on whether or
            # not they're qualified. It also avoids normalizing UDFs, because they're case-sensitive.
            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest(),
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA2: lambda self, e: self.func(
                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all", "and", "any", "array", "as", "asc", "assert_rows_modified", "at",
            "between", "by", "case", "cast", "collate", "contains", "create", "cross",
            "cube", "current", "default", "define", "desc", "distinct", "else", "end",
            "enum", "escape", "except", "exclude", "exists", "extract", "false", "fetch",
            "following", "for", "from", "full", "group", "grouping", "groups", "hash",
            "having", "if", "ignore", "in", "inner", "intersect", "interval", "into",
            "is", "join", "lateral", "left", "like", "limit", "lookup", "merge",
            "natural", "new", "no", "not", "null", "nulls", "of", "on", "or", "order",
            "outer", "over", "partition", "preceding", "proto", "qualify", "range",
            "recursive", "respect", "right", "rollup", "rows", "select", "set", "some",
            "struct", "tablesample", "then", "to", "treat", "true", "unbounded", "union",
            "unnest", "using", "when", "where", "window", "with", "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
class BigQuery(Dialect) (full source in the module listing above)
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
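FORMAT_MAPPING backs BigQuery's CAST(expr AS type FORMAT '...') syntax (see the comment in Generator.attimezone_sql in the source above). A small, hypothetical round-trip sketch:

import sqlglot

# Format elements such as YYYY, MM and DD are translated via FORMAT_MAPPING.
ast = sqlglot.parse_one("SELECT CAST(d AS STRING FORMAT 'YYYY-MM-DD')", read="bigquery")
print(ast.sql(dialect="bigquery"))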
@classmethod
def normalize_identifier(cls, expression: E) -> E
Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized to lowercase regardless of being quoted or not.
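A rough illustration of the heuristic, calling the classmethod directly on identifiers built with exp.to_identifier (a sketch; in normal use this is invoked for you by sqlglot's qualification and normalization passes):

from sqlglot import exp
from sqlglot.dialects.bigquery import BigQuery

# An unqualified, column-like identifier is lowercased.
ident = exp.to_identifier("MyCol")
print(BigQuery.normalize_identifier(ident).name)  # expected: mycol

# An identifier marked as a table name is left untouched.
table_ident = exp.to_identifier("MyTable")
table_ident.meta["is_table"] = True
print(BigQuery.normalize_identifier(table_ident).name)  # expected: MyTable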
tokenizer_class =
<class 'BigQuery.Tokenizer'>
parser_class =
<class 'BigQuery.Parser'>
generator_class =
<class 'BigQuery.Generator'>
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.dialects.dialect.Dialect
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- DATE_FORMAT
- DATEINT_FORMAT
- TIME_FORMAT
- get_or_raise
- format_time
- case_sensitive
- can_identify
- quote_identifier
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer) (full source in the module listing above)
BYTE_STRINGS =
[("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS =
[("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 
'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 
'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': 
<TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
class BigQuery.Parser(sqlglot.parser.Parser) (full source in the module listing above)
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
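A minimal usage sketch (not part of the module): the BigQuery-specific overrides shown above, such as the dash handling in _parse_table_part and the comma default for SPLIT, are exercised through the top-level parse API, which forwards keyword options such as error_level to this Parser. The project, dataset, and column names are illustrative.

# Illustrative sketch only; names are made up.
import sqlglot
from sqlglot import exp
from sqlglot.errors import ErrorLevel

# Dash-separated project ids are folded into a single identifier by
# _parse_table_part, so this parses without quoting the project.
ast = sqlglot.parse_one(
    "SELECT col FROM my-project.my_dataset.my_table",
    read="bigquery",
    error_level=ErrorLevel.IMMEDIATE,  # the Parser default described above
)
table = ast.find(exp.Table)
print(table.catalog, table.db, table.name)

# SPLIT with a single argument picks up the comma default from the
# FUNCTIONS override shown above.
split = sqlglot.parse_one("SELECT SPLIT('a,b,c')", read="bigquery").find(exp.Split)
print(split.args["expression"])  # a ',' string literal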
FUNCTIONS =
{'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
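Illustrative only: a few of the FUNCTIONS mappings above, exercised through parse_one and transpile. These snippets assume sqlglot is importable; expression classes refer to sqlglot.expressions.

import sqlglot
from sqlglot import exp

# TO_HEX(MD5(..)) is parsed into exp.MD5 via the TO_HEX entry above.
e = sqlglot.parse_one("SELECT TO_HEX(MD5(x))", read="bigquery")
print(e.find(exp.MD5) is not None)

# SHA256 becomes exp.SHA2 with an explicit length of 256.
e = sqlglot.parse_one("SELECT SHA256(x)", read="bigquery")
print(e.find(exp.SHA2).text("length"))  # '256'

# GENERATE_ARRAY maps to exp.GenerateSeries and should round-trip back
# to the BigQuery spelling when generated with write="bigquery".
print(sqlglot.transpile("SELECT GENERATE_ARRAY(1, 3)", read="bigquery", write="bigquery")[0])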
FUNCTION_PARSERS =
{'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS =
{<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS =
{<TokenType.TABLE: 'TABLE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.STRUCT: 'STRUCT'>}
ID_VAR_TOKENS =
{<TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.UUID: 'UUID'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.SOME: 'SOME'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.VAR: 'VAR'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UINT128: 'UINT128'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TIME: 'TIME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.VIEW: 'VIEW'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.LEFT: 'LEFT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.NULL: 'NULL'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TABLE: 'TABLE'>, <TokenType.END: 'END'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UINT: 'UINT'>, <TokenType.ROW: 'ROW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INT256: 'INT256'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.CHAR: 'CHAR'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ALL: 'ALL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MAP: 'MAP'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.XML: 'XML'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.INET: 'INET'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.INT: 'INT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DESC: 'DESC'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.DATE: 'DATE'>, <TokenType.ANY: 'ANY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SET: 'SET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.BIT: 'BIT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.DIV: 'DIV'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CASE: 'CASE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.SEMI: 'SEMI'>, <TokenType.INT128: 'INT128'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.JSON: 'JSON'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.FULL: 'FULL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ASC: 'ASC'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.VALUES: 'VALUES'>, <TokenType.IS: 'IS'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.VOLATILE: 'VOLATILE'>}
PROPERTY_PARSERS =
{'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS =
{'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
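The OPTIONS entries in PROPERTY_PARSERS and CONSTRAINT_PARSERS above route BigQuery OPTIONS(...) clauses, at both table and column level, into property expressions. A hedged sketch with made-up DDL:

# Illustrative only: OPTIONS(...) clauses are parsed through the overrides above.
import sqlglot
from sqlglot import exp

ddl = "CREATE TABLE ds.t (x INT64 OPTIONS (description = 'col')) OPTIONS (description = 'tbl')"
create = sqlglot.parse_one(ddl, read="bigquery")
print(isinstance(create, exp.Create))           # True
print(create.find(exp.Properties) is not None)  # the options surface as Properties nodes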
RANGE_PARSERS =
{<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
TABLE_ALIAS_TOKENS =
{<TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.UUID: 'UUID'>, <TokenType.SOME: 'SOME'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.VAR: 'VAR'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.UINT128: 'UINT128'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.TIME: 'TIME'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.VIEW: 'VIEW'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.TEXT: 'TEXT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.NULL: 'NULL'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TABLE: 'TABLE'>, <TokenType.END: 'END'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UINT: 'UINT'>, <TokenType.ROW: 'ROW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INT256: 'INT256'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.CHAR: 'CHAR'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ALL: 'ALL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MAP: 'MAP'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.XML: 'XML'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NEXT: 'NEXT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.INET: 'INET'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.RANGE: 'RANGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.LOAD: 'LOAD'>, <TokenType.ANTI: 'ANTI'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.INT: 'INT'>, <TokenType.EXECUTE: 'EXECUTE'>, 
<TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.DESC: 'DESC'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.KILL: 'KILL'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.DATE: 'DATE'>, <TokenType.ANY: 'ANY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.SET: 'SET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.BIT: 'BIT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.CASE: 'CASE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.INT128: 'INT128'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.JSON: 'JSON'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BINARY: 'BINARY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ASC: 'ASC'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.IS: 'IS'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.VOLATILE: 'VOLATILE'>}
SET_TRIE: Dict =
{'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_KEYWORDS
- DB_CREATABLES
- CREATABLES
- INTERVAL_VARS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- NO_PAREN_FUNCTION_PARSERS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- MODIFIABLES
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- CLONE_KINDS
- OPCLASS_FOLLOW_KEYWORDS
- TABLE_INDEX_HINT_TOKENS
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- STRICT_CAST
- CONCAT_NULL_OUTPUTS_STRING
- IDENTIFY_PIVOT_STRINGS
- ALTER_TABLE_ADD_COLUMN_KEYWORD
- TABLESAMPLE_CSV
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- STRICT_STRING_CONCAT
- NULL_ORDERING
- error_level
- error_message_context
- max_errors
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Generator(generator.Generator):
    EXPLICIT_UNION = True
    INTERVAL_ALLOWS_PLURAL_FORM = False
    JOIN_HINTS = False
    QUERY_HINTS = False
    TABLE_HINTS = False
    LIMIT_FETCH = "LIMIT"
    RENAME_TABLE_WITH_DB = False
    ESCAPE_LINE_BREAK = True
    NVL2_SUPPORTED = False
    UNNEST_WITH_ORDINALITY = False
    COLLATE_IS_FUNC = True

    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArraySize: rename_func("ARRAY_LENGTH"),
        exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
        exp.Create: _create_sql,
        exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
        exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
        exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
        exp.DateFromParts: rename_func("DATE"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DateSub: date_add_interval_sql("DATE", "SUB"),
        exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
        exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
        exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
        exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
        exp.GroupConcat: rename_func("STRING_AGG"),
        exp.Hex: rename_func("TO_HEX"),
        exp.If: if_sql(false_value="NULL"),
        exp.ILike: no_ilike_sql,
        exp.IntDiv: rename_func("DIV"),
        exp.JSONFormat: rename_func("TO_JSON_STRING"),
        exp.JSONKeyValue: json_keyvalue_comma_sql,
        exp.Max: max_or_greatest,
        exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
        exp.MD5Digest: rename_func("MD5"),
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.RegexpExtract: lambda self, e: self.func(
            "REGEXP_EXTRACT",
            e.this,
            e.expression,
            e.args.get("position"),
            e.args.get("occurrence"),
        ),
        exp.RegexpReplace: regexp_replace_sql,
        exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        exp.ReturnsProperty: _returnsproperty_sql,
        exp.Select: transforms.preprocess(
            [
                transforms.explode_to_unnest(),
                _unqualify_unnest,
                transforms.eliminate_distinct_on,
                _alias_ordered_group,
                transforms.eliminate_semi_and_anti_joins,
            ]
        ),
        exp.SHA2: lambda self, e: self.func(
            f"SHA256" if e.text("length") == "256" else "SHA512", e.this
        ),
        exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
        if e.name == "IMMUTABLE"
        else "NOT DETERMINISTIC",
        exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
        exp.StrToTime: lambda self, e: self.func(
            "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
        ),
        exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
        exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
        exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
        exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
        exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
        exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
        exp.Unhex: rename_func("FROM_HEX"),
        exp.Values: _derived_table_values_to_unnest,
        exp.VariancePop: rename_func("VAR_POP"),
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
        exp.DataType.Type.BIGINT: "INT64",
        exp.DataType.Type.BINARY: "BYTES",
        exp.DataType.Type.BOOLEAN: "BOOL",
        exp.DataType.Type.CHAR: "STRING",
        exp.DataType.Type.DECIMAL: "NUMERIC",
        exp.DataType.Type.DOUBLE: "FLOAT64",
        exp.DataType.Type.FLOAT: "FLOAT64",
        exp.DataType.Type.INT: "INT64",
        exp.DataType.Type.NCHAR: "STRING",
        exp.DataType.Type.NVARCHAR: "STRING",
        exp.DataType.Type.SMALLINT: "INT64",
        exp.DataType.Type.TEXT: "STRING",
        exp.DataType.Type.TIMESTAMP: "DATETIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
        exp.DataType.Type.TINYINT: "INT64",
        exp.DataType.Type.VARBINARY: "BYTES",
        exp.DataType.Type.VARCHAR: "STRING",
        exp.DataType.Type.VARIANT: "ANY TYPE",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    RESERVED_KEYWORDS = {
        *generator.Generator.RESERVED_KEYWORDS,
        "all", "and", "any", "array", "as", "asc", "assert_rows_modified", "at",
        "between", "by", "case", "cast", "collate", "contains", "create", "cross",
        "cube", "current", "default", "define", "desc", "distinct", "else", "end",
        "enum", "escape", "except", "exclude", "exists", "extract", "false", "fetch",
        "following", "for", "from", "full", "group", "grouping", "groups", "hash",
        "having", "if", "ignore", "in", "inner", "intersect", "interval", "into",
        "is", "join", "lateral", "left", "like", "limit", "lookup", "merge",
        "natural", "new", "no", "not", "null", "nulls", "of", "on", "or", "order",
        "outer", "over", "partition", "preceding", "proto", "qualify", "range",
        "recursive", "respect", "right", "rollup", "rows", "select", "set", "some",
        "struct", "tablesample", "then", "to", "treat", "true", "unbounded",
        "union", "unnest", "using", "when", "where", "window", "with", "within",
    }

    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        return self.cast_sql(expression, safe_prefix="SAFE_")

    def cte_sql(self, expression: exp.CTE) -> str:
        if expression.alias_column_names:
            self.unsupported("Column names in CTE definition are not supported.")
        return super().cte_sql(expression)

    def array_sql(self, expression: exp.Array) -> str:
        first_arg = seq_get(expression.expressions, 0)
        if isinstance(first_arg, exp.Subqueryable):
            return f"ARRAY{self.wrap(self.sql(first_arg))}"

        return inline_array_sql(self, expression)

    def transaction_sql(self, *_) -> str:
        return "BEGIN TRANSACTION"

    def commit_sql(self, *_) -> str:
        return "COMMIT TRANSACTION"

    def rollback_sql(self, *_) -> str:
        return "ROLLBACK TRANSACTION"

    def in_unnest_op(self, expression: exp.Unnest) -> str:
        return self.sql(expression)

    def except_op(self, expression: exp.Except) -> str:
        if not expression.args.get("distinct", False):
            self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
        return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def intersect_op(self, expression: exp.Intersect) -> str:
        if not expression.args.get("distinct", False):
            self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
        return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def with_properties(self, properties: exp.Properties) -> str:
        return self.properties(properties, prefix=self.seg("OPTIONS"))

    def version_sql(self, expression: exp.Version) -> str:
        if expression.name == "TIMESTAMP":
            expression = expression.copy()
            expression.set("this", "SYSTEM_TIME")
        return super().version_sql(expression)
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are:
  - False (default): Never quote, except in cases where it's mandatory by the dialect.
  - True or 'always': Always quote.
  - 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are:
  - "upper" or True (default): Convert names to uppercase.
  - "lower": Convert names to lowercase.
  - False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
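A minimal sketch (not part of the module): the options listed above, such as pretty and unsupported_level, are forwarded to this Generator by the top-level transpile helper when writing BigQuery SQL. The query and table names are illustrative.

import sqlglot
from sqlglot.errors import ErrorLevel

sql = "SELECT a FROM t UNION SELECT b FROM u"
out = sqlglot.transpile(
    sql,
    read="duckdb",
    write="bigquery",
    pretty=True,
    unsupported_level=ErrorLevel.WARN,  # the Generator default noted above
)[0]
print(out)
# Because EXPLICIT_UNION is enabled on this Generator, the bare UNION should
# come back spelled out as UNION DISTINCT in the generated SQL.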
TRANSFORMS =
{<class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING =
{<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
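Illustrative only: the TYPE_MAPPING above, together with the Cast preprocess transform listed in TRANSFORMS, determines how data types are rendered when transpiling into BigQuery.

import sqlglot

print(sqlglot.transpile("SELECT CAST(x AS VARCHAR(10))", read="postgres", write="bigquery")[0])
# VARCHAR maps to STRING and the (10) precision is dropped by
# transforms.remove_precision_parameterized_types, so this should render as
# SELECT CAST(x AS STRING)

print(sqlglot.transpile("SELECT CAST(x AS DOUBLE)", read="duckdb", write="bigquery")[0])
# DOUBLE maps to FLOAT64, so this should render as SELECT CAST(x AS FLOAT64)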
PROPERTIES_LOCATION =
{<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 
'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
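The generator consults this mapping to decide where each property is printed inside a CREATE statement. A small sketch that simply reads the mapping:

from sqlglot import exp
from sqlglot.dialects.bigquery import BigQuery

# PartitionedByProperty is placed after the column schema (POST_SCHEMA),
# per the table above.
print(BigQuery.Generator.PROPERTIES_LOCATION[exp.PartitionedByProperty])
# Location.POST_SCHEMA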
RESERVED_KEYWORDS =
{'assert_rows_modified', 'ignore', 'select', 'contains', 'cast', 'null', 'left', 'new', 'exclude', 'to', 'escape', 'interval', 'at', 'intersect', 'proto', 'qualify', 'outer', 'then', 'or', 'recursive', 'grouping', 'rollup', 'lateral', 'over', 'case', 'current', 'cross', 'some', 'in', 'join', 'set', 'union', 'array', 'like', 'into', 'lookup', 'collate', 'if', 'desc', 'fetch', 'enum', 'hash', 'groups', 'and', 'on', 'else', 'by', 'cube', 'where', 'is', 'extract', 'respect', 'tablesample', 'order', 'merge', 'end', 'having', 'preceding', 'define', 'limit', 'range', 'partition', 'unbounded', 'default', 'from', 'exists', 'as', 'within', 'all', 'following', 'not', 'rows', 'right', 'struct', 'between', 'true', 'unnest', 'using', 'natural', 'distinct', 'group', 'create', 'except', 'no', 'for', 'treat', 'false', 'window', 'with', 'when', 'any', 'full', 'nulls', 'of', 'asc', 'inner'}
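Identifiers that collide with one of these reserved keywords are expected to be back-quoted by the generator. A hedged sketch (behavior assumed from the base generator's identifier handling):

from sqlglot import exp

# "select" is reserved in BigQuery, "foo" is not.
print(exp.column("select").sql(dialect="bigquery"))  # expected: `select`
print(exp.column("foo").sql(dialect="bigquery"))     # expected: foo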
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    parent = expression.parent

    # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
        return self.func(
            "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
        )

    return super().attimezone_sql(expression)
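A sketch of how this surfaces when transpiling; the output shape follows the branch above that is taken when the parent is not a CAST to STRING:

import sqlglot

print(
    sqlglot.transpile(
        "SELECT ts AT TIME ZONE 'America/New_York'",
        read="postgres",
        write="bigquery",
    )[0]
)
# Expected shape: SELECT TIMESTAMP(DATETIME(ts, 'America/New_York'))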
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:

@classmethod
def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
    """Checks if text can be identified given an identify option.

    Args:
        text: The text to check.
        identify:
            "always" or `True`: Always returns true.
            "safe": True if the identifier is case-insensitive.

    Returns:
        Whether or not the given text can be identified.
    """
    if identify is True or identify == "always":
        return True

    if identify == "safe":
        return not cls.case_sensitive(text)

    return False
Checks if text can be identified given an identify option.

Arguments:
- text: The text to check.
- identify:
  "always" or True: Always returns true.
  "safe": True if the identifier is case-insensitive.

Returns:
Whether or not the given text can be identified.
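A short usage sketch of the policy described above (results for "safe" depend on the dialect's case-sensitivity rules):

from sqlglot.dialects.bigquery import BigQuery

print(BigQuery.can_identify("foo", "always"))  # True
print(BigQuery.can_identify("foo", "safe"))    # True if "foo" is case-insensitive for BigQuery
print(BigQuery.can_identify("foo", False))     # False: neither "always"/True nor "safe"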
TOKENIZER_CLASS =
<class 'BigQuery.Tokenizer'>
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- LOCKING_READS_SUPPORTED
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SIZE_IS_PERCENT
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- TZ_TO_WITH_TIME_ZONE
- SELECT_KINDS
- VALUES_AS_TABLE
- ALTER_TABLE_ADD_COLUMN_KEYWORD
- AGGREGATE_FILTER_SUPPORTED
- SEMI_ANTI_JOIN_WITH_SIDE
- SUPPORTS_PARAMETERS
- COMPUTED_COLUMN_WITH_TYPE
- SUPPORTS_TABLE_COPY
- TABLESAMPLE_REQUIRES_PARENS
- DATA_TYPE_SPECIFIERS_ALLOWED
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- WITH_SEPARATED_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- SENTINEL_LINE_BREAK
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- normalize_functions
- unsupported_messages
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypeparam_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- offset_limit_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- safebracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- safeconcat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- formatjson_sql
- jsonobject_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- add_sql
- and_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- safedpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- log_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql