sqlglot.dialects.bigquery
1from __future__ import annotations 2 3import logging 4import re 5import typing as t 6 7from sqlglot import exp, generator, parser, tokens, transforms 8from sqlglot._typing import E 9from sqlglot.dialects.dialect import ( 10 Dialect, 11 binary_from_function, 12 datestrtodate_sql, 13 format_time_lambda, 14 inline_array_sql, 15 max_or_greatest, 16 min_or_least, 17 no_ilike_sql, 18 parse_date_delta_with_interval, 19 regexp_replace_sql, 20 rename_func, 21 timestrtotime_sql, 22 ts_or_ds_to_date_sql, 23) 24from sqlglot.helper import seq_get, split_num_words 25from sqlglot.tokens import TokenType 26 27logger = logging.getLogger("sqlglot") 28 29 30def _date_add_sql( 31 data_type: str, kind: str 32) -> t.Callable[[generator.Generator, exp.Expression], str]: 33 def func(self, expression): 34 this = self.sql(expression, "this") 35 unit = expression.args.get("unit") 36 unit = exp.var(unit.name.upper() if unit else "DAY") 37 interval = exp.Interval(this=expression.expression.copy(), unit=unit) 38 return f"{data_type}_{kind}({this}, {self.sql(interval)})" 39 40 return func 41 42 43def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: 44 if not expression.find_ancestor(exp.From, exp.Join): 45 return self.values_sql(expression) 46 47 alias = expression.args.get("alias") 48 49 structs = [ 50 exp.Struct( 51 expressions=[ 52 exp.alias_(value, column_name) 53 for value, column_name in zip( 54 t.expressions, 55 alias.columns 56 if alias and alias.columns 57 else (f"_c{i}" for i in range(len(t.expressions))), 58 ) 59 ] 60 ) 61 for t in expression.find_all(exp.Tuple) 62 ] 63 64 return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)])) 65 66 67def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: 68 this = expression.this 69 if isinstance(this, exp.Schema): 70 this = f"{this.this} <{self.expressions(this)}>" 71 else: 72 this = self.sql(this) 73 return f"RETURNS {this}" 74 75 76def 
_create_sql(self: generator.Generator, expression: exp.Create) -> str: 77 kind = expression.args["kind"] 78 returns = expression.find(exp.ReturnsProperty) 79 80 if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): 81 expression = expression.copy() 82 expression.set("kind", "TABLE FUNCTION") 83 84 if isinstance(expression.expression, (exp.Subquery, exp.Literal)): 85 expression.set("expression", expression.expression.this) 86 87 return self.create_sql(expression) 88 89 return self.create_sql(expression) 90 91 92def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: 93 """Remove references to unnest table aliases since bigquery doesn't allow them. 94 95 These are added by the optimizer's qualify_column step. 96 """ 97 from sqlglot.optimizer.scope import Scope 98 99 if isinstance(expression, exp.Select): 100 for unnest in expression.find_all(exp.Unnest): 101 if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias: 102 for column in Scope(expression).find_all(exp.Column): 103 if column.table == unnest.alias: 104 column.set("table", None) 105 106 return expression 107 108 109# https://issuetracker.google.com/issues/162294746 110# workaround for bigquery bug when grouping by an expression and then ordering 111# WITH x AS (SELECT 1 y) 112# SELECT y + 1 z 113# FROM x 114# GROUP BY x + 1 115# ORDER by z 116def _alias_ordered_group(expression: exp.Expression) -> exp.Expression: 117 if isinstance(expression, exp.Select): 118 group = expression.args.get("group") 119 order = expression.args.get("order") 120 121 if group and order: 122 aliases = { 123 select.this: select.args["alias"] 124 for select in expression.selects 125 if isinstance(select, exp.Alias) 126 } 127 128 for e in group.expressions: 129 alias = aliases.get(e) 130 131 if alias: 132 e.replace(exp.column(alias)) 133 134 return expression 135 136 137def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression: 138 """BigQuery doesn't allow column names 
when defining a CTE, so we try to push them down.""" 139 if isinstance(expression, exp.CTE) and expression.alias_column_names: 140 cte_query = expression.this 141 142 if cte_query.is_star: 143 logger.warning( 144 "Can't push down CTE column names for star queries. Run the query through" 145 " the optimizer or use 'qualify' to expand the star projections first." 146 ) 147 return expression 148 149 column_names = expression.alias_column_names 150 expression.args["alias"].set("columns", None) 151 152 for name, select in zip(column_names, cte_query.selects): 153 to_replace = select 154 155 if isinstance(select, exp.Alias): 156 select = select.this 157 158 # Inner aliases are shadowed by the CTE column names 159 to_replace.replace(exp.alias_(select, name)) 160 161 return expression 162 163 164def _parse_timestamp(args: t.List) -> exp.StrToTime: 165 this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)]) 166 this.set("zone", seq_get(args, 2)) 167 return this 168 169 170def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts: 171 expr_type = exp.DateFromParts if len(args) == 3 else exp.Date 172 return expr_type.from_arg_list(args) 173 174 175def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5: 176 # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation 177 arg = seq_get(args, 0) 178 return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.Hex(this=arg) 179 180 181class BigQuery(Dialect): 182 UNNEST_COLUMN_ONLY = True 183 184 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity 185 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None 186 187 # bigquery udfs are case sensitive 188 NORMALIZE_FUNCTIONS = False 189 190 TIME_MAPPING = { 191 "%D": "%m/%d/%y", 192 } 193 194 FORMAT_MAPPING = { 195 "DD": "%d", 196 "MM": "%m", 197 "MON": "%b", 198 "MONTH": "%B", 199 "YYYY": "%Y", 200 "YY": "%y", 201 "HH": "%I", 202 "HH12": "%I", 203 "HH24": "%H", 204 "MI": "%M", 
205 "SS": "%S", 206 "SSSSS": "%f", 207 "TZH": "%z", 208 } 209 210 # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement 211 # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table 212 PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"} 213 214 @classmethod 215 def normalize_identifier(cls, expression: E) -> E: 216 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). 217 # The following check is essentially a heuristic to detect tables based on whether or 218 # not they're qualified. 219 if isinstance(expression, exp.Identifier): 220 parent = expression.parent 221 222 while isinstance(parent, exp.Dot): 223 parent = parent.parent 224 225 if ( 226 not isinstance(parent, exp.UserDefinedFunction) 227 and not (isinstance(parent, exp.Table) and parent.db) 228 and not expression.meta.get("is_table") 229 ): 230 expression.set("this", expression.this.lower()) 231 232 return expression 233 234 class Tokenizer(tokens.Tokenizer): 235 QUOTES = ["'", '"', '"""', "'''"] 236 COMMENTS = ["--", "#", ("/*", "*/")] 237 IDENTIFIERS = ["`"] 238 STRING_ESCAPES = ["\\"] 239 240 HEX_STRINGS = [("0x", ""), ("0X", "")] 241 242 BYTE_STRINGS = [ 243 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") 244 ] 245 246 RAW_STRINGS = [ 247 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") 248 ] 249 250 KEYWORDS = { 251 **tokens.Tokenizer.KEYWORDS, 252 "ANY TYPE": TokenType.VARIANT, 253 "BEGIN": TokenType.COMMAND, 254 "BEGIN TRANSACTION": TokenType.BEGIN, 255 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 256 "BYTES": TokenType.BINARY, 257 "DECLARE": TokenType.COMMAND, 258 "FLOAT64": TokenType.DOUBLE, 259 "INT64": TokenType.BIGINT, 260 "RECORD": TokenType.STRUCT, 261 "TIMESTAMP": TokenType.TIMESTAMPTZ, 262 "NOT DETERMINISTIC": TokenType.VOLATILE, 263 "UNKNOWN": TokenType.NULL, 264 } 265 KEYWORDS.pop("DIV") 266 267 
class Parser(parser.Parser): 268 PREFIXED_PIVOT_COLUMNS = True 269 270 LOG_BASE_FIRST = False 271 LOG_DEFAULTS_TO_LN = True 272 273 SUPPORTS_USER_DEFINED_TYPES = False 274 275 FUNCTIONS = { 276 **parser.Parser.FUNCTIONS, 277 "DATE": _parse_date, 278 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 279 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 280 "DATE_TRUNC": lambda args: exp.DateTrunc( 281 unit=exp.Literal.string(str(seq_get(args, 1))), 282 this=seq_get(args, 0), 283 ), 284 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 285 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 286 "DIV": binary_from_function(exp.IntDiv), 287 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, 288 "MD5": exp.MD5Digest.from_arg_list, 289 "TO_HEX": _parse_to_hex, 290 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( 291 [seq_get(args, 1), seq_get(args, 0)] 292 ), 293 "PARSE_TIMESTAMP": _parse_timestamp, 294 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 295 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 296 this=seq_get(args, 0), 297 expression=seq_get(args, 1), 298 position=seq_get(args, 2), 299 occurrence=seq_get(args, 3), 300 group=exp.Literal.number(1) 301 if re.compile(str(seq_get(args, 1))).groups == 1 302 else None, 303 ), 304 "SPLIT": lambda args: exp.Split( 305 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split 306 this=seq_get(args, 0), 307 expression=seq_get(args, 1) or exp.Literal.string(","), 308 ), 309 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 310 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 311 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 312 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 313 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, 314 } 315 316 FUNCTION_PARSERS = { 317 **parser.Parser.FUNCTION_PARSERS, 318 "ARRAY": lambda self: self.expression(exp.Array, 
expressions=[self._parse_statement()]), 319 } 320 FUNCTION_PARSERS.pop("TRIM") 321 322 NO_PAREN_FUNCTIONS = { 323 **parser.Parser.NO_PAREN_FUNCTIONS, 324 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 325 } 326 327 NESTED_TYPE_TOKENS = { 328 *parser.Parser.NESTED_TYPE_TOKENS, 329 TokenType.TABLE, 330 } 331 332 ID_VAR_TOKENS = { 333 *parser.Parser.ID_VAR_TOKENS, 334 TokenType.VALUES, 335 } 336 337 PROPERTY_PARSERS = { 338 **parser.Parser.PROPERTY_PARSERS, 339 "NOT DETERMINISTIC": lambda self: self.expression( 340 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 341 ), 342 "OPTIONS": lambda self: self._parse_with_property(), 343 } 344 345 CONSTRAINT_PARSERS = { 346 **parser.Parser.CONSTRAINT_PARSERS, 347 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), 348 } 349 350 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: 351 this = super()._parse_table_part(schema=schema) 352 353 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names 354 if isinstance(this, exp.Identifier): 355 table_name = this.name 356 while self._match(TokenType.DASH, advance=False) and self._next: 357 self._advance(2) 358 table_name += f"-{self._prev.text}" 359 360 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) 361 362 return this 363 364 def _parse_table_parts(self, schema: bool = False) -> exp.Table: 365 table = super()._parse_table_parts(schema=schema) 366 if isinstance(table.this, exp.Identifier) and "." 
in table.name: 367 catalog, db, this, *rest = ( 368 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) 369 for x in split_num_words(table.name, ".", 3) 370 ) 371 372 if rest and this: 373 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) 374 375 table = exp.Table(this=this, db=db, catalog=catalog) 376 377 return table 378 379 class Generator(generator.Generator): 380 EXPLICIT_UNION = True 381 INTERVAL_ALLOWS_PLURAL_FORM = False 382 JOIN_HINTS = False 383 QUERY_HINTS = False 384 TABLE_HINTS = False 385 LIMIT_FETCH = "LIMIT" 386 RENAME_TABLE_WITH_DB = False 387 ESCAPE_LINE_BREAK = True 388 NVL2_SUPPORTED = False 389 390 TRANSFORMS = { 391 **generator.Generator.TRANSFORMS, 392 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 393 exp.ArraySize: rename_func("ARRAY_LENGTH"), 394 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), 395 exp.Create: _create_sql, 396 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), 397 exp.DateAdd: _date_add_sql("DATE", "ADD"), 398 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", 399 exp.DateFromParts: rename_func("DATE"), 400 exp.DateStrToDate: datestrtodate_sql, 401 exp.DateSub: _date_add_sql("DATE", "SUB"), 402 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 403 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 404 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 405 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), 406 exp.GroupConcat: rename_func("STRING_AGG"), 407 exp.Hex: rename_func("TO_HEX"), 408 exp.ILike: no_ilike_sql, 409 exp.IntDiv: rename_func("DIV"), 410 exp.JSONFormat: rename_func("TO_JSON_STRING"), 411 exp.Max: max_or_greatest, 412 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), 413 exp.MD5Digest: rename_func("MD5"), 414 exp.Min: min_or_least, 415 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY 
{self.sql(e, 'this')}", 416 exp.RegexpExtract: lambda self, e: self.func( 417 "REGEXP_EXTRACT", 418 e.this, 419 e.expression, 420 e.args.get("position"), 421 e.args.get("occurrence"), 422 ), 423 exp.RegexpReplace: regexp_replace_sql, 424 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 425 exp.ReturnsProperty: _returnsproperty_sql, 426 exp.Select: transforms.preprocess( 427 [ 428 transforms.explode_to_unnest, 429 _unqualify_unnest, 430 transforms.eliminate_distinct_on, 431 _alias_ordered_group, 432 ] 433 ), 434 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 435 if e.name == "IMMUTABLE" 436 else "NOT DETERMINISTIC", 437 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", 438 exp.StrToTime: lambda self, e: self.func( 439 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") 440 ), 441 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 442 exp.TimeSub: _date_add_sql("TIME", "SUB"), 443 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 444 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 445 exp.TimeStrToTime: timestrtotime_sql, 446 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), 447 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 448 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 449 exp.Unhex: rename_func("FROM_HEX"), 450 exp.Values: _derived_table_values_to_unnest, 451 exp.VariancePop: rename_func("VAR_POP"), 452 } 453 454 TYPE_MAPPING = { 455 **generator.Generator.TYPE_MAPPING, 456 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", 457 exp.DataType.Type.BIGINT: "INT64", 458 exp.DataType.Type.BINARY: "BYTES", 459 exp.DataType.Type.BOOLEAN: "BOOL", 460 exp.DataType.Type.CHAR: "STRING", 461 exp.DataType.Type.DECIMAL: "NUMERIC", 462 exp.DataType.Type.DOUBLE: "FLOAT64", 463 exp.DataType.Type.FLOAT: "FLOAT64", 464 exp.DataType.Type.INT: "INT64", 465 exp.DataType.Type.NCHAR: "STRING", 466 exp.DataType.Type.NVARCHAR: "STRING", 467 exp.DataType.Type.SMALLINT: "INT64", 468 exp.DataType.Type.TEXT: 
"STRING", 469 exp.DataType.Type.TIMESTAMP: "DATETIME", 470 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 471 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", 472 exp.DataType.Type.TINYINT: "INT64", 473 exp.DataType.Type.VARBINARY: "BYTES", 474 exp.DataType.Type.VARCHAR: "STRING", 475 exp.DataType.Type.VARIANT: "ANY TYPE", 476 } 477 478 PROPERTIES_LOCATION = { 479 **generator.Generator.PROPERTIES_LOCATION, 480 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 481 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 482 } 483 484 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords 485 RESERVED_KEYWORDS = { 486 *generator.Generator.RESERVED_KEYWORDS, 487 "all", 488 "and", 489 "any", 490 "array", 491 "as", 492 "asc", 493 "assert_rows_modified", 494 "at", 495 "between", 496 "by", 497 "case", 498 "cast", 499 "collate", 500 "contains", 501 "create", 502 "cross", 503 "cube", 504 "current", 505 "default", 506 "define", 507 "desc", 508 "distinct", 509 "else", 510 "end", 511 "enum", 512 "escape", 513 "except", 514 "exclude", 515 "exists", 516 "extract", 517 "false", 518 "fetch", 519 "following", 520 "for", 521 "from", 522 "full", 523 "group", 524 "grouping", 525 "groups", 526 "hash", 527 "having", 528 "if", 529 "ignore", 530 "in", 531 "inner", 532 "intersect", 533 "interval", 534 "into", 535 "is", 536 "join", 537 "lateral", 538 "left", 539 "like", 540 "limit", 541 "lookup", 542 "merge", 543 "natural", 544 "new", 545 "no", 546 "not", 547 "null", 548 "nulls", 549 "of", 550 "on", 551 "or", 552 "order", 553 "outer", 554 "over", 555 "partition", 556 "preceding", 557 "proto", 558 "qualify", 559 "range", 560 "recursive", 561 "respect", 562 "right", 563 "rollup", 564 "rows", 565 "select", 566 "set", 567 "some", 568 "struct", 569 "tablesample", 570 "then", 571 "to", 572 "treat", 573 "true", 574 "unbounded", 575 "union", 576 "unnest", 577 "using", 578 "when", 579 "where", 580 "window", 581 "with", 582 "within", 583 } 584 585 def 
attimezone_sql(self, expression: exp.AtTimeZone) -> str: 586 parent = expression.parent 587 588 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). 589 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 590 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): 591 return self.func( 592 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) 593 ) 594 595 return super().attimezone_sql(expression) 596 597 def trycast_sql(self, expression: exp.TryCast) -> str: 598 return self.cast_sql(expression, safe_prefix="SAFE_") 599 600 def cte_sql(self, expression: exp.CTE) -> str: 601 if expression.alias_column_names: 602 self.unsupported("Column names in CTE definition are not supported.") 603 return super().cte_sql(expression) 604 605 def array_sql(self, expression: exp.Array) -> str: 606 first_arg = seq_get(expression.expressions, 0) 607 if isinstance(first_arg, exp.Subqueryable): 608 return f"ARRAY{self.wrap(self.sql(first_arg))}" 609 610 return inline_array_sql(self, expression) 611 612 def transaction_sql(self, *_) -> str: 613 return "BEGIN TRANSACTION" 614 615 def commit_sql(self, *_) -> str: 616 return "COMMIT TRANSACTION" 617 618 def rollback_sql(self, *_) -> str: 619 return "ROLLBACK TRANSACTION" 620 621 def in_unnest_op(self, expression: exp.Unnest) -> str: 622 return self.sql(expression) 623 624 def except_op(self, expression: exp.Except) -> str: 625 if not expression.args.get("distinct", False): 626 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 627 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 628 629 def intersect_op(self, expression: exp.Intersect) -> str: 630 if not expression.args.get("distinct", False): 631 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 632 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 633 634 def 
with_properties(self, properties: exp.Properties) -> str: 635 return self.properties(properties, prefix=self.seg("OPTIONS"))
logger =
<Logger sqlglot (WARNING)>
182class BigQuery(Dialect): 183 UNNEST_COLUMN_ONLY = True 184 185 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity 186 RESOLVES_IDENTIFIERS_AS_UPPERCASE = None 187 188 # bigquery udfs are case sensitive 189 NORMALIZE_FUNCTIONS = False 190 191 TIME_MAPPING = { 192 "%D": "%m/%d/%y", 193 } 194 195 FORMAT_MAPPING = { 196 "DD": "%d", 197 "MM": "%m", 198 "MON": "%b", 199 "MONTH": "%B", 200 "YYYY": "%Y", 201 "YY": "%y", 202 "HH": "%I", 203 "HH12": "%I", 204 "HH24": "%H", 205 "MI": "%M", 206 "SS": "%S", 207 "SSSSS": "%f", 208 "TZH": "%z", 209 } 210 211 # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement 212 # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table 213 PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"} 214 215 @classmethod 216 def normalize_identifier(cls, expression: E) -> E: 217 # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least). 218 # The following check is essentially a heuristic to detect tables based on whether or 219 # not they're qualified. 
220 if isinstance(expression, exp.Identifier): 221 parent = expression.parent 222 223 while isinstance(parent, exp.Dot): 224 parent = parent.parent 225 226 if ( 227 not isinstance(parent, exp.UserDefinedFunction) 228 and not (isinstance(parent, exp.Table) and parent.db) 229 and not expression.meta.get("is_table") 230 ): 231 expression.set("this", expression.this.lower()) 232 233 return expression 234 235 class Tokenizer(tokens.Tokenizer): 236 QUOTES = ["'", '"', '"""', "'''"] 237 COMMENTS = ["--", "#", ("/*", "*/")] 238 IDENTIFIERS = ["`"] 239 STRING_ESCAPES = ["\\"] 240 241 HEX_STRINGS = [("0x", ""), ("0X", "")] 242 243 BYTE_STRINGS = [ 244 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B") 245 ] 246 247 RAW_STRINGS = [ 248 (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R") 249 ] 250 251 KEYWORDS = { 252 **tokens.Tokenizer.KEYWORDS, 253 "ANY TYPE": TokenType.VARIANT, 254 "BEGIN": TokenType.COMMAND, 255 "BEGIN TRANSACTION": TokenType.BEGIN, 256 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 257 "BYTES": TokenType.BINARY, 258 "DECLARE": TokenType.COMMAND, 259 "FLOAT64": TokenType.DOUBLE, 260 "INT64": TokenType.BIGINT, 261 "RECORD": TokenType.STRUCT, 262 "TIMESTAMP": TokenType.TIMESTAMPTZ, 263 "NOT DETERMINISTIC": TokenType.VOLATILE, 264 "UNKNOWN": TokenType.NULL, 265 } 266 KEYWORDS.pop("DIV") 267 268 class Parser(parser.Parser): 269 PREFIXED_PIVOT_COLUMNS = True 270 271 LOG_BASE_FIRST = False 272 LOG_DEFAULTS_TO_LN = True 273 274 SUPPORTS_USER_DEFINED_TYPES = False 275 276 FUNCTIONS = { 277 **parser.Parser.FUNCTIONS, 278 "DATE": _parse_date, 279 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 280 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 281 "DATE_TRUNC": lambda args: exp.DateTrunc( 282 unit=exp.Literal.string(str(seq_get(args, 1))), 283 this=seq_get(args, 0), 284 ), 285 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 286 "DATETIME_SUB": 
parse_date_delta_with_interval(exp.DatetimeSub), 287 "DIV": binary_from_function(exp.IntDiv), 288 "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list, 289 "MD5": exp.MD5Digest.from_arg_list, 290 "TO_HEX": _parse_to_hex, 291 "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")( 292 [seq_get(args, 1), seq_get(args, 0)] 293 ), 294 "PARSE_TIMESTAMP": _parse_timestamp, 295 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 296 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 297 this=seq_get(args, 0), 298 expression=seq_get(args, 1), 299 position=seq_get(args, 2), 300 occurrence=seq_get(args, 3), 301 group=exp.Literal.number(1) 302 if re.compile(str(seq_get(args, 1))).groups == 1 303 else None, 304 ), 305 "SPLIT": lambda args: exp.Split( 306 # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split 307 this=seq_get(args, 0), 308 expression=seq_get(args, 1) or exp.Literal.string(","), 309 ), 310 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 311 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 312 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 313 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 314 "TO_JSON_STRING": exp.JSONFormat.from_arg_list, 315 } 316 317 FUNCTION_PARSERS = { 318 **parser.Parser.FUNCTION_PARSERS, 319 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 320 } 321 FUNCTION_PARSERS.pop("TRIM") 322 323 NO_PAREN_FUNCTIONS = { 324 **parser.Parser.NO_PAREN_FUNCTIONS, 325 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 326 } 327 328 NESTED_TYPE_TOKENS = { 329 *parser.Parser.NESTED_TYPE_TOKENS, 330 TokenType.TABLE, 331 } 332 333 ID_VAR_TOKENS = { 334 *parser.Parser.ID_VAR_TOKENS, 335 TokenType.VALUES, 336 } 337 338 PROPERTY_PARSERS = { 339 **parser.Parser.PROPERTY_PARSERS, 340 "NOT DETERMINISTIC": lambda self: self.expression( 341 exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 342 ), 343 "OPTIONS": lambda 
self: self._parse_with_property(), 344 } 345 346 CONSTRAINT_PARSERS = { 347 **parser.Parser.CONSTRAINT_PARSERS, 348 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), 349 } 350 351 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: 352 this = super()._parse_table_part(schema=schema) 353 354 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names 355 if isinstance(this, exp.Identifier): 356 table_name = this.name 357 while self._match(TokenType.DASH, advance=False) and self._next: 358 self._advance(2) 359 table_name += f"-{self._prev.text}" 360 361 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) 362 363 return this 364 365 def _parse_table_parts(self, schema: bool = False) -> exp.Table: 366 table = super()._parse_table_parts(schema=schema) 367 if isinstance(table.this, exp.Identifier) and "." in table.name: 368 catalog, db, this, *rest = ( 369 t.cast(t.Optional[exp.Expression], exp.to_identifier(x)) 370 for x in split_num_words(table.name, ".", 3) 371 ) 372 373 if rest and this: 374 this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest])) 375 376 table = exp.Table(this=this, db=db, catalog=catalog) 377 378 return table 379 380 class Generator(generator.Generator): 381 EXPLICIT_UNION = True 382 INTERVAL_ALLOWS_PLURAL_FORM = False 383 JOIN_HINTS = False 384 QUERY_HINTS = False 385 TABLE_HINTS = False 386 LIMIT_FETCH = "LIMIT" 387 RENAME_TABLE_WITH_DB = False 388 ESCAPE_LINE_BREAK = True 389 NVL2_SUPPORTED = False 390 391 TRANSFORMS = { 392 **generator.Generator.TRANSFORMS, 393 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 394 exp.ArraySize: rename_func("ARRAY_LENGTH"), 395 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), 396 exp.Create: _create_sql, 397 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), 398 exp.DateAdd: _date_add_sql("DATE", "ADD"), 399 exp.DateDiff: lambda self, e: 
f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", 400 exp.DateFromParts: rename_func("DATE"), 401 exp.DateStrToDate: datestrtodate_sql, 402 exp.DateSub: _date_add_sql("DATE", "SUB"), 403 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 404 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 405 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 406 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), 407 exp.GroupConcat: rename_func("STRING_AGG"), 408 exp.Hex: rename_func("TO_HEX"), 409 exp.ILike: no_ilike_sql, 410 exp.IntDiv: rename_func("DIV"), 411 exp.JSONFormat: rename_func("TO_JSON_STRING"), 412 exp.Max: max_or_greatest, 413 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), 414 exp.MD5Digest: rename_func("MD5"), 415 exp.Min: min_or_least, 416 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 417 exp.RegexpExtract: lambda self, e: self.func( 418 "REGEXP_EXTRACT", 419 e.this, 420 e.expression, 421 e.args.get("position"), 422 e.args.get("occurrence"), 423 ), 424 exp.RegexpReplace: regexp_replace_sql, 425 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 426 exp.ReturnsProperty: _returnsproperty_sql, 427 exp.Select: transforms.preprocess( 428 [ 429 transforms.explode_to_unnest, 430 _unqualify_unnest, 431 transforms.eliminate_distinct_on, 432 _alias_ordered_group, 433 ] 434 ), 435 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 436 if e.name == "IMMUTABLE" 437 else "NOT DETERMINISTIC", 438 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", 439 exp.StrToTime: lambda self, e: self.func( 440 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") 441 ), 442 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 443 exp.TimeSub: _date_add_sql("TIME", "SUB"), 444 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 445 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 446 
exp.TimeStrToTime: timestrtotime_sql, 447 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), 448 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 449 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 450 exp.Unhex: rename_func("FROM_HEX"), 451 exp.Values: _derived_table_values_to_unnest, 452 exp.VariancePop: rename_func("VAR_POP"), 453 } 454 455 TYPE_MAPPING = { 456 **generator.Generator.TYPE_MAPPING, 457 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", 458 exp.DataType.Type.BIGINT: "INT64", 459 exp.DataType.Type.BINARY: "BYTES", 460 exp.DataType.Type.BOOLEAN: "BOOL", 461 exp.DataType.Type.CHAR: "STRING", 462 exp.DataType.Type.DECIMAL: "NUMERIC", 463 exp.DataType.Type.DOUBLE: "FLOAT64", 464 exp.DataType.Type.FLOAT: "FLOAT64", 465 exp.DataType.Type.INT: "INT64", 466 exp.DataType.Type.NCHAR: "STRING", 467 exp.DataType.Type.NVARCHAR: "STRING", 468 exp.DataType.Type.SMALLINT: "INT64", 469 exp.DataType.Type.TEXT: "STRING", 470 exp.DataType.Type.TIMESTAMP: "DATETIME", 471 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 472 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", 473 exp.DataType.Type.TINYINT: "INT64", 474 exp.DataType.Type.VARBINARY: "BYTES", 475 exp.DataType.Type.VARCHAR: "STRING", 476 exp.DataType.Type.VARIANT: "ANY TYPE", 477 } 478 479 PROPERTIES_LOCATION = { 480 **generator.Generator.PROPERTIES_LOCATION, 481 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 482 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 483 } 484 485 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords 486 RESERVED_KEYWORDS = { 487 *generator.Generator.RESERVED_KEYWORDS, 488 "all", 489 "and", 490 "any", 491 "array", 492 "as", 493 "asc", 494 "assert_rows_modified", 495 "at", 496 "between", 497 "by", 498 "case", 499 "cast", 500 "collate", 501 "contains", 502 "create", 503 "cross", 504 "cube", 505 "current", 506 "default", 507 "define", 508 "desc", 509 "distinct", 510 "else", 511 "end", 512 "enum", 513 "escape", 514 
"except", 515 "exclude", 516 "exists", 517 "extract", 518 "false", 519 "fetch", 520 "following", 521 "for", 522 "from", 523 "full", 524 "group", 525 "grouping", 526 "groups", 527 "hash", 528 "having", 529 "if", 530 "ignore", 531 "in", 532 "inner", 533 "intersect", 534 "interval", 535 "into", 536 "is", 537 "join", 538 "lateral", 539 "left", 540 "like", 541 "limit", 542 "lookup", 543 "merge", 544 "natural", 545 "new", 546 "no", 547 "not", 548 "null", 549 "nulls", 550 "of", 551 "on", 552 "or", 553 "order", 554 "outer", 555 "over", 556 "partition", 557 "preceding", 558 "proto", 559 "qualify", 560 "range", 561 "recursive", 562 "respect", 563 "right", 564 "rollup", 565 "rows", 566 "select", 567 "set", 568 "some", 569 "struct", 570 "tablesample", 571 "then", 572 "to", 573 "treat", 574 "true", 575 "unbounded", 576 "union", 577 "unnest", 578 "using", 579 "when", 580 "where", 581 "window", 582 "with", 583 "within", 584 } 585 586 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: 587 parent = expression.parent 588 589 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). 590 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
591 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): 592 return self.func( 593 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) 594 ) 595 596 return super().attimezone_sql(expression) 597 598 def trycast_sql(self, expression: exp.TryCast) -> str: 599 return self.cast_sql(expression, safe_prefix="SAFE_") 600 601 def cte_sql(self, expression: exp.CTE) -> str: 602 if expression.alias_column_names: 603 self.unsupported("Column names in CTE definition are not supported.") 604 return super().cte_sql(expression) 605 606 def array_sql(self, expression: exp.Array) -> str: 607 first_arg = seq_get(expression.expressions, 0) 608 if isinstance(first_arg, exp.Subqueryable): 609 return f"ARRAY{self.wrap(self.sql(first_arg))}" 610 611 return inline_array_sql(self, expression) 612 613 def transaction_sql(self, *_) -> str: 614 return "BEGIN TRANSACTION" 615 616 def commit_sql(self, *_) -> str: 617 return "COMMIT TRANSACTION" 618 619 def rollback_sql(self, *_) -> str: 620 return "ROLLBACK TRANSACTION" 621 622 def in_unnest_op(self, expression: exp.Unnest) -> str: 623 return self.sql(expression) 624 625 def except_op(self, expression: exp.Except) -> str: 626 if not expression.args.get("distinct", False): 627 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 628 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 629 630 def intersect_op(self, expression: exp.Intersect) -> str: 631 if not expression.args.get("distinct", False): 632 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 633 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 634 635 def with_properties(self, properties: exp.Properties) -> str: 636 return self.properties(properties, prefix=self.seg("OPTIONS"))
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
@classmethod
def
normalize_identifier(cls, expression: ~E) -> ~E:
@classmethod
def normalize_identifier(cls, expression: E) -> E:
    """Lower-case unquoted identifiers that BigQuery treats case-insensitively.

    CTE names aren't case-sensitive in BigQuery, but table names are (by
    default, at least), so identifiers that look like qualified table
    references are left untouched. The qualification check is a heuristic.
    """
    if not isinstance(expression, exp.Identifier):
        return expression

    # Skip over dotted paths to reach the real syntactic parent.
    ancestor = expression.parent
    while isinstance(ancestor, exp.Dot):
        ancestor = ancestor.parent

    part_of_udf = isinstance(ancestor, exp.UserDefinedFunction)
    qualified_table = isinstance(ancestor, exp.Table) and ancestor.db
    flagged_table = expression.meta.get("is_table")

    if not (part_of_udf or qualified_table or flagged_table):
        expression.set("this", expression.this.lower())

    return expression
Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of being quoted or not.
tokenizer_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
parser_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Parser'>
generator_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.dialects.dialect.Dialect
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- DATE_FORMAT
- DATEINT_FORMAT
- TIME_FORMAT
- get_or_raise
- format_time
- case_sensitive
- can_identify
- quote_identifier
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Tokenizer(tokens.Tokenizer):
    """BigQuery-specific tokenizer settings (quotes, literals, keywords)."""

    # BigQuery accepts single-, double-, and triple-quoted string literals.
    QUOTES = ["'", '"', '"""', "'''"]
    COMMENTS = ["--", "#", ("/*", "*/")]
    # Identifiers are quoted with backticks, e.g. `project.dataset.table`.
    IDENTIFIERS = ["`"]
    STRING_ESCAPES = ["\\"]

    HEX_STRINGS = [("0x", ""), ("0X", "")]

    # b'...' / B"..." (any quote style) denote BYTES literals.
    BYTE_STRINGS = [
        (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
    ]

    # r'...' / R"..." (any quote style) denote raw strings (no escapes).
    RAW_STRINGS = [
        (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
    ]

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "ANY TYPE": TokenType.VARIANT,
        "BEGIN": TokenType.COMMAND,
        "BEGIN TRANSACTION": TokenType.BEGIN,
        "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
        "BYTES": TokenType.BINARY,
        "DECLARE": TokenType.COMMAND,
        "FLOAT64": TokenType.DOUBLE,
        "INT64": TokenType.BIGINT,
        "RECORD": TokenType.STRUCT,
        # Bare TIMESTAMP is timezone-aware in BigQuery.
        "TIMESTAMP": TokenType.TIMESTAMPTZ,
        "NOT DETERMINISTIC": TokenType.VOLATILE,
        "UNKNOWN": TokenType.NULL,
    }
    # DIV is a regular function in BigQuery, not an operator keyword.
    KEYWORDS.pop("DIV")
BYTE_STRINGS =
[("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS =
[("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 
'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': 
<TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 
'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': 
<TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 
'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>}
class Parser(parser.Parser):
    """BigQuery-specific parser overrides (functions, properties, table names)."""

    # Pivot output columns are prefixed with the aggregation alias.
    PREFIXED_PIVOT_COLUMNS = True

    # LOG(x, base) puts the base second, and bare LOG(x) means LN(x).
    LOG_BASE_FIRST = False
    LOG_DEFAULTS_TO_LN = True

    SUPPORTS_USER_DEFINED_TYPES = False

    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "DATE": _parse_date,
        "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
        "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
        # Store the unit as a string literal so it round-trips uniformly.
        "DATE_TRUNC": lambda args: exp.DateTrunc(
            unit=exp.Literal.string(str(seq_get(args, 1))),
            this=seq_get(args, 0),
        ),
        "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
        "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
        "DIV": binary_from_function(exp.IntDiv),
        "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
        "MD5": exp.MD5Digest.from_arg_list,
        "TO_HEX": _parse_to_hex,
        # PARSE_DATE(format, value) -> StrToDate(value, format): args flip.
        "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
            [seq_get(args, 1), seq_get(args, 0)]
        ),
        "PARSE_TIMESTAMP": _parse_timestamp,
        "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
        "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            # BigQuery returns the capturing group when the pattern has
            # exactly one; record that so other dialects can reproduce it.
            group=exp.Literal.number(1)
            if re.compile(str(seq_get(args, 1))).groups == 1
            else None,
        ),
        "SPLIT": lambda args: exp.Split(
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
            this=seq_get(args, 0),
            expression=seq_get(args, 1) or exp.Literal.string(","),
        ),
        "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
        "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
        "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
        "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
        "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        # ARRAY(SELECT ...) takes a full statement, not an expression list.
        "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    }
    FUNCTION_PARSERS.pop("TRIM")

    NO_PAREN_FUNCTIONS = {
        **parser.Parser.NO_PAREN_FUNCTIONS,
        TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    }

    NESTED_TYPE_TOKENS = {
        *parser.Parser.NESTED_TYPE_TOKENS,
        TokenType.TABLE,
    }

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.VALUES,
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "NOT DETERMINISTIC": lambda self: self.expression(
            exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
        ),
        # OPTIONS(...) clauses map onto sqlglot's generic WITH properties.
        "OPTIONS": lambda self: self._parse_with_property(),
    }

    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    }

    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
        """Parse one table-name part, gluing dash-separated pieces (e.g.
        my-project) back into a single identifier."""
        this = super()._parse_table_part(schema=schema)

        # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
        if isinstance(this, exp.Identifier):
            table_name = this.name
            # Consume DASH + following token pairs and append them verbatim.
            while self._match(TokenType.DASH, advance=False) and self._next:
                self._advance(2)
                table_name += f"-{self._prev.text}"

            this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

        return this

    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
        """Split a single quoted identifier such as `project.dataset.table`
        into catalog / db / table parts."""
        table = super()._parse_table_parts(schema=schema)
        if isinstance(table.this, exp.Identifier) and "." in table.name:
            catalog, db, this, *rest = (
                t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                for x in split_num_words(table.name, ".", 3)
            )

            if rest and this:
                # Any parts beyond three collapse into a dotted expression.
                this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

            table = exp.Table(this=this, db=db, catalog=catalog)

        return table
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
FUNCTIONS =
{'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS =
{'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS =
{<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS =
{<TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.TABLE: 'TABLE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.STRUCT: 'STRUCT'>}
ID_VAR_TOKENS =
{<TokenType.ALL: 'ALL'>, <TokenType.TIME: 'TIME'>, <TokenType.CHAR: 'CHAR'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FILTER: 'FILTER'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.CASE: 'CASE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.YEAR: 'YEAR'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DESC: 'DESC'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.APPLY: 'APPLY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INET: 'INET'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VAR: 'VAR'>, <TokenType.SEMI: 'SEMI'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ROW: 'ROW'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.TABLE: 'TABLE'>, <TokenType.DATE: 'DATE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.DIV: 'DIV'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.REFERENCES: 'REFERENCES'>, 
<TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.LOAD: 'LOAD'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT128: 'INT128'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.SUPER: 'SUPER'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VALUES: 'VALUES'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.XML: 'XML'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ASC: 'ASC'>, <TokenType.TEXT: 'TEXT'>, <TokenType.SOME: 'SOME'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.END: 'END'>, <TokenType.ROWS: 'ROWS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BIT: 'BIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.NULL: 'NULL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.TOP: 'TOP'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MAP: 'MAP'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.UINT: 'UINT'>, <TokenType.FULL: 'FULL'>, <TokenType.LOWCARDINALITY: 
'LOWCARDINALITY'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UUID: 'UUID'>, <TokenType.IS: 'IS'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INT256: 'INT256'>, <TokenType.DELETE: 'DELETE'>, <TokenType.SET: 'SET'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.INT: 'INT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NESTED: 'NESTED'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.BINARY: 'BINARY'>, <TokenType.SMALLINT: 'SMALLINT'>}
PROPERTY_PARSERS =
{'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function 
Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS =
{'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
TOKENIZER_CLASS: Type[sqlglot.tokens.Tokenizer] =
<class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
SET_TRIE: Dict =
{'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.parser.Parser
- Parser
- STRUCT_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- TYPE_TOKENS
- SUBQUERY_PREDICATES
- RESERVED_KEYWORDS
- DB_CREATABLES
- CREATABLES
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- TIMES
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- RANGE_PARSERS
- ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- NO_PAREN_FUNCTION_PARSERS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- MODIFIABLES
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- INSERT_ALTERNATIVES
- CLONE_KINDS
- TABLE_INDEX_HINT_TOKENS
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- STRICT_CAST
- CONCAT_NULL_OUTPUTS_STRING
- IDENTIFY_PIVOT_STRINGS
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- STRICT_STRING_CONCAT
- NULL_ORDERING
- error_level
- error_message_context
- max_errors
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
class Generator(generator.Generator):
    """Generates BigQuery (GoogleSQL) from a sqlglot syntax tree.

    Overrides the base generator with BigQuery-specific type names
    (e.g. INT64, FLOAT64, BYTES), function spellings (DATE_ADD,
    REGEXP_CONTAINS, TO_HEX, ...), and statement forms (explicit UNION
    DISTINCT/ALL, LIMIT instead of FETCH, OPTIONS(...) properties).
    """

    EXPLICIT_UNION = True
    INTERVAL_ALLOWS_PLURAL_FORM = False
    JOIN_HINTS = False
    QUERY_HINTS = False
    TABLE_HINTS = False
    LIMIT_FETCH = "LIMIT"
    RENAME_TABLE_WITH_DB = False
    ESCAPE_LINE_BREAK = True
    NVL2_SUPPORTED = False

    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArraySize: rename_func("ARRAY_LENGTH"),
        exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
        exp.Create: _create_sql,
        exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
        exp.DateAdd: _date_add_sql("DATE", "ADD"),
        exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
        exp.DateFromParts: rename_func("DATE"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DateSub: _date_add_sql("DATE", "SUB"),
        exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
        exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
        exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
        exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
        exp.GroupConcat: rename_func("STRING_AGG"),
        exp.Hex: rename_func("TO_HEX"),
        exp.ILike: no_ilike_sql,
        exp.IntDiv: rename_func("DIV"),
        exp.JSONFormat: rename_func("TO_JSON_STRING"),
        exp.Max: max_or_greatest,
        exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
        exp.MD5Digest: rename_func("MD5"),
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.RegexpExtract: lambda self, e: self.func(
            "REGEXP_EXTRACT",
            e.this,
            e.expression,
            e.args.get("position"),
            e.args.get("occurrence"),
        ),
        exp.RegexpReplace: regexp_replace_sql,
        exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        exp.ReturnsProperty: _returnsproperty_sql,
        exp.Select: transforms.preprocess(
            [
                transforms.explode_to_unnest,
                _unqualify_unnest,
                transforms.eliminate_distinct_on,
                _alias_ordered_group,
            ]
        ),
        # fix: dropped redundant f-prefix on the placeholder-free literal (F541)
        exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
        if e.name == "IMMUTABLE"
        else "NOT DETERMINISTIC",
        exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
        exp.StrToTime: lambda self, e: self.func(
            "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
        ),
        exp.TimeAdd: _date_add_sql("TIME", "ADD"),
        exp.TimeSub: _date_add_sql("TIME", "SUB"),
        exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
        exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
        exp.TimeStrToTime: timestrtotime_sql,
        # fix: "TRIM" was an f-string with no placeholders (F541)
        exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
        exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
        exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
        exp.Unhex: rename_func("FROM_HEX"),
        exp.Values: _derived_table_values_to_unnest,
        exp.VariancePop: rename_func("VAR_POP"),
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
        exp.DataType.Type.BIGINT: "INT64",
        exp.DataType.Type.BINARY: "BYTES",
        exp.DataType.Type.BOOLEAN: "BOOL",
        exp.DataType.Type.CHAR: "STRING",
        exp.DataType.Type.DECIMAL: "NUMERIC",
        exp.DataType.Type.DOUBLE: "FLOAT64",
        exp.DataType.Type.FLOAT: "FLOAT64",
        exp.DataType.Type.INT: "INT64",
        exp.DataType.Type.NCHAR: "STRING",
        exp.DataType.Type.NVARCHAR: "STRING",
        exp.DataType.Type.SMALLINT: "INT64",
        exp.DataType.Type.TEXT: "STRING",
        exp.DataType.Type.TIMESTAMP: "DATETIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
        exp.DataType.Type.TINYINT: "INT64",
        exp.DataType.Type.VARBINARY: "BYTES",
        exp.DataType.Type.VARCHAR: "STRING",
        exp.DataType.Type.VARIANT: "ANY TYPE",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    RESERVED_KEYWORDS = {
        *generator.Generator.RESERVED_KEYWORDS,
        "all",
        "and",
        "any",
        "array",
        "as",
        "asc",
        "assert_rows_modified",
        "at",
        "between",
        "by",
        "case",
        "cast",
        "collate",
        "contains",
        "create",
        "cross",
        "cube",
        "current",
        "default",
        "define",
        "desc",
        "distinct",
        "else",
        "end",
        "enum",
        "escape",
        "except",
        "exclude",
        "exists",
        "extract",
        "false",
        "fetch",
        "following",
        "for",
        "from",
        "full",
        "group",
        "grouping",
        "groups",
        "hash",
        "having",
        "if",
        "ignore",
        "in",
        "inner",
        "intersect",
        "interval",
        "into",
        "is",
        "join",
        "lateral",
        "left",
        "like",
        "limit",
        "lookup",
        "merge",
        "natural",
        "new",
        "no",
        "not",
        "null",
        "nulls",
        "of",
        "on",
        "or",
        "order",
        "outer",
        "over",
        "partition",
        "preceding",
        "proto",
        "qualify",
        "range",
        "recursive",
        "respect",
        "right",
        "rollup",
        "rows",
        "select",
        "set",
        "some",
        "struct",
        "tablesample",
        "then",
        "to",
        "treat",
        "true",
        "unbounded",
        "union",
        "unnest",
        "using",
        "when",
        "where",
        "window",
        "with",
        "within",
    }

    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        """Render AT TIME ZONE as a TIMESTAMP(DATETIME(..)) conversion.

        BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        """
        parent = expression.parent

        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        """TRY_CAST becomes BigQuery's SAFE_CAST."""
        return self.cast_sql(expression, safe_prefix="SAFE_")

    def cte_sql(self, expression: exp.CTE) -> str:
        """Render a CTE; BigQuery CTEs cannot declare column names."""
        if expression.alias_column_names:
            self.unsupported("Column names in CTE definition are not supported.")
        return super().cte_sql(expression)

    def array_sql(self, expression: exp.Array) -> str:
        """Render ARRAY(<subquery>) for subqueries, otherwise an inline array literal."""
        first_arg = seq_get(expression.expressions, 0)
        if isinstance(first_arg, exp.Subqueryable):
            return f"ARRAY{self.wrap(self.sql(first_arg))}"

        return inline_array_sql(self, expression)

    def transaction_sql(self, *_) -> str:
        return "BEGIN TRANSACTION"

    def commit_sql(self, *_) -> str:
        return "COMMIT TRANSACTION"

    def rollback_sql(self, *_) -> str:
        return "ROLLBACK TRANSACTION"

    def in_unnest_op(self, expression: exp.Unnest) -> str:
        # BigQuery's `x IN UNNEST(arr)` takes the unnest expression unparenthesized.
        return self.sql(expression)

    def except_op(self, expression: exp.Except) -> str:
        """Render the EXCEPT set operator; BigQuery only supports EXCEPT DISTINCT."""
        if not expression.args.get("distinct", False):
            self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
        return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def intersect_op(self, expression: exp.Intersect) -> str:
        """Render the INTERSECT set operator; BigQuery only supports INTERSECT DISTINCT."""
        if not expression.args.get("distinct", False):
            self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
        return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def with_properties(self, properties: exp.Properties) -> str:
        """DDL properties are emitted in an OPTIONS(...) clause."""
        return self.properties(properties, prefix=self.seg("OPTIONS"))
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
- leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
TRANSFORMS =
{<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalDayToSecondSpan'>: 'DAY TO SECOND', <class 'sqlglot.expressions.IntervalYearToMonthSpan'>: 'YEAR TO MONTH', <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function 
_date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING =
{<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION =
{<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 
'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 
'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS =
{'asc', 'grouping', 'lateral', 'by', 'respect', 'of', 'define', 'when', 'array', 'if', 'join', 'using', 'ignore', 'select', 'lookup', 'where', 'hash', 'cast', 'enum', 'at', 'having', 'is', 'merge', 'current', 'into', 'full', 'not', 'within', 'unnest', 'intersect', 'and', 'group', 'end', 'then', 'create', 'true', 'no', 'window', 'new', 'escape', 'following', 'range', 'treat', 'partition', 'union', 'exists', 'any', 'in', 'tablesample', 'interval', 'natural', 'qualify', 'cross', 'to', 'like', 'left', 'outer', 'inner', 'null', 'proto', 'over', 'exclude', 'fetch', 'recursive', 'distinct', 'struct', 'else', 'or', 'extract', 'contains', 'groups', 'limit', 'order', 'set', 'between', 'nulls', 'some', 'rows', 'case', 'false', 'cube', 'for', 'unbounded', 'as', 'right', 'all', 'except', 'from', 'with', 'desc', 'on', 'rollup', 'assert_rows_modified', 'default', 'collate', 'preceding'}
    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)
@classmethod
def
can_identify(text: str, identify: str | bool = 'safe') -> bool:
    @classmethod
    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
        """Checks if text can be identified given an identify option.

        Args:
            text: The text to check.
            identify:
                "always" or `True`: Always returns true.
                "safe": True if the identifier is case-insensitive.

        Returns:
            Whether or not the given text can be identified.
        """
        if identify is True or identify == "always":
            return True

        if identify == "safe":
            return not cls.case_sensitive(text)

        return False
Checks if text can be identified given an identify option.
Arguments:
- text: The text to check.
- identify: "always" or
True
: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:
Whether or not the given text can be identified.
TOKENIZER_CLASS =
<class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- LOCKING_READS_SUPPORTED
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SIZE_IS_PERCENT
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- TZ_TO_WITH_TIME_ZONE
- SELECT_KINDS
- VALUES_AS_TABLE
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- WITH_SEPARATED_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- SENTINEL_LINE_BREAK
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- normalize_functions
- unsupported_messages
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypeparam_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- offset_limit_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- safebracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- safeconcat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- add_sql
- and_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- safedpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- indexcolumnconstraint_sql
- nvl2_sql