sqlglot.dialects.bigquery
from __future__ import annotations

import logging
import re
import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot._typing import E
from sqlglot.dialects.dialect import (
    Dialect,
    datestrtodate_sql,
    format_time_lambda,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    no_ilike_sql,
    parse_date_delta_with_interval,
    regexp_replace_sql,
    rename_func,
    timestrtotime_sql,
    ts_or_ds_to_date_sql,
)
from sqlglot.helper import seq_get, split_num_words
from sqlglot.tokens import TokenType

logger = logging.getLogger("sqlglot")


def _date_add_sql(
    data_type: str, kind: str
) -> t.Callable[[generator.Generator, exp.Expression], str]:
    def func(self, expression):
        this = self.sql(expression, "this")
        unit = expression.args.get("unit")
        unit = exp.var(unit.name.upper() if unit else "DAY")
        interval = exp.Interval(this=expression.expression, unit=unit)
        return f"{data_type}_{kind}({this}, {self.sql(interval)})"

    return func


def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
    if not expression.find_ancestor(exp.From, exp.Join):
        return self.values_sql(expression)

    alias = expression.args.get("alias")

    structs = [
        exp.Struct(
            expressions=[
                exp.alias_(value, column_name)
                for value, column_name in zip(
                    t.expressions,
                    alias.columns
                    if alias and alias.columns
                    else (f"_c{i}" for i in range(len(t.expressions))),
                )
            ]
        )
        for t in expression.find_all(exp.Tuple)
    ]

    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))


def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
    this = expression.this
    if isinstance(this, exp.Schema):
        this = f"{this.this} <{self.expressions(this)}>"
    else:
        this = self.sql(this)
    return f"RETURNS {this}"


def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
    kind = expression.args["kind"]
    returns = expression.find(exp.ReturnsProperty)
    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
        expression = expression.copy()
        expression.set("kind", "TABLE FUNCTION")
        if isinstance(
            expression.expression,
            (
                exp.Subquery,
                exp.Literal,
            ),
        ):
            expression.set("expression", expression.expression.this)

        return self.create_sql(expression)

    return self.create_sql(expression)


def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
    """Remove references to unnest table aliases since bigquery doesn't allow them.

    These are added by the optimizer's qualify_column step.
    """
    from sqlglot.optimizer.scope import Scope

    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias:
                for column in Scope(expression).find_all(exp.Column):
                    if column.table == unnest.alias:
                        column.set("table", None)

    return expression


# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    if isinstance(expression, exp.Select):
        group = expression.args.get("group")
        order = expression.args.get("order")

        if group and order:
            aliases = {
                select.this: select.args["alias"]
                for select in expression.selects
                if isinstance(select, exp.Alias)
            }

            for e in group.expressions:
                alias = aliases.get(e)

                if alias:
                    e.replace(exp.column(alias))

    return expression


def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression


def _parse_timestamp(args: t.List) -> exp.StrToTime:
    this = format_time_lambda(exp.StrToTime, "bigquery")([seq_get(args, 1), seq_get(args, 0)])
    this.set("zone", seq_get(args, 2))
    return this


def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    expr_type = exp.DateFromParts if len(args) == 3 else exp.Date
    return expr_type.from_arg_list(args)


def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    return exp.MD5(this=arg.this) if isinstance(arg, exp.MD5Digest) else exp.Hex(this=arg)


class BigQuery(Dialect):
    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            return self.properties(properties, prefix=self.seg("OPTIONS"))
logger =
<Logger sqlglot (WARNING)>
class BigQuery(Dialect):
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of being quoted or not.
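For illustration, a minimal sketch (not part of the generated docs) of how the heuristic above behaves on a bare identifier, assuming the sqlglot version shown:

    from sqlglot import exp
    from sqlglot.dialects.bigquery import BigQuery

    # An unquoted, unqualified identifier has no qualified Table or UDF parent,
    # so it is lowercased; db-qualified table names are left untouched.
    ident = exp.to_identifier("SomeColumn")
    print(BigQuery.normalize_identifier(ident).sql())  # expected: somecolumn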
tokenizer_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
parser_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Parser'>
generator_class =
<class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
Inherited Members
- sqlglot.dialects.dialect.Dialect
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- DATE_FORMAT
- DATEINT_FORMAT
- TIME_FORMAT
- get_or_raise
- format_time
- case_sensitive
- can_identify
- quote_identifier
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
class Tokenizer(tokens.Tokenizer):
BYTE_STRINGS =
[("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS =
[("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS =
{'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'IF': <TokenType.IF: 'IF'>, 'ILIKE': <TokenType.ILIKE: 
'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NEXT VALUE FOR': <TokenType.NEXT_VALUE_FOR: 'NEXT_VALUE_FOR'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': 
<TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 
'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>}
class Parser(parser.Parser):
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
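As a usage sketch (assuming only the public sqlglot API, not part of the generated docs), this parser is normally invoked indirectly by passing read="bigquery" to the top-level entry points:

    import sqlglot

    # parse_one tokenizes and parses with BigQuery.Tokenizer / BigQuery.Parser, so
    # BigQuery-specific functions such as PARSE_TIMESTAMP are recognized.
    tree = sqlglot.parse_one("SELECT PARSE_TIMESTAMP('%Y-%m-%d', x) FROM t", read="bigquery")
    print(tree.sql(dialect="bigquery"))  # round-trips back to BigQuery SQL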
FUNCTIONS =
{'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONObject'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function BigQuery.Parser.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
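The FUNCTIONS table above is what the BigQuery parser consults when it sees a function call, so BigQuery-specific names map onto sqlglot's canonical expressions. A minimal sketch in Python (not part of the generated docs; exact printed SQL may vary slightly by sqlglot version):

```python
import sqlglot
from sqlglot import exp

# TO_HEX(MD5(..)) is collapsed into a single MD5 expression by _parse_to_hex above.
ast = sqlglot.parse_one("SELECT TO_HEX(MD5('abc'))", read="bigquery")
print(ast.find(exp.MD5) is not None)  # expected: True

# GENERATE_ARRAY is parsed as GenerateSeries, so it transpiles to other dialects.
print(sqlglot.transpile("SELECT GENERATE_ARRAY(1, 3)", read="bigquery", write="postgres")[0])
# e.g. SELECT GENERATE_SERIES(1, 3)
```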
FUNCTION_PARSERS =
{'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS =
{<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS =
{<TokenType.TABLE: 'TABLE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MAP: 'MAP'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ARRAY: 'ARRAY'>}
ID_VAR_TOKENS =
{<TokenType.DATETIME: 'DATETIME'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.IS: 'IS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.VAR: 'VAR'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TEXT: 'TEXT'>, <TokenType.TRUE: 'TRUE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TIME: 'TIME'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SOME: 'SOME'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.IF: 'IF'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ASC: 'ASC'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UINT256: 'UINT256'>, <TokenType.INT256: 'INT256'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ROW: 'ROW'>, <TokenType.ANY: 'ANY'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SET: 'SET'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.MAP: 'MAP'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UINT: 'UINT'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.INET: 'INET'>, <TokenType.XML: 'XML'>, <TokenType.RANGE: 'RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.INT128: 'INT128'>, <TokenType.BINARY: 'BINARY'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.CASE: 'CASE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UUID: 'UUID'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CACHE: 'CACHE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.DATE: 'DATE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ALL: 'ALL'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.LEFT: 
'LEFT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.JSON: 'JSON'>, <TokenType.UINT128: 'UINT128'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.FALSE: 'FALSE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.FULL: 'FULL'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.END: 'END'>, <TokenType.BIT: 'BIT'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.VALUES: 'VALUES'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DESC: 'DESC'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TABLE: 'TABLE'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INDEX: 'INDEX'>, <TokenType.TOP: 'TOP'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.INT: 'INT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>}
PROPERTY_PARSERS =
{'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS =
{'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
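The BigQuery-specific 'OPTIONS' entries in PROPERTY_PARSERS and CONSTRAINT_PARSERS let column-level and table-level OPTIONS(...) clauses be parsed. A small, hedged sketch of assumed usage (output formatting may differ by version):

```python
import sqlglot

sql = "CREATE TABLE t (x INT64 OPTIONS(description='id')) OPTIONS(description='demo')"
ast = sqlglot.parse_one(sql, read="bigquery")

# Round-tripping back through the BigQuery dialect should yield an equivalent statement.
print(ast.sql(dialect="bigquery"))
```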
SET_TRIE: Dict =
{'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] =
{'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict =
{'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
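These mapping tables drive how format strings are rewritten when moving between dialects: format elements are translated into the target dialect's notation. A hedged sketch (PARSE_TIMESTAMP exercises the strftime-style time mappings; the FORMAT_MAPPING table above plays the analogous role for elements like 'YYYY' in FORMAT clauses), with the expected output noted only as an illustration:

```python
import sqlglot

# The BigQuery format string is carried through sqlglot's internal representation
# and re-emitted using the target dialect's function and format syntax.
print(
    sqlglot.transpile(
        "SELECT PARSE_TIMESTAMP('%Y-%m-%d', '2023-01-01')",
        read="bigquery",
        write="duckdb",
    )[0]
)
# e.g. SELECT STRPTIME('2023-01-01', '%Y-%m-%d')
```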
Inherited Members
- sqlglot.parser.Parser
- Parser
- ENUM_TYPE_TOKENS
- TYPE_TOKENS
- SUBQUERY_PREDICATES
- RESERVED_KEYWORDS
- DB_CREATABLES
- CREATABLES
- INTERVAL_VARS
- TABLE_ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- TIMESTAMPS
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- EXPRESSION_PARSERS
- STATEMENT_PARSERS
- UNARY_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- RANGE_PARSERS
- ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- NO_PAREN_FUNCTION_PARSERS
- FUNCTIONS_WITH_ALIASED_ARGS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- SHOW_PARSERS
- TYPE_LITERAL_PARSERS
- MODIFIABLES
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- INSERT_ALTERNATIVES
- CLONE_KINDS
- TABLE_INDEX_HINT_TOKENS
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- ADD_CONSTRAINT_TOKENS
- STRICT_CAST
- CONCAT_NULL_OUTPUTS_STRING
- IDENTIFY_PIVOT_STRINGS
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- STRICT_STRING_CONCAT
- NULL_ORDERING
- error_level
- error_message_context
- max_errors
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
377 class Generator(generator.Generator): 378 EXPLICIT_UNION = True 379 INTERVAL_ALLOWS_PLURAL_FORM = False 380 JOIN_HINTS = False 381 QUERY_HINTS = False 382 TABLE_HINTS = False 383 LIMIT_FETCH = "LIMIT" 384 RENAME_TABLE_WITH_DB = False 385 ESCAPE_LINE_BREAK = True 386 387 TRANSFORMS = { 388 **generator.Generator.TRANSFORMS, 389 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 390 exp.ArraySize: rename_func("ARRAY_LENGTH"), 391 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), 392 exp.Create: _create_sql, 393 exp.CTE: transforms.preprocess([_pushdown_cte_column_names]), 394 exp.DateAdd: _date_add_sql("DATE", "ADD"), 395 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", 396 exp.DateFromParts: rename_func("DATE"), 397 exp.DateStrToDate: datestrtodate_sql, 398 exp.DateSub: _date_add_sql("DATE", "SUB"), 399 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 400 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 401 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 402 exp.GenerateSeries: rename_func("GENERATE_ARRAY"), 403 exp.GroupConcat: rename_func("STRING_AGG"), 404 exp.Hex: rename_func("TO_HEX"), 405 exp.ILike: no_ilike_sql, 406 exp.IntDiv: rename_func("DIV"), 407 exp.JSONFormat: rename_func("TO_JSON_STRING"), 408 exp.Max: max_or_greatest, 409 exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)), 410 exp.MD5Digest: rename_func("MD5"), 411 exp.Min: min_or_least, 412 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 413 exp.RegexpExtract: lambda self, e: self.func( 414 "REGEXP_EXTRACT", 415 e.this, 416 e.expression, 417 e.args.get("position"), 418 e.args.get("occurrence"), 419 ), 420 exp.RegexpReplace: regexp_replace_sql, 421 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 422 exp.ReturnsProperty: _returnsproperty_sql, 423 exp.Select: transforms.preprocess( 424 [ 425 transforms.explode_to_unnest, 426 _unqualify_unnest, 427 transforms.eliminate_distinct_on, 428 _alias_ordered_group, 429 ] 430 ), 431 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 432 if e.name == "IMMUTABLE" 433 else "NOT DETERMINISTIC", 434 exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})", 435 exp.StrToTime: lambda self, e: self.func( 436 "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone") 437 ), 438 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 439 exp.TimeSub: _date_add_sql("TIME", "SUB"), 440 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 441 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 442 exp.TimeStrToTime: timestrtotime_sql, 443 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), 444 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 445 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 446 exp.Unhex: rename_func("FROM_HEX"), 447 exp.Values: _derived_table_values_to_unnest, 448 exp.VariancePop: rename_func("VAR_POP"), 449 } 450 451 TYPE_MAPPING = { 452 **generator.Generator.TYPE_MAPPING, 453 exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC", 454 exp.DataType.Type.BIGINT: "INT64", 455 exp.DataType.Type.BINARY: "BYTES", 456 exp.DataType.Type.BOOLEAN: "BOOL", 457 exp.DataType.Type.CHAR: "STRING", 458 exp.DataType.Type.DECIMAL: "NUMERIC", 459 exp.DataType.Type.DOUBLE: "FLOAT64", 460 exp.DataType.Type.FLOAT: "FLOAT64", 461 exp.DataType.Type.INT: "INT64", 462 exp.DataType.Type.NCHAR: "STRING", 463 exp.DataType.Type.NVARCHAR: "STRING", 464 
exp.DataType.Type.SMALLINT: "INT64", 465 exp.DataType.Type.TEXT: "STRING", 466 exp.DataType.Type.TIMESTAMP: "DATETIME", 467 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 468 exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP", 469 exp.DataType.Type.TINYINT: "INT64", 470 exp.DataType.Type.VARBINARY: "BYTES", 471 exp.DataType.Type.VARCHAR: "STRING", 472 exp.DataType.Type.VARIANT: "ANY TYPE", 473 } 474 475 PROPERTIES_LOCATION = { 476 **generator.Generator.PROPERTIES_LOCATION, 477 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 478 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 479 } 480 481 # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords 482 RESERVED_KEYWORDS = { 483 *generator.Generator.RESERVED_KEYWORDS, 484 "all", 485 "and", 486 "any", 487 "array", 488 "as", 489 "asc", 490 "assert_rows_modified", 491 "at", 492 "between", 493 "by", 494 "case", 495 "cast", 496 "collate", 497 "contains", 498 "create", 499 "cross", 500 "cube", 501 "current", 502 "default", 503 "define", 504 "desc", 505 "distinct", 506 "else", 507 "end", 508 "enum", 509 "escape", 510 "except", 511 "exclude", 512 "exists", 513 "extract", 514 "false", 515 "fetch", 516 "following", 517 "for", 518 "from", 519 "full", 520 "group", 521 "grouping", 522 "groups", 523 "hash", 524 "having", 525 "if", 526 "ignore", 527 "in", 528 "inner", 529 "intersect", 530 "interval", 531 "into", 532 "is", 533 "join", 534 "lateral", 535 "left", 536 "like", 537 "limit", 538 "lookup", 539 "merge", 540 "natural", 541 "new", 542 "no", 543 "not", 544 "null", 545 "nulls", 546 "of", 547 "on", 548 "or", 549 "order", 550 "outer", 551 "over", 552 "partition", 553 "preceding", 554 "proto", 555 "qualify", 556 "range", 557 "recursive", 558 "respect", 559 "right", 560 "rollup", 561 "rows", 562 "select", 563 "set", 564 "some", 565 "struct", 566 "tablesample", 567 "then", 568 "to", 569 "treat", 570 "true", 571 "unbounded", 572 "union", 573 "unnest", 574 "using", 575 "when", 576 "where", 577 "window", 578 "with", 579 "within", 580 } 581 582 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: 583 parent = expression.parent 584 585 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). 586 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 
587 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): 588 return self.func( 589 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) 590 ) 591 592 return super().attimezone_sql(expression) 593 594 def trycast_sql(self, expression: exp.TryCast) -> str: 595 return self.cast_sql(expression, safe_prefix="SAFE_") 596 597 def cte_sql(self, expression: exp.CTE) -> str: 598 if expression.alias_column_names: 599 self.unsupported("Column names in CTE definition are not supported.") 600 return super().cte_sql(expression) 601 602 def array_sql(self, expression: exp.Array) -> str: 603 first_arg = seq_get(expression.expressions, 0) 604 if isinstance(first_arg, exp.Subqueryable): 605 return f"ARRAY{self.wrap(self.sql(first_arg))}" 606 607 return inline_array_sql(self, expression) 608 609 def transaction_sql(self, *_) -> str: 610 return "BEGIN TRANSACTION" 611 612 def commit_sql(self, *_) -> str: 613 return "COMMIT TRANSACTION" 614 615 def rollback_sql(self, *_) -> str: 616 return "ROLLBACK TRANSACTION" 617 618 def in_unnest_op(self, expression: exp.Unnest) -> str: 619 return self.sql(expression) 620 621 def except_op(self, expression: exp.Except) -> str: 622 if not expression.args.get("distinct", False): 623 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 624 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 625 626 def intersect_op(self, expression: exp.Intersect) -> str: 627 if not expression.args.get("distinct", False): 628 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 629 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 630 631 def with_properties(self, properties: exp.Properties) -> str: 632 return self.properties(properties, prefix=self.seg("OPTIONS"))
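Taken together, the TRANSFORMS, TYPE_MAPPING and helper methods above determine the SQL this generator emits. A hedged sketch of the observable behavior (assumed outputs, which may vary slightly by version):

```python
import sqlglot

# trycast_sql adds the SAFE_ prefix and TYPE_MAPPING rewrites VARCHAR to STRING.
print(sqlglot.transpile("SELECT TRY_CAST(x AS VARCHAR)", write="bigquery")[0])
# e.g. SELECT SAFE_CAST(x AS STRING)

# except_op requires DISTINCT, since BigQuery has no bare EXCEPT.
print(sqlglot.transpile("SELECT a FROM t EXCEPT SELECT a FROM u", write="bigquery")[0])
# e.g. SELECT a FROM t EXCEPT DISTINCT SELECT a FROM u
```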
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether or not to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether or not to normalize identifiers to lowercase. Default: False.
- pad: Determines the pad size in a formatted string. Default: 2.
- indent: Determines the indentation size in a formatted string. Default: 2.
- normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
- leading_comma: Determines whether the comma is leading or trailing in select expressions. Only relevant when generating in pretty mode. Default: False.
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
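A small usage sketch of the options listed above, assuming the standard Expression.sql entry point (the exact formatting of the output is illustrative only):

```python
import sqlglot

rendered = sqlglot.parse_one("SELECT col AS c FROM dataset.tbl").sql(
    dialect="bigquery",
    pretty=True,    # format the output across multiple lines
    identify=True,  # always quote identifiers; BigQuery uses backticks
)
print(rendered)
```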
TRANSFORMS =
{<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 
'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING =
{<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION =
{<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS =
{'unnest', 'limit', 'full', 'proto', 'some', 'following', 'unbounded', 'nulls', 'join', 'outer', 'end', 'left', 'within', 'groups', 'ignore', 'set', 'distinct', 'struct', 'new', 'interval', 'at', 'on', 'respect', 'when', 'between', 'null', 'union', 'window', 'intersect', 'treat', 'enum', 'in', 'lateral', 'exclude', 'group', 'as', 'cast', 'default', 'fetch', 'cross', 'rollup', 'with', 'collate', 'natural', 'any', 'false', 'cube', 'order', 'to', 'if', 'no', 'where', 'contains', 'or', 'grouping', 'range', 'right', 'inner', 'extract', 'recursive', 'define', 'escape', 'select', 'exists', 'merge', 'hash', 'using', 'rows', 'by', 'having', 'array', 'lookup', 'not', 'desc', 'and', 'for', 'else', 'is', 'tablesample', 'into', 'all', 'assert_rows_modified', 'except', 'over', 'case', 'qualify', 'of', 'then', 'from', 'preceding', 'true', 'like', 'current', 'asc', 'create', 'partition'}
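Identifiers that collide with the reserved keywords above are expected to be quoted on output (BigQuery uses backticks). A hedged sketch, assuming the base generator's identifier quoting consults RESERVED_KEYWORDS:

```python
import sqlglot
from sqlglot import exp

# An unquoted column whose name is a reserved keyword should come out backtick-quoted.
print(exp.column("where", table="t").sql(dialect="bigquery"))  # e.g. t.`where`

# Identifiers quoted in the source dialect stay quoted, using BigQuery's backticks.
print(sqlglot.transpile('SELECT "order" FROM t', read="postgres", write="bigquery")[0])
# e.g. SELECT `order` FROM t
```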
582 def attimezone_sql(self, expression: exp.AtTimeZone) -> str: 583 parent = expression.parent 584 585 # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]). 586 # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included. 587 if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"): 588 return self.func( 589 "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone")) 590 ) 591 592 return super().attimezone_sql(expression)
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
246 @classmethod 247 def can_identify(cls, text: str, identify: str | bool = "safe") -> bool: 248 """Checks if text can be identified given an identify option. 249 250 Args: 251 text: The text to check. 252 identify: 253 "always" or `True`: Always returns true. 254 "safe": True if the identifier is case-insensitive. 255 256 Returns: 257 Whether or not the given text can be identified. 258 """ 259 if identify is True or identify == "always": 260 return True 261 262 if identify == "safe": 263 return not cls.case_sensitive(text) 264 265 return False
Checks if text can be identified given an identify option.
Arguments:
- text: The text to check.
- identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:
Whether or not the given text can be identified.
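An assumed-usage sketch of can_identify as inherited by this dialect's Generator (the "safe" result depends on the dialect's case-sensitivity rules, so it is not asserted here):

```python
from sqlglot.dialects.bigquery import BigQuery

print(BigQuery.Generator.can_identify("my_col", "always"))  # True: 'always' always quotes
print(BigQuery.Generator.can_identify("my_col", "safe"))    # dialect-dependent
```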
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- LOCKING_READS_SUPPORTED
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- MATCHED_BY_SOURCE
- SINGLE_STRING_INTERVAL
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SIZE_IS_PERCENT
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- SELECT_KINDS
- STAR_MAPPING
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- STRUCT_DELIMITER
- PARAMETER_TOKEN
- WITH_SEPARATED_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- SENTINEL_LINE_BREAK
- INDEX_OFFSET
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- STRICT_STRING_CONCAT
- NULL_ORDERING
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- normalize_functions
- unsupported_messages
- generate
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasidentitycolumnconstraint_sql
- notnullcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- clone_sql
- describe_sql
- prepend_ctes
- with_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- rawstring_sql
- datatypesize_sql
- datatype_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- pseudotype_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- table_sql
- tablesample_sql
- pivot_sql
- tuple_sql
- update_sql
- values_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- join_sql
- lambda_sql
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- cluster_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognize_sql
- query_modifiers
- offset_limit_modifiers
- after_having_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- union_sql
- union_op
- unnest_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_sql
- safebracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- safeconcat_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonobject_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- aliases_sql
- add_sql
- and_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- altercolumn_sql
- renametable_sql
- altertable_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- intdiv_sql
- dpipe_sql
- safedpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- or_sql
- slice_sql
- sub_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- text_width
- format_time
- expressions
- op_expressions
- naked_property
- set_operation
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql