Edit on GitHub

sqlglot.dialects.bigquery

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    arg_max_or_min_no_count,
 12    binary_from_function,
 13    date_add_interval_sql,
 14    datestrtodate_sql,
 15    format_time_lambda,
 16    if_sql,
 17    inline_array_sql,
 18    json_keyvalue_comma_sql,
 19    max_or_greatest,
 20    min_or_least,
 21    no_ilike_sql,
 22    parse_date_delta_with_interval,
 23    regexp_replace_sql,
 24    rename_func,
 25    timestrtotime_sql,
 26    ts_or_ds_to_date_sql,
 27)
 28from sqlglot.helper import seq_get, split_num_words
 29from sqlglot.tokens import TokenType
 30
# Module-level logger shared by the sqlglot dialects (warnings about unsupported
# or lossy transpilations are emitted through it).
logger = logging.getLogger("sqlglot")
 32
 33
 34def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
 35    if not expression.find_ancestor(exp.From, exp.Join):
 36        return self.values_sql(expression)
 37
 38    alias = expression.args.get("alias")
 39
 40    structs = [
 41        exp.Struct(
 42            expressions=[
 43                exp.alias_(value, column_name)
 44                for value, column_name in zip(
 45                    t.expressions,
 46                    alias.columns
 47                    if alias and alias.columns
 48                    else (f"_c{i}" for i in range(len(t.expressions))),
 49                )
 50            ]
 51        )
 52        for t in expression.find_all(exp.Tuple)
 53    ]
 54
 55    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 56
 57
 58def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
 59    this = expression.this
 60    if isinstance(this, exp.Schema):
 61        this = f"{this.this} <{self.expressions(this)}>"
 62    else:
 63        this = self.sql(this)
 64    return f"RETURNS {this}"
 65
 66
 67def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
 68    kind = expression.args["kind"]
 69    returns = expression.find(exp.ReturnsProperty)
 70
 71    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 72        expression = expression.copy()
 73        expression.set("kind", "TABLE FUNCTION")
 74
 75        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 76            expression.set("expression", expression.expression.this)
 77
 78        return self.create_sql(expression)
 79
 80    return self.create_sql(expression)
 81
 82
 83def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 84    """Remove references to unnest table aliases since bigquery doesn't allow them.
 85
 86    These are added by the optimizer's qualify_column step.
 87    """
 88    from sqlglot.optimizer.scope import find_all_in_scope
 89
 90    if isinstance(expression, exp.Select):
 91        unnest_aliases = {
 92            unnest.alias
 93            for unnest in find_all_in_scope(expression, exp.Unnest)
 94            if isinstance(unnest.parent, (exp.From, exp.Join))
 95        }
 96        if unnest_aliases:
 97            for column in expression.find_all(exp.Column):
 98                if column.table in unnest_aliases:
 99                    column.set("table", None)
100                elif column.db in unnest_aliases:
101                    column.set("db", None)
102
103    return expression
104
105
106# https://issuetracker.google.com/issues/162294746
107# workaround for bigquery bug when grouping by an expression and then ordering
108# WITH x AS (SELECT 1 y)
109# SELECT y + 1 z
110# FROM x
111# GROUP BY x + 1
112# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their SELECT-list aliases when an ORDER BY
    is also present (see the BigQuery issue linked above)."""
    if not isinstance(expression, exp.Select):
        return expression

    group = expression.args.get("group")
    order = expression.args.get("order")

    if group and order:
        # Map each aliased projection's underlying expression to its alias.
        alias_by_expr = {
            projection.this: projection.args["alias"]
            for projection in expression.selects
            if isinstance(projection, exp.Alias)
        }

        for grouped in group.expressions:
            matching_alias = alias_by_expr.get(grouped)
            if matching_alias:
                grouped.replace(exp.column(matching_alias))

    return expression
132
133
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if not (isinstance(expression, exp.CTE) and expression.alias_column_names):
        return expression

    cte_query = expression.this

    if cte_query.is_star:
        logger.warning(
            "Can't push down CTE column names for star queries. Run the query through"
            " the optimizer or use 'qualify' to expand the star projections first."
        )
        return expression

    column_names = expression.alias_column_names
    expression.args["alias"].set("columns", None)

    for name, select in zip(column_names, cte_query.selects):
        to_replace = select

        # Inner aliases are shadowed by the CTE column names
        if isinstance(select, exp.Alias):
            select = select.this

        to_replace.replace(exp.alias_(select, name))

    return expression
159
160
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Build a StrToTime node from PARSE_TIMESTAMP(fmt, value[, zone]) arguments."""
    build_str_to_time = format_time_lambda(exp.StrToTime, "bigquery")
    # BigQuery puts the format first; the canonical node takes the value first.
    node = build_str_to_time([seq_get(args, 1), seq_get(args, 0)])
    node.set("zone", seq_get(args, 2))
    return node
165
166
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(y, m, d) parses to DateFromParts; any other arity parses to Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
170
171
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
176
177
class BigQuery(Dialect):
    """Dialect definition for Google BigQuery.

    Bundles the tokenizer, parser and generator rules needed to read and write
    BigQuery Standard SQL. The nested tables (KEYWORDS, FUNCTIONS, TRANSFORMS,
    TYPE_MAPPING, ...) are load-bearing: entries and their ordering drive how
    SQL is parsed from and generated into this dialect.
    """

    # UNNEST produces a single column rather than a table alias with columns.
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # LOG(value, base) rather than LOG(base, value).
    LOG_BASE_FIRST = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    # Backslash escape sequences recognized inside BigQuery string literals.
    ESCAPE_SEQUENCES = {
        "\\a": "\a",
        "\\b": "\b",
        "\\f": "\f",
        "\\n": "\n",
        "\\r": "\r",
        "\\t": "\t",
        "\\v": "\v",
    }

    # Maps BigQuery format elements to strftime-style directives.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        """Lowercase identifiers that BigQuery treats case-insensitively.

        Table names (when qualified) and UDF names are case-sensitive and are
        left untouched; everything else is lowercased.
        """
        if isinstance(expression, exp.Identifier):
            parent = expression.parent
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
            # The following check is essentially a heuristic to detect tables based on whether or
            # not they're qualified. It also avoids normalizing UDFs, because they're case-sensitive.
            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        """Tokenizer rules specific to BigQuery's lexical grammar."""

        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." byte-string literals for every quote style.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." raw-string literals for every quote style.
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "BYTES": TokenType.BINARY,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
            "INT64": TokenType.BIGINT,
            "MODEL": TokenType.MODEL,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
        }
        # DIV is a function in BigQuery, not a keyword operator.
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        """Parser rules specific to BigQuery syntax."""

        PREFIXED_PIVOT_COLUMNS = True

        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # BigQuery returns the single capturing group when exactly one exists.
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM uses regular function-call syntax in BigQuery.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name segment, handling BigQuery's dashed names.

            https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Fold `foo-bar-baz` style project/table names into one identifier.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    # Adjacent tokens with no whitespace belong to the same name.
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Split a single dotted identifier (e.g. `project.dataset.table`) into
            catalog / db / table parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

    class Generator(generator.Generator):
        """SQL generation rules specific to BigQuery."""

        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        NVL2_SUPPORTED = False
        UNNEST_WITH_ORDINALITY = False
        COLLATE_IS_FUNC = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.CollateProperty: lambda self, e: f"DEFAULT COLLATE {self.sql(e, 'this')}"
            if e.args.get("default")
            else f"COLLATE {self.sql(e, 'this')}",
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.If: if_sql(false_value="NULL"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest(),
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA2: lambda self, e: self.func(
                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def eq_sql(self, expression: exp.EQ) -> str:
            """Generate `=`, folding comparisons against NULL to a literal NULL."""
            # Operands of = cannot be NULL in BigQuery
            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
                return "NULL"

            return self.binary(expression, "=")

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Generate AT TIME ZONE via TIMESTAMP(DATETIME(..., zone)) when needed."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """TRY_CAST is spelled SAFE_CAST in BigQuery."""
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            """Generate a CTE, warning when column names survived the pushdown transform."""
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """Generate ARRAY(subquery) with parentheses, or an inline array literal."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # `IN UNNEST(...)` takes the unnest expression without wrapping parens.
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            """BigQuery requires an explicit DISTINCT/ALL qualifier on EXCEPT."""
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            """BigQuery requires an explicit DISTINCT/ALL qualifier on INTERSECT."""
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Render trailing properties under BigQuery's OPTIONS(...) clause."""
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            """Render time-travel clauses as FOR SYSTEM_TIME AS OF."""
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
179class BigQuery(Dialect):
180    UNNEST_COLUMN_ONLY = True
181    SUPPORTS_USER_DEFINED_TYPES = False
182    SUPPORTS_SEMI_ANTI_JOIN = False
183    LOG_BASE_FIRST = False
184
185    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
186    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
187
188    # bigquery udfs are case sensitive
189    NORMALIZE_FUNCTIONS = False
190
191    TIME_MAPPING = {
192        "%D": "%m/%d/%y",
193    }
194
195    ESCAPE_SEQUENCES = {
196        "\\a": "\a",
197        "\\b": "\b",
198        "\\f": "\f",
199        "\\n": "\n",
200        "\\r": "\r",
201        "\\t": "\t",
202        "\\v": "\v",
203    }
204
205    FORMAT_MAPPING = {
206        "DD": "%d",
207        "MM": "%m",
208        "MON": "%b",
209        "MONTH": "%B",
210        "YYYY": "%Y",
211        "YY": "%y",
212        "HH": "%I",
213        "HH12": "%I",
214        "HH24": "%H",
215        "MI": "%M",
216        "SS": "%S",
217        "SSSSS": "%f",
218        "TZH": "%z",
219    }
220
221    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
222    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
223    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
224
225    @classmethod
226    def normalize_identifier(cls, expression: E) -> E:
227        if isinstance(expression, exp.Identifier):
228            parent = expression.parent
229            while isinstance(parent, exp.Dot):
230                parent = parent.parent
231
232            # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
233            # The following check is essentially a heuristic to detect tables based on whether or
234            # not they're qualified. It also avoids normalizing UDFs, because they're case-sensitive.
235            if (
236                not isinstance(parent, exp.UserDefinedFunction)
237                and not (isinstance(parent, exp.Table) and parent.db)
238                and not expression.meta.get("is_table")
239            ):
240                expression.set("this", expression.this.lower())
241
242        return expression
243
244    class Tokenizer(tokens.Tokenizer):
245        QUOTES = ["'", '"', '"""', "'''"]
246        COMMENTS = ["--", "#", ("/*", "*/")]
247        IDENTIFIERS = ["`"]
248        STRING_ESCAPES = ["\\"]
249
250        HEX_STRINGS = [("0x", ""), ("0X", "")]
251
252        BYTE_STRINGS = [
253            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
254        ]
255
256        RAW_STRINGS = [
257            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
258        ]
259
260        KEYWORDS = {
261            **tokens.Tokenizer.KEYWORDS,
262            "ANY TYPE": TokenType.VARIANT,
263            "BEGIN": TokenType.COMMAND,
264            "BEGIN TRANSACTION": TokenType.BEGIN,
265            "BYTES": TokenType.BINARY,
266            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
267            "DECLARE": TokenType.COMMAND,
268            "FLOAT64": TokenType.DOUBLE,
269            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
270            "INT64": TokenType.BIGINT,
271            "MODEL": TokenType.MODEL,
272            "NOT DETERMINISTIC": TokenType.VOLATILE,
273            "RECORD": TokenType.STRUCT,
274            "TIMESTAMP": TokenType.TIMESTAMPTZ,
275        }
276        KEYWORDS.pop("DIV")
277
    class Parser(parser.Parser):
        """Parser overrides implementing BigQuery-specific SQL syntax."""

        # PIVOT aliases are prefixed to the pivoted column names.
        PREFIXED_PIVOT_COLUMNS = True

        # Single-argument LOG(x) is parsed as the natural logarithm.
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE takes (format, value); canonical StrToDate takes (value, format),
            # hence the argument swap below.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # BigQuery returns the contents of the capturing group when the
                # pattern has exactly one group.
                group=exp.Literal.number(1) if re.compile(args[1].name).groups == 1 else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM is parsed as a plain function here (no TRIM(BOTH x FROM y) form).
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        # OVERLAPS is not a range operator in BigQuery.
        RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
        RANGE_PARSERS.pop(TokenType.OVERLAPS, None)

        NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one dotted component of a table name, supporting BigQuery's
            dashed names (e.g. `my-project`) and names that start with digits."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Fold dash-separated tokens (my-project) into a single identifier.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A number token immediately adjacent to a var token (no gap between
                # the previous token's end and the current token's start) is a table
                # name beginning with digits, e.g. `123abc`.
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference; a dot-separated name that was tokenized as a
            single identifier is split into catalog / db / table parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # More than three parts: the trailing parts become a Dot chain.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Parse JSON_OBJECT, canonicalizing BigQuery's two-array signature."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object
423
424    class Generator(generator.Generator):
425        EXPLICIT_UNION = True
426        INTERVAL_ALLOWS_PLURAL_FORM = False
427        JOIN_HINTS = False
428        QUERY_HINTS = False
429        TABLE_HINTS = False
430        LIMIT_FETCH = "LIMIT"
431        RENAME_TABLE_WITH_DB = False
432        NVL2_SUPPORTED = False
433        UNNEST_WITH_ORDINALITY = False
434        COLLATE_IS_FUNC = True
435
436        TRANSFORMS = {
437            **generator.Generator.TRANSFORMS,
438            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
439            exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
440            exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
441            exp.ArraySize: rename_func("ARRAY_LENGTH"),
442            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
443            exp.CollateProperty: lambda self, e: f"DEFAULT COLLATE {self.sql(e, 'this')}"
444            if e.args.get("default")
445            else f"COLLATE {self.sql(e, 'this')}",
446            exp.Create: _create_sql,
447            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
448            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
449            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
450            exp.DateFromParts: rename_func("DATE"),
451            exp.DateStrToDate: datestrtodate_sql,
452            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
453            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
454            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
455            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
456            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
457            exp.GroupConcat: rename_func("STRING_AGG"),
458            exp.Hex: rename_func("TO_HEX"),
459            exp.If: if_sql(false_value="NULL"),
460            exp.ILike: no_ilike_sql,
461            exp.IntDiv: rename_func("DIV"),
462            exp.JSONFormat: rename_func("TO_JSON_STRING"),
463            exp.JSONKeyValue: json_keyvalue_comma_sql,
464            exp.Max: max_or_greatest,
465            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
466            exp.MD5Digest: rename_func("MD5"),
467            exp.Min: min_or_least,
468            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
469            exp.RegexpExtract: lambda self, e: self.func(
470                "REGEXP_EXTRACT",
471                e.this,
472                e.expression,
473                e.args.get("position"),
474                e.args.get("occurrence"),
475            ),
476            exp.RegexpReplace: regexp_replace_sql,
477            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
478            exp.ReturnsProperty: _returnsproperty_sql,
479            exp.Select: transforms.preprocess(
480                [
481                    transforms.explode_to_unnest(),
482                    _unqualify_unnest,
483                    transforms.eliminate_distinct_on,
484                    _alias_ordered_group,
485                    transforms.eliminate_semi_and_anti_joins,
486                ]
487            ),
488            exp.SHA2: lambda self, e: self.func(
489                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
490            ),
491            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
492            if e.name == "IMMUTABLE"
493            else "NOT DETERMINISTIC",
494            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
495            exp.StrToTime: lambda self, e: self.func(
496                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
497            ),
498            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
499            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
500            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
501            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
502            exp.TimeStrToTime: timestrtotime_sql,
503            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
504            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
505            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
506            exp.Unhex: rename_func("FROM_HEX"),
507            exp.Values: _derived_table_values_to_unnest,
508            exp.VariancePop: rename_func("VAR_POP"),
509        }
510
511        TYPE_MAPPING = {
512            **generator.Generator.TYPE_MAPPING,
513            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
514            exp.DataType.Type.BIGINT: "INT64",
515            exp.DataType.Type.BINARY: "BYTES",
516            exp.DataType.Type.BOOLEAN: "BOOL",
517            exp.DataType.Type.CHAR: "STRING",
518            exp.DataType.Type.DECIMAL: "NUMERIC",
519            exp.DataType.Type.DOUBLE: "FLOAT64",
520            exp.DataType.Type.FLOAT: "FLOAT64",
521            exp.DataType.Type.INT: "INT64",
522            exp.DataType.Type.NCHAR: "STRING",
523            exp.DataType.Type.NVARCHAR: "STRING",
524            exp.DataType.Type.SMALLINT: "INT64",
525            exp.DataType.Type.TEXT: "STRING",
526            exp.DataType.Type.TIMESTAMP: "DATETIME",
527            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
528            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
529            exp.DataType.Type.TINYINT: "INT64",
530            exp.DataType.Type.VARBINARY: "BYTES",
531            exp.DataType.Type.VARCHAR: "STRING",
532            exp.DataType.Type.VARIANT: "ANY TYPE",
533        }
534
535        PROPERTIES_LOCATION = {
536            **generator.Generator.PROPERTIES_LOCATION,
537            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
538            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
539        }
540
541        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
542        RESERVED_KEYWORDS = {
543            *generator.Generator.RESERVED_KEYWORDS,
544            "all",
545            "and",
546            "any",
547            "array",
548            "as",
549            "asc",
550            "assert_rows_modified",
551            "at",
552            "between",
553            "by",
554            "case",
555            "cast",
556            "collate",
557            "contains",
558            "create",
559            "cross",
560            "cube",
561            "current",
562            "default",
563            "define",
564            "desc",
565            "distinct",
566            "else",
567            "end",
568            "enum",
569            "escape",
570            "except",
571            "exclude",
572            "exists",
573            "extract",
574            "false",
575            "fetch",
576            "following",
577            "for",
578            "from",
579            "full",
580            "group",
581            "grouping",
582            "groups",
583            "hash",
584            "having",
585            "if",
586            "ignore",
587            "in",
588            "inner",
589            "intersect",
590            "interval",
591            "into",
592            "is",
593            "join",
594            "lateral",
595            "left",
596            "like",
597            "limit",
598            "lookup",
599            "merge",
600            "natural",
601            "new",
602            "no",
603            "not",
604            "null",
605            "nulls",
606            "of",
607            "on",
608            "or",
609            "order",
610            "outer",
611            "over",
612            "partition",
613            "preceding",
614            "proto",
615            "qualify",
616            "range",
617            "recursive",
618            "respect",
619            "right",
620            "rollup",
621            "rows",
622            "select",
623            "set",
624            "some",
625            "struct",
626            "tablesample",
627            "then",
628            "to",
629            "treat",
630            "true",
631            "unbounded",
632            "union",
633            "unnest",
634            "using",
635            "when",
636            "where",
637            "window",
638            "with",
639            "within",
640        }
641
642        def eq_sql(self, expression: exp.EQ) -> str:
643            # Operands of = cannot be NULL in BigQuery
644            if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
645                return "NULL"
646
647            return self.binary(expression, "=")
648
649        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
650            parent = expression.parent
651
652            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
653            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
654            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
655                return self.func(
656                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
657                )
658
659            return super().attimezone_sql(expression)
660
661        def trycast_sql(self, expression: exp.TryCast) -> str:
662            return self.cast_sql(expression, safe_prefix="SAFE_")
663
664        def cte_sql(self, expression: exp.CTE) -> str:
665            if expression.alias_column_names:
666                self.unsupported("Column names in CTE definition are not supported.")
667            return super().cte_sql(expression)
668
669        def array_sql(self, expression: exp.Array) -> str:
670            first_arg = seq_get(expression.expressions, 0)
671            if isinstance(first_arg, exp.Subqueryable):
672                return f"ARRAY{self.wrap(self.sql(first_arg))}"
673
674            return inline_array_sql(self, expression)
675
676        def transaction_sql(self, *_) -> str:
677            return "BEGIN TRANSACTION"
678
679        def commit_sql(self, *_) -> str:
680            return "COMMIT TRANSACTION"
681
682        def rollback_sql(self, *_) -> str:
683            return "ROLLBACK TRANSACTION"
684
685        def in_unnest_op(self, expression: exp.Unnest) -> str:
686            return self.sql(expression)
687
688        def except_op(self, expression: exp.Except) -> str:
689            if not expression.args.get("distinct", False):
690                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
691            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
692
693        def intersect_op(self, expression: exp.Intersect) -> str:
694            if not expression.args.get("distinct", False):
695                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
696            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
697
698        def with_properties(self, properties: exp.Properties) -> str:
699            return self.properties(properties, prefix=self.seg("OPTIONS"))
700
701        def version_sql(self, expression: exp.Version) -> str:
702            if expression.name == "TIMESTAMP":
703                expression = expression.copy()
704                expression.set("this", "SYSTEM_TIME")
705            return super().version_sql(expression)
UNNEST_COLUMN_ONLY = True
SUPPORTS_USER_DEFINED_TYPES = False
SUPPORTS_SEMI_ANTI_JOIN = False
LOG_BASE_FIRST = False
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
ESCAPE_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONTIME', '_PARTITIONDATE'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
225    @classmethod
226    def normalize_identifier(cls, expression: E) -> E:
227        if isinstance(expression, exp.Identifier):
228            parent = expression.parent
229            while isinstance(parent, exp.Dot):
230                parent = parent.parent
231
232            # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
233            # The following check is essentially a heuristic to detect tables based on whether or
234            # not they're qualified. It also avoids normalizing UDFs, because they're case-sensitive.
235            if (
236                not isinstance(parent, exp.UserDefinedFunction)
237                and not (isinstance(parent, exp.Table) and parent.db)
238                and not expression.meta.get("is_table")
239            ):
240                expression.set("this", expression.this.lower())
241
242        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized to lowercase regardless of whether they are quoted.

tokenizer_class = <class 'BigQuery.Tokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
244    class Tokenizer(tokens.Tokenizer):
245        QUOTES = ["'", '"', '"""', "'''"]
246        COMMENTS = ["--", "#", ("/*", "*/")]
247        IDENTIFIERS = ["`"]
248        STRING_ESCAPES = ["\\"]
249
250        HEX_STRINGS = [("0x", ""), ("0X", "")]
251
252        BYTE_STRINGS = [
253            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
254        ]
255
256        RAW_STRINGS = [
257            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
258        ]
259
260        KEYWORDS = {
261            **tokens.Tokenizer.KEYWORDS,
262            "ANY TYPE": TokenType.VARIANT,
263            "BEGIN": TokenType.COMMAND,
264            "BEGIN TRANSACTION": TokenType.BEGIN,
265            "BYTES": TokenType.BINARY,
266            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
267            "DECLARE": TokenType.COMMAND,
268            "FLOAT64": TokenType.DOUBLE,
269            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
270            "INT64": TokenType.BIGINT,
271            "MODEL": TokenType.MODEL,
272            "NOT DETERMINISTIC": TokenType.VOLATILE,
273            "RECORD": TokenType.STRUCT,
274            "TIMESTAMP": TokenType.TIMESTAMPTZ,
275        }
276        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': 
<TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 
'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 
'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'MODEL': <TokenType.MODEL: 'MODEL'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>}
ESCAPE_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b'}
class BigQuery.Parser(sqlglot.parser.Parser):
class Parser(parser.Parser):
    """BigQuery-specific parser.

    Customizes the base sqlglot parser with BigQuery function signatures
    (e.g. PARSE_DATE's format-first argument order), OPTIONS(...) property
    and constraint syntax, and lexical quirks such as unquoted dashes and
    leading digits inside table names.
    """

    PREFIXED_PIVOT_COLUMNS = True

    # In BigQuery, single-argument LOG(x) is the natural logarithm.
    LOG_DEFAULTS_TO_LN = True

    def _regexp_extract(args: t.List) -> exp.RegexpExtract:
        # NOTE: plain helper looked up through the FUNCTIONS dict, not an
        # instance method -- it is always called with a bare argument list.
        #
        # Default the capture group to 1 only when the pattern is a valid
        # regex containing exactly one group. An invalid or non-literal
        # pattern must not abort parsing (previously re.compile could raise
        # re.error here); fall back to the whole match in that case.
        try:
            group = re.compile(args[1].name).groups == 1
        except re.error:
            group = False

        return exp.RegexpExtract(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            group=exp.Literal.number(1) if group else None,
        )

    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "DATE": _parse_date,
        "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
        "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
        "DATE_TRUNC": lambda args: exp.DateTrunc(
            unit=exp.Literal.string(str(seq_get(args, 1))),
            this=seq_get(args, 0),
        ),
        "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
        "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
        "DIV": binary_from_function(exp.IntDiv),
        "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
        "MD5": exp.MD5Digest.from_arg_list,
        "TO_HEX": _parse_to_hex,
        # BigQuery's PARSE_DATE takes (format, value); the canonical
        # StrToDate expects (value, format), so the arguments are swapped.
        "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
            [seq_get(args, 1), seq_get(args, 0)]
        ),
        "PARSE_TIMESTAMP": _parse_timestamp,
        "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
        "REGEXP_EXTRACT": _regexp_extract,
        "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
        "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
        "SPLIT": lambda args: exp.Split(
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
            this=seq_get(args, 0),
            expression=seq_get(args, 1) or exp.Literal.string(","),
        ),
        "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
        "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
        "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
        "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
        "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    }
    # BigQuery has no TRIM(... FROM ...) special form; parse TRIM as a
    # regular function call.
    FUNCTION_PARSERS.pop("TRIM")

    NO_PAREN_FUNCTIONS = {
        **parser.Parser.NO_PAREN_FUNCTIONS,
        TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    }

    NESTED_TYPE_TOKENS = {
        *parser.Parser.NESTED_TYPE_TOKENS,
        TokenType.TABLE,
    }

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        TokenType.VALUES,
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "NOT DETERMINISTIC": lambda self: self.expression(
            exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
        ),
        "OPTIONS": lambda self: self._parse_with_property(),
    }

    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    }

    RANGE_PARSERS = parser.Parser.RANGE_PARSERS.copy()
    RANGE_PARSERS.pop(TokenType.OVERLAPS, None)

    NULL_TOKENS = {TokenType.NULL, TokenType.UNKNOWN}

    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
        """Parse one component of a (possibly dotted) table name.

        Extends the base behavior to accept BigQuery's unquoted dashes
        (``my-project.dataset.tbl``) and names that begin with a number
        (e.g. ``123abc``), both of which are folded back into a single
        Identifier node.
        """
        this = super()._parse_table_part(schema=schema) or self._parse_number()

        # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
        if isinstance(this, exp.Identifier):
            table_name = this.name
            # Consume DASH + token pairs so "a-b-c" becomes one identifier.
            while self._match(TokenType.DASH, advance=False) and self._next:
                self._advance(2)
                table_name += f"-{self._prev.text}"

            this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
        elif isinstance(this, exp.Literal):
            table_name = this.name

            # A number literal immediately followed (no whitespace) by a var
            # token is a digit-leading identifier, not two tokens.
            if (
                self._curr
                and self._prev.end == self._curr.start - 1
                and self._parse_var(any_token=True)
            ):
                table_name += self._prev.text

            this = exp.Identifier(this=table_name, quoted=True)

        return this

    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
        """Parse a full table reference, splitting a single quoted
        identifier such as `project.dataset.table` into catalog/db/this."""
        table = super()._parse_table_parts(schema=schema)
        if isinstance(table.this, exp.Identifier) and "." in table.name:
            catalog, db, this, *rest = (
                t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                for x in split_num_words(table.name, ".", 3)
            )

            if rest and this:
                this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

            table = exp.Table(this=this, db=db, catalog=catalog)

        return table

    def _parse_json_object(self) -> exp.JSONObject:
        """Parse JSON_OBJECT, normalizing BigQuery's two-array signature."""
        json_object = super()._parse_json_object()
        array_kv_pair = seq_get(json_object.expressions, 0)

        # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
        if (
            array_kv_pair
            and isinstance(array_kv_pair.this, exp.Array)
            and isinstance(array_kv_pair.expression, exp.Array)
        ):
            keys = array_kv_pair.this.expressions
            values = array_kv_pair.expression.expressions

            json_object.set(
                "expressions",
                [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
            )

        return json_object

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Flatten'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 
'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': 
<function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MAP: 'MAP'>, <TokenType.TABLE: 'TABLE'>}
ID_VAR_TOKENS = {<TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DATE: 'DATE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.LEFT: 'LEFT'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.KILL: 'KILL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.USE: 'USE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SEMI: 'SEMI'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INT256: 'INT256'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.SET: 'SET'>, <TokenType.CASE: 'CASE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VAR: 'VAR'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SMALLINT: 'SMALLINT'>, 
<TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UINT: 'UINT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.APPLY: 'APPLY'>, <TokenType.FULL: 'FULL'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.ROW: 'ROW'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NULL: 'NULL'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.VALUES: 'VALUES'>, <TokenType.DELETE: 'DELETE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ANY: 'ANY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT: 'INT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NESTED: 'NESTED'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.INT128: 'INT128'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INT8RANGE: 
'INT8RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.IS: 'IS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.BIT: 'BIT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BINARY: 'BINARY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.MONEY: 'MONEY'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SOME: 'SOME'>, <TokenType.XML: 'XML'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TOP: 'TOP'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.ALL: 'ALL'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TIME: 'TIME'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.END: 'END'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ASC: 'ASC'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TABLE: 'TABLE'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': 
<function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>}
NULL_TOKENS = {<TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NULL: 'NULL'>}
TABLE_ALIAS_TOKENS = {<TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DATE: 'DATE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.KILL: 'KILL'>, <TokenType.FIRST: 'FIRST'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.USE: 'USE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.JSON: 'JSON'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SEMI: 'SEMI'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INT256: 'INT256'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.SET: 'SET'>, <TokenType.CASE: 'CASE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INET: 'INET'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.VAR: 'VAR'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DIV: 'DIV'>, <TokenType.UUID: 'UUID'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.UINT: 'UINT'>, <TokenType.HSTORE: 'HSTORE'>, 
<TokenType.DATABASE: 'DATABASE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.ROW: 'ROW'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.NULL: 'NULL'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.DELETE: 'DELETE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.MAP: 'MAP'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ANY: 'ANY'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.INT: 'INT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.NESTED: 'NESTED'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.INT128: 'INT128'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DESC: 'DESC'>, <TokenType.IS: 'IS'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.COLUMN: 
'COLUMN'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.BIT: 'BIT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.BINARY: 'BINARY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.MONEY: 'MONEY'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.SOME: 'SOME'>, <TokenType.XML: 'XML'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TOP: 'TOP'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.ALL: 'ALL'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.TIME: 'TIME'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.UINT256: 'UINT256'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.END: 'END'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ASC: 'ASC'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TABLE: 'TABLE'>}
LOG_BASE_FIRST = False
TOKENIZER_CLASS: Type[sqlglot.tokens.Tokenizer] = <class 'BigQuery.Tokenizer'>
UNNEST_COLUMN_ONLY: bool = True
SUPPORTS_USER_DEFINED_TYPES = False
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
class Generator(generator.Generator):
    """Generator for BigQuery's GoogleSQL dialect.

    Converts a sqlglot syntax tree into BigQuery SQL. Behavior tweaks vs. the
    base generator are expressed through the class-level feature flags,
    TRANSFORMS/TYPE_MAPPING tables, and the overridden *_sql methods below.
    """

    # BigQuery requires UNION to spell out DISTINCT/ALL explicitly.
    EXPLICIT_UNION = True
    # INTERVAL 2 DAYS is not valid; only the singular unit form is.
    INTERVAL_ALLOWS_PLURAL_FORM = False
    JOIN_HINTS = False
    QUERY_HINTS = False
    TABLE_HINTS = False
    LIMIT_FETCH = "LIMIT"
    RENAME_TABLE_WITH_DB = False
    NVL2_SUPPORTED = False
    UNNEST_WITH_ORDINALITY = False
    # COLLATE is a function-like construct in BigQuery, not an operator.
    COLLATE_IS_FUNC = True

    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArgMax: arg_max_or_min_no_count("MAX_BY"),
        exp.ArgMin: arg_max_or_min_no_count("MIN_BY"),
        exp.ArraySize: rename_func("ARRAY_LENGTH"),
        exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
        exp.CollateProperty: lambda self, e: f"DEFAULT COLLATE {self.sql(e, 'this')}"
        if e.args.get("default")
        else f"COLLATE {self.sql(e, 'this')}",
        exp.Create: _create_sql,
        exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
        exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
        exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
        exp.DateFromParts: rename_func("DATE"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DateSub: date_add_interval_sql("DATE", "SUB"),
        exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
        exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
        exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
        exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
        exp.GroupConcat: rename_func("STRING_AGG"),
        exp.Hex: rename_func("TO_HEX"),
        exp.If: if_sql(false_value="NULL"),
        exp.ILike: no_ilike_sql,
        exp.IntDiv: rename_func("DIV"),
        exp.JSONFormat: rename_func("TO_JSON_STRING"),
        exp.JSONKeyValue: json_keyvalue_comma_sql,
        exp.Max: max_or_greatest,
        # BigQuery's MD5 returns BYTES; wrap in TO_HEX to match the hex-string
        # semantics of exp.MD5.
        exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
        exp.MD5Digest: rename_func("MD5"),
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.RegexpExtract: lambda self, e: self.func(
            "REGEXP_EXTRACT",
            e.this,
            e.expression,
            e.args.get("position"),
            e.args.get("occurrence"),
        ),
        exp.RegexpReplace: regexp_replace_sql,
        exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        exp.ReturnsProperty: _returnsproperty_sql,
        exp.Select: transforms.preprocess(
            [
                transforms.explode_to_unnest(),
                _unqualify_unnest,
                transforms.eliminate_distinct_on,
                _alias_ordered_group,
                transforms.eliminate_semi_and_anti_joins,
            ]
        ),
        # Only 256- and 512-bit digests exist in BigQuery.
        # NOTE: dropped the spurious f-prefixes on these placeholder-free
        # string literals (flake8 F541); runtime values are unchanged.
        exp.SHA2: lambda self, e: self.func(
            "SHA256" if e.text("length") == "256" else "SHA512", e.this
        ),
        exp.StabilityProperty: lambda self, e: "DETERMINISTIC"
        if e.name == "IMMUTABLE"
        else "NOT DETERMINISTIC",
        exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
        exp.StrToTime: lambda self, e: self.func(
            "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
        ),
        exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
        exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
        exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
        exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
        exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
        exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
        exp.Unhex: rename_func("FROM_HEX"),
        exp.Values: _derived_table_values_to_unnest,
        exp.VariancePop: rename_func("VAR_POP"),
    }

    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
        exp.DataType.Type.BIGINT: "INT64",
        exp.DataType.Type.BINARY: "BYTES",
        exp.DataType.Type.BOOLEAN: "BOOL",
        exp.DataType.Type.CHAR: "STRING",
        exp.DataType.Type.DECIMAL: "NUMERIC",
        exp.DataType.Type.DOUBLE: "FLOAT64",
        exp.DataType.Type.FLOAT: "FLOAT64",
        exp.DataType.Type.INT: "INT64",
        exp.DataType.Type.NCHAR: "STRING",
        exp.DataType.Type.NVARCHAR: "STRING",
        exp.DataType.Type.SMALLINT: "INT64",
        exp.DataType.Type.TEXT: "STRING",
        exp.DataType.Type.TIMESTAMP: "DATETIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
        exp.DataType.Type.TINYINT: "INT64",
        exp.DataType.Type.VARBINARY: "BYTES",
        exp.DataType.Type.VARCHAR: "STRING",
        exp.DataType.Type.VARIANT: "ANY TYPE",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    RESERVED_KEYWORDS = {
        *generator.Generator.RESERVED_KEYWORDS,
        "all",
        "and",
        "any",
        "array",
        "as",
        "asc",
        "assert_rows_modified",
        "at",
        "between",
        "by",
        "case",
        "cast",
        "collate",
        "contains",
        "create",
        "cross",
        "cube",
        "current",
        "default",
        "define",
        "desc",
        "distinct",
        "else",
        "end",
        "enum",
        "escape",
        "except",
        "exclude",
        "exists",
        "extract",
        "false",
        "fetch",
        "following",
        "for",
        "from",
        "full",
        "group",
        "grouping",
        "groups",
        "hash",
        "having",
        "if",
        "ignore",
        "in",
        "inner",
        "intersect",
        "interval",
        "into",
        "is",
        "join",
        "lateral",
        "left",
        "like",
        "limit",
        "lookup",
        "merge",
        "natural",
        "new",
        "no",
        "not",
        "null",
        "nulls",
        "of",
        "on",
        "or",
        "order",
        "outer",
        "over",
        "partition",
        "preceding",
        "proto",
        "qualify",
        "range",
        "recursive",
        "respect",
        "right",
        "rollup",
        "rows",
        "select",
        "set",
        "some",
        "struct",
        "tablesample",
        "then",
        "to",
        "treat",
        "true",
        "unbounded",
        "union",
        "unnest",
        "using",
        "when",
        "where",
        "window",
        "with",
        "within",
    }

    def eq_sql(self, expression: exp.EQ) -> str:
        """Generate `=`; a literal NULL operand collapses to NULL.

        Operands of = cannot be NULL in BigQuery, so `x = NULL` would be a
        compile error there; emitting NULL preserves the SQL semantics.
        """
        if isinstance(expression.left, exp.Null) or isinstance(expression.right, exp.Null):
            return "NULL"

        return self.binary(expression, "=")

    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        """Generate AT TIME ZONE, normally via TIMESTAMP(DATETIME(x, tz))."""
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        """TRY_CAST maps to BigQuery's SAFE_CAST."""
        return self.cast_sql(expression, safe_prefix="SAFE_")

    def cte_sql(self, expression: exp.CTE) -> str:
        """Generate a CTE; BigQuery has no `WITH t(col, ...)` column lists."""
        if expression.alias_column_names:
            self.unsupported("Column names in CTE definition are not supported.")
        return super().cte_sql(expression)

    def array_sql(self, expression: exp.Array) -> str:
        """Generate an array; ARRAY(subquery) keeps the function form."""
        first_arg = seq_get(expression.expressions, 0)
        if isinstance(first_arg, exp.Subqueryable):
            return f"ARRAY{self.wrap(self.sql(first_arg))}"

        return inline_array_sql(self, expression)

    def transaction_sql(self, *_) -> str:
        # BigQuery spells BEGIN/COMMIT/ROLLBACK with an explicit TRANSACTION keyword.
        return "BEGIN TRANSACTION"

    def commit_sql(self, *_) -> str:
        return "COMMIT TRANSACTION"

    def rollback_sql(self, *_) -> str:
        return "ROLLBACK TRANSACTION"

    def in_unnest_op(self, expression: exp.Unnest) -> str:
        # `x IN UNNEST(arr)` takes the unnest bare, without wrapping parens.
        return self.sql(expression)

    def except_op(self, expression: exp.Except) -> str:
        """EXCEPT requires DISTINCT in BigQuery; warn and fall back otherwise."""
        if not expression.args.get("distinct", False):
            self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
        return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def intersect_op(self, expression: exp.Intersect) -> str:
        """INTERSECT requires DISTINCT in BigQuery; warn and fall back otherwise."""
        if not expression.args.get("distinct", False):
            self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
        return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def with_properties(self, properties: exp.Properties) -> str:
        # Trailing table/column properties are rendered as an OPTIONS(...) clause.
        return self.properties(properties, prefix=self.seg("OPTIONS"))

    def version_sql(self, expression: exp.Version) -> str:
        """Generate FOR SYSTEM_TIME AS OF; TIMESTAMP is an accepted alias."""
        if expression.name == "TIMESTAMP":
            expression = expression.copy()
            expression.set("this", "SYSTEM_TIME")
        return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case-insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
NVL2_SUPPORTED = False
UNNEST_WITH_ORDINALITY = False
COLLATE_IS_FUNC = True
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArgMin'>: <function arg_max_or_min_no_count.<locals>._arg_max_or_min_sql>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.CollateProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'array', 'where', 'join', 'of', 'is', 'tablesample', 'cube', 'window', 'extract', 'some', 'respect', 'partition', 'hash', 'if', 'ignore', 'rows', 'union', 'into', 'when', 'cross', 'lookup', 'left', 'recursive', 'qualify', 'grouping', 'merge', 'rollup', 'false', 'to', 'unbounded', 'desc', 'proto', 'enum', 'null', 'and', 'new', 'within', 'collate', 'from', 'true', 'intersect', 'not', 'as', 'define', 'any', 'interval', 'full', 'outer', 'groups', 'using', 'default', 'no', 'having', 'limit', 'nulls', 'except', 'on', 'then', 'create', 'exclude', 'assert_rows_modified', 'current', 'between', 'for', 'set', 'group', 'over', 'exists', 'by', 'distinct', 'preceding', 'range', 'struct', 'else', 'all', 'in', 'select', 'or', 'contains', 'order', 'cast', 'lateral', 'asc', 'following', 'treat', 'escape', 'case', 'inner', 'end', 'fetch', 'like', 'at', 'with', 'unnest', 'natural', 'right'}
def eq_sql(self, expression: exp.EQ) -> str:
    """Render an equality comparison; `=` cannot take NULL operands in BigQuery."""
    # Comparing anything to NULL with `=` is invalid, so emit the literal NULL.
    has_null_operand = isinstance(expression.left, exp.Null) or isinstance(
        expression.right, exp.Null
    )
    if has_null_operand:
        return "NULL"

    return self.binary(expression, "=")
def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
    """Render AT TIME ZONE, converting to TIMESTAMP(DATETIME(..)) outside text casts."""
    parent = expression.parent

    # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
    # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
    if isinstance(parent, exp.Cast) and parent.to.is_type("text"):
        return super().attimezone_sql(expression)

    zone = expression.args.get("zone")
    return self.func("TIMESTAMP", self.func("DATETIME", expression.this, zone))
def trycast_sql(self, expression: exp.TryCast) -> str:
    """TRY_CAST is spelled SAFE_CAST in BigQuery."""
    return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: exp.CTE) -> str:
    """Render a CTE, flagging column lists, which BigQuery's WITH clause rejects."""
    # There is no `name (col, ...)` form in BigQuery; warn but still emit the CTE.
    column_names = expression.alias_column_names
    if column_names:
        self.unsupported("Column names in CTE definition are not supported.")
    return super().cte_sql(expression)
def array_sql(self, expression: exp.Array) -> str:
    """Render ARRAY(..); a leading subquery keeps the ARRAY(subquery) form."""
    head = seq_get(expression.expressions, 0)

    # ARRAY(SELECT ...) must not be rewritten into a bracketed array literal.
    if isinstance(head, exp.Subqueryable):
        return f"ARRAY{self.wrap(self.sql(head))}"

    return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
    """BigQuery spells BEGIN as BEGIN TRANSACTION; extra args are ignored."""
    return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
    """BigQuery spells COMMIT as COMMIT TRANSACTION; extra args are ignored."""
    return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
    """BigQuery spells ROLLBACK as ROLLBACK TRANSACTION; extra args are ignored."""
    return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: exp.Unnest) -> str:
    """IN UNNEST(..) needs no extra wrapping around the UNNEST call."""
    return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
688        def except_op(self, expression: exp.Except) -> str:
689            if not expression.args.get("distinct", False):
690                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
691            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
693        def intersect_op(self, expression: exp.Intersect) -> str:
694            if not expression.args.get("distinct", False):
695                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
696            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: exp.Properties) -> str:
    """Table properties are rendered as an OPTIONS(..) clause in BigQuery."""
    return self.properties(properties, prefix=self.seg("OPTIONS"))
def version_sql(self, expression: exp.Version) -> str:
    """Render FOR .. AS OF; BigQuery uses SYSTEM_TIME where others use TIMESTAMP."""
    if expression.name == "TIMESTAMP":
        # Rewrite on a copy so the caller's AST is left untouched.
        rewritten = expression.copy()
        rewritten.set("this", "SYSTEM_TIME")
        return super().version_sql(rewritten)

    return super().version_sql(expression)
LOG_BASE_FIRST = False
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v'}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
    """Checks if text can be identified given an identify option.

    Args:
        text: The text to check.
        identify:
            "always" or `True`: Always returns true.
            "safe": True if the identifier is case-insensitive.

    Returns:
        Whether or not the given text can be identified.
    """
    if identify is True or identify == "always":
        return True

    # "safe" mode only identifies text whose casing is not significant.
    return identify == "safe" and not cls.case_sensitive(text)

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:

Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
LIMIT_ONLY_LITERALS
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_ADD_COLUMN_KEYWORD
AGGREGATE_FILTER_SUPPORTED
SEMI_ANTI_JOIN_WITH_SIDE
SUPPORTS_PARAMETERS
COMPUTED_COLUMN_WITH_TYPE
SUPPORTS_TABLE_COPY
TABLESAMPLE_REQUIRES_PARENS
DATA_TYPE_SPECIFIERS_ALLOWED
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
log_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql