Edit on GitHub

sqlglot.dialects.bigquery

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    datestrtodate_sql,
 13    format_time_lambda,
 14    inline_array_sql,
 15    max_or_greatest,
 16    min_or_least,
 17    no_ilike_sql,
 18    parse_date_delta_with_interval,
 19    regexp_replace_sql,
 20    rename_func,
 21    timestrtotime_sql,
 22    ts_or_ds_to_date_sql,
 23)
 24from sqlglot.helper import seq_get, split_num_words
 25from sqlglot.tokens import TokenType
 26
 27logger = logging.getLogger("sqlglot")
 28
 29
 30def _date_add_sql(
 31    data_type: str, kind: str
 32) -> t.Callable[[generator.Generator, exp.Expression], str]:
 33    def func(self, expression):
 34        this = self.sql(expression, "this")
 35        unit = expression.args.get("unit")
 36        unit = exp.var(unit.name.upper() if unit else "DAY")
 37        interval = exp.Interval(this=expression.expression.copy(), unit=unit)
 38        return f"{data_type}_{kind}({this}, {self.sql(interval)})"
 39
 40    return func
 41
 42
 43def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str:
 44    if not expression.find_ancestor(exp.From, exp.Join):
 45        return self.values_sql(expression)
 46
 47    alias = expression.args.get("alias")
 48
 49    structs = [
 50        exp.Struct(
 51            expressions=[
 52                exp.alias_(value, column_name)
 53                for value, column_name in zip(
 54                    t.expressions,
 55                    alias.columns
 56                    if alias and alias.columns
 57                    else (f"_c{i}" for i in range(len(t.expressions))),
 58                )
 59            ]
 60        )
 61        for t in expression.find_all(exp.Tuple)
 62    ]
 63
 64    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 65
 66
 67def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str:
 68    this = expression.this
 69    if isinstance(this, exp.Schema):
 70        this = f"{this.this} <{self.expressions(this)}>"
 71    else:
 72        this = self.sql(this)
 73    return f"RETURNS {this}"
 74
 75
 76def _create_sql(self: generator.Generator, expression: exp.Create) -> str:
 77    kind = expression.args["kind"]
 78    returns = expression.find(exp.ReturnsProperty)
 79
 80    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 81        expression = expression.copy()
 82        expression.set("kind", "TABLE FUNCTION")
 83
 84        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 85            expression.set("expression", expression.expression.this)
 86
 87        return self.create_sql(expression)
 88
 89    return self.create_sql(expression)
 90
 91
 92def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 93    """Remove references to unnest table aliases since bigquery doesn't allow them.
 94
 95    These are added by the optimizer's qualify_column step.
 96    """
 97    from sqlglot.optimizer.scope import Scope
 98
 99    if isinstance(expression, exp.Select):
100        for unnest in expression.find_all(exp.Unnest):
101            if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias:
102                for column in Scope(expression).find_all(exp.Column):
103                    if column.table == unnest.alias:
104                        column.set("table", None)
105
106    return expression
107
108
# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace grouped expressions by their select-list aliases (see bug above)."""
    if not isinstance(expression, exp.Select):
        return expression

    group = expression.args.get("group")
    order = expression.args.get("order")

    if group and order:
        # Map each aliased projection's underlying expression to its alias.
        alias_by_expr = {
            projection.this: projection.args["alias"]
            for projection in expression.selects
            if isinstance(projection, exp.Alias)
        }

        for grouped in group.expressions:
            replacement = alias_by_expr.get(grouped)
            if replacement:
                grouped.replace(exp.column(replacement))

    return expression
135
136
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if isinstance(expression, exp.CTE) and expression.alias_column_names:
        cte_query = expression.this

        if cte_query.is_star:
            # A star projection can't be matched 1:1 against the CTE column list,
            # so bail out and leave the CTE untouched.
            logger.warning(
                "Can't push down CTE column names for star queries. Run the query through"
                " the optimizer or use 'qualify' to expand the star projections first."
            )
            return expression

        column_names = expression.alias_column_names
        # Drop the column list from the CTE alias; the names move onto the projections.
        expression.args["alias"].set("columns", None)

        for name, select in zip(column_names, cte_query.selects):
            # Keep a handle on the original node: it is what gets replaced in place.
            to_replace = select

            if isinstance(select, exp.Alias):
                select = select.this

            # Inner aliases are shadowed by the CTE column names
            to_replace.replace(exp.alias_(select, name))

    return expression
162
163
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Parse ``PARSE_TIMESTAMP(fmt, value[, zone])`` into a StrToTime expression."""
    build = format_time_lambda(exp.StrToTime, "bigquery")
    expression = build([seq_get(args, 1), seq_get(args, 0)])
    expression.set("zone", seq_get(args, 2))
    return expression
168
169
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """``DATE(y, m, d)`` builds a date from parts; any other arity is a plain DATE."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
173
174
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
179
180
class BigQuery(Dialect):
    """Dialect definition for Google BigQuery (standard SQL)."""

    # An UNNEST call yields a single column rather than a table alias with columns.
    UNNEST_COLUMN_ONLY = True

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    # Dialect-specific time-format token overrides.
    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    # BigQuery format elements mapped to strftime-style directives.
    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        """Lowercase identifiers that BigQuery treats as case-insensitive.

        In BigQuery, CTEs aren't case-sensitive, but table names are (by default,
        at least). The following check is essentially a heuristic to detect tables
        based on whether or not they're qualified: UDF names, qualified table
        identifiers, and identifiers explicitly flagged as tables are left alone.
        """
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            # Walk up through dotted paths to find the real structural parent.
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression
233
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer overrides for BigQuery's lexical grammar."""

        # BigQuery accepts single/double quotes and their triple-quoted forms.
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." etc. — every quote style with a byte-string prefix.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # r'...' / R"..." etc. — raw strings for every quote style.
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            # BigQuery's TIMESTAMP is tokenized as TIMESTAMPTZ here.
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        # DIV is a function in BigQuery, not a keyword.
        KEYWORDS.pop("DIV")
266
    class Parser(parser.Parser):
        """Parser overrides for BigQuery-specific syntax and function mappings."""

        PREFIXED_PIVOT_COLUMNS = True

        # LOG's arguments are (value, base); a bare LOG parses as LN.
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            # DATE(y, m, d) vs DATE(expr) — dispatched on arity.
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE takes (format, value); the builder expects (value, format).
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # Single-group patterns implicitly extract group 1.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(SELECT ...) — the argument is a full statement, not an expression list.
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # TRIM uses plain function-call syntax in BigQuery.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, gluing dash-separated segments together.

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            """
            this = super()._parse_table_part(schema=schema)

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume DASH + following token pairs so `my-project` stays one identifier.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Split a dotted identifier like `project.dataset.table` into table parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # Anything beyond three parts is folded back into a dotted name.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table
376
    class Generator(generator.Generator):
        """SQL generator overrides for emitting BigQuery standard SQL."""

        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: _date_add_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: _date_add_sql("DATE", "SUB"),
            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.Max: max_or_greatest,
            # MD5 round-trips through TO_HEX(MD5(...)) — the inverse of _parse_to_hex.
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            # NOTE(review): the `f` prefix on "DETERMINISTIC" is unnecessary (no placeholders).
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
            exp.TimeSub: _date_add_sql("TIME", "SUB"),
            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            # NOTE(review): the `f` prefix on "TRIM" is unnecessary (no placeholders).
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Render AT TIME ZONE, converting to TIMESTAMP(DATETIME(...)) when needed."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """TRY_CAST is spelled SAFE_CAST in BigQuery."""
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            """Render a CTE; explicit column lists are unsupported (see _pushdown_cte_column_names)."""
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """ARRAY over a subquery keeps ARRAY(...) syntax; otherwise emit a literal [...]."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            """BigQuery requires the TRANSACTION keyword for BEGIN."""
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            """BigQuery requires the TRANSACTION keyword for COMMIT."""
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            """BigQuery requires the TRANSACTION keyword for ROLLBACK."""
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            """IN UNNEST(...) takes the unnest expression directly, with no wrapping."""
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            """EXCEPT must be qualified; non-DISTINCT falls back to ALL with a warning."""
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            """INTERSECT must be qualified; non-DISTINCT falls back to ALL with a warning."""
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Properties are rendered in an OPTIONS(...) clause."""
            return self.properties(properties, prefix=self.seg("OPTIONS"))
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
182class BigQuery(Dialect):
183    UNNEST_COLUMN_ONLY = True
184
185    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
186    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
187
188    # bigquery udfs are case sensitive
189    NORMALIZE_FUNCTIONS = False
190
191    TIME_MAPPING = {
192        "%D": "%m/%d/%y",
193    }
194
195    FORMAT_MAPPING = {
196        "DD": "%d",
197        "MM": "%m",
198        "MON": "%b",
199        "MONTH": "%B",
200        "YYYY": "%Y",
201        "YY": "%y",
202        "HH": "%I",
203        "HH12": "%I",
204        "HH24": "%H",
205        "MI": "%M",
206        "SS": "%S",
207        "SSSSS": "%f",
208        "TZH": "%z",
209    }
210
211    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
212    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
213    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
214
215    @classmethod
216    def normalize_identifier(cls, expression: E) -> E:
217        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
218        # The following check is essentially a heuristic to detect tables based on whether or
219        # not they're qualified.
220        if isinstance(expression, exp.Identifier):
221            parent = expression.parent
222
223            while isinstance(parent, exp.Dot):
224                parent = parent.parent
225
226            if (
227                not isinstance(parent, exp.UserDefinedFunction)
228                and not (isinstance(parent, exp.Table) and parent.db)
229                and not expression.meta.get("is_table")
230            ):
231                expression.set("this", expression.this.lower())
232
233        return expression
234
235    class Tokenizer(tokens.Tokenizer):
236        QUOTES = ["'", '"', '"""', "'''"]
237        COMMENTS = ["--", "#", ("/*", "*/")]
238        IDENTIFIERS = ["`"]
239        STRING_ESCAPES = ["\\"]
240
241        HEX_STRINGS = [("0x", ""), ("0X", "")]
242
243        BYTE_STRINGS = [
244            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
245        ]
246
247        RAW_STRINGS = [
248            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
249        ]
250
251        KEYWORDS = {
252            **tokens.Tokenizer.KEYWORDS,
253            "ANY TYPE": TokenType.VARIANT,
254            "BEGIN": TokenType.COMMAND,
255            "BEGIN TRANSACTION": TokenType.BEGIN,
256            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
257            "BYTES": TokenType.BINARY,
258            "DECLARE": TokenType.COMMAND,
259            "FLOAT64": TokenType.DOUBLE,
260            "INT64": TokenType.BIGINT,
261            "RECORD": TokenType.STRUCT,
262            "TIMESTAMP": TokenType.TIMESTAMPTZ,
263            "NOT DETERMINISTIC": TokenType.VOLATILE,
264            "UNKNOWN": TokenType.NULL,
265        }
266        KEYWORDS.pop("DIV")
267
268    class Parser(parser.Parser):
269        PREFIXED_PIVOT_COLUMNS = True
270
271        LOG_BASE_FIRST = False
272        LOG_DEFAULTS_TO_LN = True
273
274        FUNCTIONS = {
275            **parser.Parser.FUNCTIONS,
276            "DATE": _parse_date,
277            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
278            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
279            "DATE_TRUNC": lambda args: exp.DateTrunc(
280                unit=exp.Literal.string(str(seq_get(args, 1))),
281                this=seq_get(args, 0),
282            ),
283            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
284            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
285            "DIV": binary_from_function(exp.IntDiv),
286            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
287            "MD5": exp.MD5Digest.from_arg_list,
288            "TO_HEX": _parse_to_hex,
289            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
290                [seq_get(args, 1), seq_get(args, 0)]
291            ),
292            "PARSE_TIMESTAMP": _parse_timestamp,
293            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
294            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
295                this=seq_get(args, 0),
296                expression=seq_get(args, 1),
297                position=seq_get(args, 2),
298                occurrence=seq_get(args, 3),
299                group=exp.Literal.number(1)
300                if re.compile(str(seq_get(args, 1))).groups == 1
301                else None,
302            ),
303            "SPLIT": lambda args: exp.Split(
304                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
305                this=seq_get(args, 0),
306                expression=seq_get(args, 1) or exp.Literal.string(","),
307            ),
308            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
309            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
310            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
311            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
312            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
313        }
314
315        FUNCTION_PARSERS = {
316            **parser.Parser.FUNCTION_PARSERS,
317            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
318        }
319        FUNCTION_PARSERS.pop("TRIM")
320
321        NO_PAREN_FUNCTIONS = {
322            **parser.Parser.NO_PAREN_FUNCTIONS,
323            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
324        }
325
326        NESTED_TYPE_TOKENS = {
327            *parser.Parser.NESTED_TYPE_TOKENS,
328            TokenType.TABLE,
329        }
330
331        ID_VAR_TOKENS = {
332            *parser.Parser.ID_VAR_TOKENS,
333            TokenType.VALUES,
334        }
335
336        PROPERTY_PARSERS = {
337            **parser.Parser.PROPERTY_PARSERS,
338            "NOT DETERMINISTIC": lambda self: self.expression(
339                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
340            ),
341            "OPTIONS": lambda self: self._parse_with_property(),
342        }
343
344        CONSTRAINT_PARSERS = {
345            **parser.Parser.CONSTRAINT_PARSERS,
346            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
347        }
348
349        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
350            this = super()._parse_table_part(schema=schema)
351
352            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
353            if isinstance(this, exp.Identifier):
354                table_name = this.name
355                while self._match(TokenType.DASH, advance=False) and self._next:
356                    self._advance(2)
357                    table_name += f"-{self._prev.text}"
358
359                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
360
361            return this
362
363        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
364            table = super()._parse_table_parts(schema=schema)
365            if isinstance(table.this, exp.Identifier) and "." in table.name:
366                catalog, db, this, *rest = (
367                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
368                    for x in split_num_words(table.name, ".", 3)
369                )
370
371                if rest and this:
372                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
373
374                table = exp.Table(this=this, db=db, catalog=catalog)
375
376            return table
377
378    class Generator(generator.Generator):
379        EXPLICIT_UNION = True
380        INTERVAL_ALLOWS_PLURAL_FORM = False
381        JOIN_HINTS = False
382        QUERY_HINTS = False
383        TABLE_HINTS = False
384        LIMIT_FETCH = "LIMIT"
385        RENAME_TABLE_WITH_DB = False
386        ESCAPE_LINE_BREAK = True
387        NVL2_SUPPORTED = False
388
389        TRANSFORMS = {
390            **generator.Generator.TRANSFORMS,
391            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
392            exp.ArraySize: rename_func("ARRAY_LENGTH"),
393            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
394            exp.Create: _create_sql,
395            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
396            exp.DateAdd: _date_add_sql("DATE", "ADD"),
397            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
398            exp.DateFromParts: rename_func("DATE"),
399            exp.DateStrToDate: datestrtodate_sql,
400            exp.DateSub: _date_add_sql("DATE", "SUB"),
401            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
402            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
403            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
404            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
405            exp.GroupConcat: rename_func("STRING_AGG"),
406            exp.Hex: rename_func("TO_HEX"),
407            exp.ILike: no_ilike_sql,
408            exp.IntDiv: rename_func("DIV"),
409            exp.JSONFormat: rename_func("TO_JSON_STRING"),
410            exp.Max: max_or_greatest,
411            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
412            exp.MD5Digest: rename_func("MD5"),
413            exp.Min: min_or_least,
414            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
415            exp.RegexpExtract: lambda self, e: self.func(
416                "REGEXP_EXTRACT",
417                e.this,
418                e.expression,
419                e.args.get("position"),
420                e.args.get("occurrence"),
421            ),
422            exp.RegexpReplace: regexp_replace_sql,
423            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
424            exp.ReturnsProperty: _returnsproperty_sql,
425            exp.Select: transforms.preprocess(
426                [
427                    transforms.explode_to_unnest,
428                    _unqualify_unnest,
429                    transforms.eliminate_distinct_on,
430                    _alias_ordered_group,
431                ]
432            ),
433            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
434            if e.name == "IMMUTABLE"
435            else "NOT DETERMINISTIC",
436            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
437            exp.StrToTime: lambda self, e: self.func(
438                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
439            ),
440            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
441            exp.TimeSub: _date_add_sql("TIME", "SUB"),
442            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
443            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
444            exp.TimeStrToTime: timestrtotime_sql,
445            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
446            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
447            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
448            exp.Unhex: rename_func("FROM_HEX"),
449            exp.Values: _derived_table_values_to_unnest,
450            exp.VariancePop: rename_func("VAR_POP"),
451        }
452
453        TYPE_MAPPING = {
454            **generator.Generator.TYPE_MAPPING,
455            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
456            exp.DataType.Type.BIGINT: "INT64",
457            exp.DataType.Type.BINARY: "BYTES",
458            exp.DataType.Type.BOOLEAN: "BOOL",
459            exp.DataType.Type.CHAR: "STRING",
460            exp.DataType.Type.DECIMAL: "NUMERIC",
461            exp.DataType.Type.DOUBLE: "FLOAT64",
462            exp.DataType.Type.FLOAT: "FLOAT64",
463            exp.DataType.Type.INT: "INT64",
464            exp.DataType.Type.NCHAR: "STRING",
465            exp.DataType.Type.NVARCHAR: "STRING",
466            exp.DataType.Type.SMALLINT: "INT64",
467            exp.DataType.Type.TEXT: "STRING",
468            exp.DataType.Type.TIMESTAMP: "DATETIME",
469            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
470            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
471            exp.DataType.Type.TINYINT: "INT64",
472            exp.DataType.Type.VARBINARY: "BYTES",
473            exp.DataType.Type.VARCHAR: "STRING",
474            exp.DataType.Type.VARIANT: "ANY TYPE",
475        }
476
477        PROPERTIES_LOCATION = {
478            **generator.Generator.PROPERTIES_LOCATION,
479            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
480            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
481        }
482
483        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
484        RESERVED_KEYWORDS = {
485            *generator.Generator.RESERVED_KEYWORDS,
486            "all",
487            "and",
488            "any",
489            "array",
490            "as",
491            "asc",
492            "assert_rows_modified",
493            "at",
494            "between",
495            "by",
496            "case",
497            "cast",
498            "collate",
499            "contains",
500            "create",
501            "cross",
502            "cube",
503            "current",
504            "default",
505            "define",
506            "desc",
507            "distinct",
508            "else",
509            "end",
510            "enum",
511            "escape",
512            "except",
513            "exclude",
514            "exists",
515            "extract",
516            "false",
517            "fetch",
518            "following",
519            "for",
520            "from",
521            "full",
522            "group",
523            "grouping",
524            "groups",
525            "hash",
526            "having",
527            "if",
528            "ignore",
529            "in",
530            "inner",
531            "intersect",
532            "interval",
533            "into",
534            "is",
535            "join",
536            "lateral",
537            "left",
538            "like",
539            "limit",
540            "lookup",
541            "merge",
542            "natural",
543            "new",
544            "no",
545            "not",
546            "null",
547            "nulls",
548            "of",
549            "on",
550            "or",
551            "order",
552            "outer",
553            "over",
554            "partition",
555            "preceding",
556            "proto",
557            "qualify",
558            "range",
559            "recursive",
560            "respect",
561            "right",
562            "rollup",
563            "rows",
564            "select",
565            "set",
566            "some",
567            "struct",
568            "tablesample",
569            "then",
570            "to",
571            "treat",
572            "true",
573            "unbounded",
574            "union",
575            "unnest",
576            "using",
577            "when",
578            "where",
579            "window",
580            "with",
581            "within",
582        }
583
584        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
585            parent = expression.parent
586
587            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
588            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
589            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
590                return self.func(
591                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
592                )
593
594            return super().attimezone_sql(expression)
595
596        def trycast_sql(self, expression: exp.TryCast) -> str:
597            return self.cast_sql(expression, safe_prefix="SAFE_")
598
599        def cte_sql(self, expression: exp.CTE) -> str:
600            if expression.alias_column_names:
601                self.unsupported("Column names in CTE definition are not supported.")
602            return super().cte_sql(expression)
603
604        def array_sql(self, expression: exp.Array) -> str:
605            first_arg = seq_get(expression.expressions, 0)
606            if isinstance(first_arg, exp.Subqueryable):
607                return f"ARRAY{self.wrap(self.sql(first_arg))}"
608
609            return inline_array_sql(self, expression)
610
611        def transaction_sql(self, *_) -> str:
612            return "BEGIN TRANSACTION"
613
614        def commit_sql(self, *_) -> str:
615            return "COMMIT TRANSACTION"
616
617        def rollback_sql(self, *_) -> str:
618            return "ROLLBACK TRANSACTION"
619
620        def in_unnest_op(self, expression: exp.Unnest) -> str:
621            return self.sql(expression)
622
623        def except_op(self, expression: exp.Except) -> str:
624            if not expression.args.get("distinct", False):
625                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
626            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
627
628        def intersect_op(self, expression: exp.Intersect) -> str:
629            if not expression.args.get("distinct", False):
630                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
631            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
632
633        def with_properties(self, properties: exp.Properties) -> str:
634            return self.properties(properties, prefix=self.seg("OPTIONS"))
UNNEST_COLUMN_ONLY = True
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONTIME', '_PARTITIONDATE'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
215    @classmethod
216    def normalize_identifier(cls, expression: E) -> E:
217        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
218        # The following check is essentially a heuristic to detect tables based on whether or
219        # not they're qualified.
220        if isinstance(expression, exp.Identifier):
221            parent = expression.parent
222
223            while isinstance(parent, exp.Dot):
224                parent = parent.parent
225
226            if (
227                not isinstance(parent, exp.UserDefinedFunction)
228                and not (isinstance(parent, exp.Table) and parent.db)
229                and not expression.meta.get("is_table")
230            ):
231                expression.set("this", expression.this.lower())
232
233        return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of being quoted or not.

tokenizer_class = <class 'sqlglot.dialects.bigquery.BigQuery.Tokenizer'>
generator_class = <class 'sqlglot.dialects.bigquery.BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
    class Tokenizer(tokens.Tokenizer):
        # BigQuery accepts single- and double-quoted strings, plus triple-quoted
        # variants for multi-line literals.
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # Bytes literals: every quote style prefixed with b/B, e.g. b'...'.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        # Raw strings: every quote style prefixed with r/R (no escape processing).
        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            # Bare TIMESTAMP in BigQuery is timezone-aware.
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
        }
        # BigQuery spells integer division as the DIV() function, not a keyword.
        KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': 
<TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 
'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 
'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 
'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': 
<TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>}
class BigQuery.Parser(sqlglot.parser.Parser):
268    class Parser(parser.Parser):
269        PREFIXED_PIVOT_COLUMNS = True
270
271        LOG_BASE_FIRST = False
272        LOG_DEFAULTS_TO_LN = True
273
        # BigQuery-specific function-name -> builder overrides on top of the
        # base parser's function table.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            # DATE is overloaded: DATE(ts[, tz]) and DATE(year, month, day).
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE takes (format, value) — swapped vs. exp.StrToDate's order.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # A single capturing group in the pattern means group 1 is extracted.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }
314
315        FUNCTION_PARSERS = {
316            **parser.Parser.FUNCTION_PARSERS,
317            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
318        }
319        FUNCTION_PARSERS.pop("TRIM")
320
321        NO_PAREN_FUNCTIONS = {
322            **parser.Parser.NO_PAREN_FUNCTIONS,
323            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
324        }
325
326        NESTED_TYPE_TOKENS = {
327            *parser.Parser.NESTED_TYPE_TOKENS,
328            TokenType.TABLE,
329        }
330
331        ID_VAR_TOKENS = {
332            *parser.Parser.ID_VAR_TOKENS,
333            TokenType.VALUES,
334        }
335
336        PROPERTY_PARSERS = {
337            **parser.Parser.PROPERTY_PARSERS,
338            "NOT DETERMINISTIC": lambda self: self.expression(
339                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
340            ),
341            "OPTIONS": lambda self: self._parse_with_property(),
342        }
343
344        CONSTRAINT_PARSERS = {
345            **parser.Parser.CONSTRAINT_PARSERS,
346            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
347        }
348
349        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
350            this = super()._parse_table_part(schema=schema)
351
352            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
353            if isinstance(this, exp.Identifier):
354                table_name = this.name
355                while self._match(TokenType.DASH, advance=False) and self._next:
356                    self._advance(2)
357                    table_name += f"-{self._prev.text}"
358
359                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
360
361            return this
362
363        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
364            table = super()._parse_table_parts(schema=schema)
365            if isinstance(table.this, exp.Identifier) and "." in table.name:
366                catalog, db, this, *rest = (
367                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
368                    for x in split_num_words(table.name, ".", 3)
369                )
370
371                if rest and this:
372                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))
373
374                table = exp.Table(this=this, db=db, catalog=catalog)
375
376            return table

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 
'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 
'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'SUBSTRING': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 
'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NESTED: 'NESTED'>, <TokenType.MAP: 'MAP'>, <TokenType.TABLE: 'TABLE'>}
ID_VAR_TOKENS = {<TokenType.VARIANT: 'VARIANT'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.ALL: 'ALL'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.FALSE: 'FALSE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.FIRST: 'FIRST'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.TRUE: 'TRUE'>, <TokenType.VALUES: 'VALUES'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.NEXT: 'NEXT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.LEFT: 'LEFT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.INT256: 'INT256'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.RANGE: 'RANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.KEEP: 'KEEP'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.UINT: 'UINT'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.DIV: 'DIV'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIME: 'TIME'>, <TokenType.FULL: 'FULL'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.CASE: 'CASE'>, <TokenType.SOME: 'SOME'>, <TokenType.INDEX: 'INDEX'>, <TokenType.MERGE: 'MERGE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ENUM: 'ENUM'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.IS: 'IS'>, 
<TokenType.ISNULL: 'ISNULL'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.SET: 'SET'>, <TokenType.ANY: 'ANY'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.TEXT: 'TEXT'>, <TokenType.XML: 'XML'>, <TokenType.ROW: 'ROW'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.APPLY: 'APPLY'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.ROWS: 'ROWS'>, <TokenType.TABLE: 'TABLE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TOP: 'TOP'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.UINT256: 'UINT256'>, <TokenType.BINARY: 'BINARY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.BIT: 'BIT'>, <TokenType.VAR: 'VAR'>, <TokenType.DATE: 'DATE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.MAP: 'MAP'>, <TokenType.DESC: 'DESC'>, <TokenType.JSONB: 'JSONB'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.TIMETZ: 'TIMETZ'>, 
<TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.ASC: 'ASC'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.MONEY: 'MONEY'>, <TokenType.JSON: 'JSON'>, <TokenType.SUPER: 'SUPER'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.END: 'END'>, <TokenType.CHAR: 'CHAR'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.INT: 'INT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.INET: 'INET'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.CACHE: 'CACHE'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
UNNEST_COLUMN_ONLY: bool = True
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
378    class Generator(generator.Generator):
379        EXPLICIT_UNION = True
380        INTERVAL_ALLOWS_PLURAL_FORM = False
381        JOIN_HINTS = False
382        QUERY_HINTS = False
383        TABLE_HINTS = False
384        LIMIT_FETCH = "LIMIT"
385        RENAME_TABLE_WITH_DB = False
386        ESCAPE_LINE_BREAK = True
387        NVL2_SUPPORTED = False
388
389        TRANSFORMS = {
390            **generator.Generator.TRANSFORMS,
391            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
392            exp.ArraySize: rename_func("ARRAY_LENGTH"),
393            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
394            exp.Create: _create_sql,
395            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
396            exp.DateAdd: _date_add_sql("DATE", "ADD"),
397            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
398            exp.DateFromParts: rename_func("DATE"),
399            exp.DateStrToDate: datestrtodate_sql,
400            exp.DateSub: _date_add_sql("DATE", "SUB"),
401            exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"),
402            exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"),
403            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
404            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
405            exp.GroupConcat: rename_func("STRING_AGG"),
406            exp.Hex: rename_func("TO_HEX"),
407            exp.ILike: no_ilike_sql,
408            exp.IntDiv: rename_func("DIV"),
409            exp.JSONFormat: rename_func("TO_JSON_STRING"),
410            exp.Max: max_or_greatest,
411            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
412            exp.MD5Digest: rename_func("MD5"),
413            exp.Min: min_or_least,
414            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
415            exp.RegexpExtract: lambda self, e: self.func(
416                "REGEXP_EXTRACT",
417                e.this,
418                e.expression,
419                e.args.get("position"),
420                e.args.get("occurrence"),
421            ),
422            exp.RegexpReplace: regexp_replace_sql,
423            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
424            exp.ReturnsProperty: _returnsproperty_sql,
425            exp.Select: transforms.preprocess(
426                [
427                    transforms.explode_to_unnest,
428                    _unqualify_unnest,
429                    transforms.eliminate_distinct_on,
430                    _alias_ordered_group,
431                ]
432            ),
433            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
434            if e.name == "IMMUTABLE"
435            else "NOT DETERMINISTIC",
436            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
437            exp.StrToTime: lambda self, e: self.func(
438                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
439            ),
440            exp.TimeAdd: _date_add_sql("TIME", "ADD"),
441            exp.TimeSub: _date_add_sql("TIME", "SUB"),
442            exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"),
443            exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"),
444            exp.TimeStrToTime: timestrtotime_sql,
445            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
446            exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"),
447            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
448            exp.Unhex: rename_func("FROM_HEX"),
449            exp.Values: _derived_table_values_to_unnest,
450            exp.VariancePop: rename_func("VAR_POP"),
451        }
452
453        TYPE_MAPPING = {
454            **generator.Generator.TYPE_MAPPING,
455            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
456            exp.DataType.Type.BIGINT: "INT64",
457            exp.DataType.Type.BINARY: "BYTES",
458            exp.DataType.Type.BOOLEAN: "BOOL",
459            exp.DataType.Type.CHAR: "STRING",
460            exp.DataType.Type.DECIMAL: "NUMERIC",
461            exp.DataType.Type.DOUBLE: "FLOAT64",
462            exp.DataType.Type.FLOAT: "FLOAT64",
463            exp.DataType.Type.INT: "INT64",
464            exp.DataType.Type.NCHAR: "STRING",
465            exp.DataType.Type.NVARCHAR: "STRING",
466            exp.DataType.Type.SMALLINT: "INT64",
467            exp.DataType.Type.TEXT: "STRING",
468            exp.DataType.Type.TIMESTAMP: "DATETIME",
469            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
470            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
471            exp.DataType.Type.TINYINT: "INT64",
472            exp.DataType.Type.VARBINARY: "BYTES",
473            exp.DataType.Type.VARCHAR: "STRING",
474            exp.DataType.Type.VARIANT: "ANY TYPE",
475        }
476
477        PROPERTIES_LOCATION = {
478            **generator.Generator.PROPERTIES_LOCATION,
479            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
480            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
481        }
482
483        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
484        RESERVED_KEYWORDS = {
485            *generator.Generator.RESERVED_KEYWORDS,
486            "all",
487            "and",
488            "any",
489            "array",
490            "as",
491            "asc",
492            "assert_rows_modified",
493            "at",
494            "between",
495            "by",
496            "case",
497            "cast",
498            "collate",
499            "contains",
500            "create",
501            "cross",
502            "cube",
503            "current",
504            "default",
505            "define",
506            "desc",
507            "distinct",
508            "else",
509            "end",
510            "enum",
511            "escape",
512            "except",
513            "exclude",
514            "exists",
515            "extract",
516            "false",
517            "fetch",
518            "following",
519            "for",
520            "from",
521            "full",
522            "group",
523            "grouping",
524            "groups",
525            "hash",
526            "having",
527            "if",
528            "ignore",
529            "in",
530            "inner",
531            "intersect",
532            "interval",
533            "into",
534            "is",
535            "join",
536            "lateral",
537            "left",
538            "like",
539            "limit",
540            "lookup",
541            "merge",
542            "natural",
543            "new",
544            "no",
545            "not",
546            "null",
547            "nulls",
548            "of",
549            "on",
550            "or",
551            "order",
552            "outer",
553            "over",
554            "partition",
555            "preceding",
556            "proto",
557            "qualify",
558            "range",
559            "recursive",
560            "respect",
561            "right",
562            "rollup",
563            "rows",
564            "select",
565            "set",
566            "some",
567            "struct",
568            "tablesample",
569            "then",
570            "to",
571            "treat",
572            "true",
573            "unbounded",
574            "union",
575            "unnest",
576            "using",
577            "when",
578            "where",
579            "window",
580            "with",
581            "within",
582        }
583
584        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
585            parent = expression.parent
586
587            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
588            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
589            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
590                return self.func(
591                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
592                )
593
594            return super().attimezone_sql(expression)
595
596        def trycast_sql(self, expression: exp.TryCast) -> str:
597            return self.cast_sql(expression, safe_prefix="SAFE_")
598
599        def cte_sql(self, expression: exp.CTE) -> str:
600            if expression.alias_column_names:
601                self.unsupported("Column names in CTE definition are not supported.")
602            return super().cte_sql(expression)
603
604        def array_sql(self, expression: exp.Array) -> str:
605            first_arg = seq_get(expression.expressions, 0)
606            if isinstance(first_arg, exp.Subqueryable):
607                return f"ARRAY{self.wrap(self.sql(first_arg))}"
608
609            return inline_array_sql(self, expression)
610
611        def transaction_sql(self, *_) -> str:
612            return "BEGIN TRANSACTION"
613
614        def commit_sql(self, *_) -> str:
615            return "COMMIT TRANSACTION"
616
617        def rollback_sql(self, *_) -> str:
618            return "ROLLBACK TRANSACTION"
619
620        def in_unnest_op(self, expression: exp.Unnest) -> str:
621            return self.sql(expression)
622
623        def except_op(self, expression: exp.Except) -> str:
624            if not expression.args.get("distinct", False):
625                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
626            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
627
628        def intersect_op(self, expression: exp.Intersect) -> str:
629            if not expression.args.get("distinct", False):
630                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
631            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
632
633        def with_properties(self, properties: exp.Properties) -> str:
634            return self.properties(properties, prefix=self.seg("OPTIONS"))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it is mandated by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
NVL2_SUPPORTED = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalDayToSecondSpan'>: 'DAY TO SECOND', <class 'sqlglot.expressions.IntervalYearToMonthSpan'>: 'YEAR TO MONTH', <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function _date_add_sql.<locals>.func>, 
<class 'sqlglot.expressions.DatetimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function _date_add_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 
'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'not', 'fetch', 'at', 'create', 'asc', 'treat', 'true', 'range', 'all', 'no', 'groups', 'extract', 'new', 'to', 'preceding', 'following', 'partition', 'intersect', 'for', 'then', 'contains', 'group', 'cube', 'define', 'proto', 'distinct', 'any', 'recursive', 'within', 'assert_rows_modified', 'merge', 'respect', 'between', 'like', 'by', 'where', 'unbounded', 'natural', 'enum', 'ignore', 'end', 'nulls', 'from', 'some', 'rows', 'except', 'is', 'left', 'using', 'limit', 'right', 'array', 'if', 'null', 'else', 'when', 'into', 'on', 'select', 'struct', 'inner', 'false', 'escape', 'of', 'join', 'as', 'tablesample', 'over', 'qualify', 'lateral', 'exclude', 'and', 'outer', 'order', 'set', 'unnest', 'in', 'current', 'hash', 'lookup', 'full', 'cast', 'window', 'case', 'union', 'with', 'or', 'collate', 'default', 'rollup', 'exists', 'cross', 'interval', 'having', 'grouping', 'desc'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
584        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
585            parent = expression.parent
586
587            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
588            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
589            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
590                return self.func(
591                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
592                )
593
594            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
596        def trycast_sql(self, expression: exp.TryCast) -> str:
597            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
599        def cte_sql(self, expression: exp.CTE) -> str:
600            if expression.alias_column_names:
601                self.unsupported("Column names in CTE definition are not supported.")
602            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
604        def array_sql(self, expression: exp.Array) -> str:
605            first_arg = seq_get(expression.expressions, 0)
606            if isinstance(first_arg, exp.Subqueryable):
607                return f"ARRAY{self.wrap(self.sql(first_arg))}"
608
609            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
611        def transaction_sql(self, *_) -> str:
612            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
614        def commit_sql(self, *_) -> str:
615            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
617        def rollback_sql(self, *_) -> str:
618            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
620        def in_unnest_op(self, expression: exp.Unnest) -> str:
621            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
623        def except_op(self, expression: exp.Except) -> str:
624            if not expression.args.get("distinct", False):
625                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
626            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
628        def intersect_op(self, expression: exp.Intersect) -> str:
629            if not expression.args.get("distinct", False):
630                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
631            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
633        def with_properties(self, properties: exp.Properties) -> str:
634            return self.properties(properties, prefix=self.seg("OPTIONS"))
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
257    @classmethod
258    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
259        """Checks if text can be identified given an identify option.
260
261        Args:
262            text: The text to check.
263            identify:
264                "always" or `True`: Always returns true.
265                "safe": True if the identifier is case-insensitive.
266
267        Returns:
268            Whether or not the given text can be identified.
269        """
270        if identify is True or identify == "always":
271            return True
272
273        if identify == "safe":
274            return not cls.case_sensitive(text)
275
276        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or True: Always returns true. "safe": True if the identifier is case-insensitive.
Returns:

Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
STRING_ESCAPE = '\\'
IDENTIFIER_ESCAPE = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypesize_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql