sqlglot.dialects.snowflake — Snowflake dialect (tokenizer, parser and generator overrides).

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    date_delta_sql,
  11    date_trunc_to_time,
  12    datestrtodate_sql,
  13    build_formatted_time,
  14    if_sql,
  15    inline_array_sql,
  16    max_or_greatest,
  17    min_or_least,
  18    rename_func,
  19    timestamptrunc_sql,
  20    timestrtotime_sql,
  21    var_map_sql,
  22)
  23from sqlglot.helper import flatten, is_float, is_int, seq_get
  24from sqlglot.tokens import TokenType
  25
  26if t.TYPE_CHECKING:
  27    from sqlglot._typing import E
  28
  29
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: The Snowflake function name, used for the Anonymous fallback.
        kind: Target data type the function converts to.
        safe: True for the TRY_* variants; stored on the produced expression.

    The builder prefers transpilable expressions (casts, UnixToTime, StrToTime,
    TsOrDsToDate) and only falls back to an Anonymous function call when the
    arguments don't match a recognized literal pattern.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)

        if isinstance(value, exp.Literal):
            int_value = is_int(value.this)

            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    # Integer literal means a Unix epoch value; arg 2 is the scale.
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    # Non-numeric string: treat arg 2 as a format string.
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)
            if kind == exp.DataType.Type.DATE and not int_value:
                formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
                formatted_exp.set("safe", safe)
                return formatted_exp

        # Unrecognized argument shapes are kept as-is so they round-trip.
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  58
  59
  60def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  61    expression = parser.build_var_map(args)
  62
  63    if isinstance(expression, exp.StarMap):
  64        return expression
  65
  66    return exp.Struct(
  67        expressions=[
  68            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  69        ]
  70    )
  71
  72
  73def _build_datediff(args: t.List) -> exp.DateDiff:
  74    return exp.DateDiff(
  75        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
  76    )
  77
  78
  79# https://docs.snowflake.com/en/sql-reference/functions/div0
  80def _build_if_from_div0(args: t.List) -> exp.If:
  81    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  82    true = exp.Literal.number(0)
  83    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  84    return exp.If(this=cond, true=true, false=false)
  85
  86
  87# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
  88def _build_if_from_zeroifnull(args: t.List) -> exp.If:
  89    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
  90    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
  91
  92
  93# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
  94def _build_if_from_nullifzero(args: t.List) -> exp.If:
  95    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
  96    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
  97
  98
  99def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 100    flag = expression.text("flag")
 101
 102    if "i" not in flag:
 103        flag += "i"
 104
 105    return self.func(
 106        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 107    )
 108
 109
 110def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 111    if len(args) == 3:
 112        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 113    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 114
 115
 116def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 117    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 118
 119    if not regexp_replace.args.get("replacement"):
 120        regexp_replace.set("replacement", exp.Literal.string(""))
 121
 122    return regexp_replace
 123
 124
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Return a SHOW_PARSERS callback that forwards to _parse_show_snowflake."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 130
 131
# Snowflake date/time part aliases mapped to their canonical part names,
# used by _map_date_part to normalize units in DATE_PART/DATEADD/DATEDIFF etc.
DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
 203
 204
 205@t.overload
 206def _map_date_part(part: exp.Expression) -> exp.Var:
 207    pass
 208
 209
 210@t.overload
 211def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 212    pass
 213
 214
 215def _map_date_part(part):
 216    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
 217    return exp.var(mapped) if mapped else part
 218
 219
 220def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 221    trunc = date_trunc_to_time(args)
 222    trunc.set("unit", _map_date_part(trunc.args["unit"]))
 223    return trunc
 224
 225
 226def _build_timestamp_from_parts(args: t.List) -> exp.Func:
 227    if len(args) == 2:
 228        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
 229        # so we parse this into Anonymous for now instead of introducing complexity
 230        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
 231
 232    return exp.TimestampFromParts.from_arg_list(args)
 233
 234
 235def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 236    """
 237    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 238    so we need to unqualify them.
 239
 240    Example:
 241        >>> from sqlglot import parse_one
 242        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 243        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 244        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 245    """
 246    if isinstance(expression, exp.Pivot) and expression.unpivot:
 247        expression = transforms.unqualify_columns(expression)
 248
 249    return expression
 250
 251
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Drop nested-type parameters from column definitions in CREATE statements,
    unless the table is an Iceberg table (which supports structured types).

    Mutates the expression in place (transform with copy=False) and returns it.
    """
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # e.g. ARRAY(INT) -> ARRAY: clear the inner type parameters.
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    # Only plain (non-Iceberg) CREATE ... (schema) statements are rewritten.
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 269
 270
 271class Snowflake(Dialect):
 272    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
 273    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
 274    NULL_ORDERING = "nulls_are_large"
 275    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
 276    SUPPORTS_USER_DEFINED_TYPES = False
 277    SUPPORTS_SEMI_ANTI_JOIN = False
 278    PREFER_CTE_ALIAS_COLUMN = True
 279    TABLESAMPLE_SIZE_IS_PERCENT = True
 280
 281    TIME_MAPPING = {
 282        "YYYY": "%Y",
 283        "yyyy": "%Y",
 284        "YY": "%y",
 285        "yy": "%y",
 286        "MMMM": "%B",
 287        "mmmm": "%B",
 288        "MON": "%b",
 289        "mon": "%b",
 290        "MM": "%m",
 291        "mm": "%m",
 292        "DD": "%d",
 293        "dd": "%-d",
 294        "DY": "%a",
 295        "dy": "%w",
 296        "HH24": "%H",
 297        "hh24": "%H",
 298        "HH12": "%I",
 299        "hh12": "%I",
 300        "MI": "%M",
 301        "mi": "%M",
 302        "SS": "%S",
 303        "ss": "%S",
 304        "FF": "%f",
 305        "ff": "%f",
 306        "FF6": "%f",
 307        "ff6": "%f",
 308    }
 309
 310    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 311        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 312        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 313        if (
 314            isinstance(expression, exp.Identifier)
 315            and isinstance(expression.parent, exp.Table)
 316            and expression.name.lower() == "dual"
 317        ):
 318            return expression  # type: ignore
 319
 320        return super().quote_identifier(expression, identify=identify)
 321
 322    class Parser(parser.Parser):
 323        IDENTIFY_PIVOT_STRINGS = True
 324
 325        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 326
 327        FUNCTIONS = {
 328            **parser.Parser.FUNCTIONS,
 329            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
 330            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
 331            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
 332                this=seq_get(args, 1), expression=seq_get(args, 0)
 333            ),
 334            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
 335                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
 336                start=seq_get(args, 0),
 337                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
 338                step=seq_get(args, 2),
 339            ),
 340            "BITXOR": binary_from_function(exp.BitwiseXor),
 341            "BIT_XOR": binary_from_function(exp.BitwiseXor),
 342            "BOOLXOR": binary_from_function(exp.Xor),
 343            "CONVERT_TIMEZONE": _build_convert_timezone,
 344            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
 345            "DATE_TRUNC": _date_trunc_to_time,
 346            "DATEADD": lambda args: exp.DateAdd(
 347                this=seq_get(args, 2),
 348                expression=seq_get(args, 1),
 349                unit=_map_date_part(seq_get(args, 0)),
 350            ),
 351            "DATEDIFF": _build_datediff,
 352            "DIV0": _build_if_from_div0,
 353            "FLATTEN": exp.Explode.from_arg_list,
 354            "GET_PATH": lambda args, dialect: exp.JSONExtract(
 355                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
 356            ),
 357            "IFF": exp.If.from_arg_list,
 358            "LAST_DAY": lambda args: exp.LastDay(
 359                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
 360            ),
 361            "LISTAGG": exp.GroupConcat.from_arg_list,
 362            "NULLIFZERO": _build_if_from_nullifzero,
 363            "OBJECT_CONSTRUCT": _build_object_construct,
 364            "REGEXP_REPLACE": _build_regexp_replace,
 365            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
 366            "RLIKE": exp.RegexpLike.from_arg_list,
 367            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 368            "TIMEDIFF": _build_datediff,
 369            "TIMESTAMPDIFF": _build_datediff,
 370            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
 371            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
 372            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
 373            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
 374            "TO_NUMBER": lambda args: exp.ToNumber(
 375                this=seq_get(args, 0),
 376                format=seq_get(args, 1),
 377                precision=seq_get(args, 2),
 378                scale=seq_get(args, 3),
 379            ),
 380            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
 381            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
 382            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
 383            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
 384            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
 385            "TO_VARCHAR": exp.ToChar.from_arg_list,
 386            "ZEROIFNULL": _build_if_from_zeroifnull,
 387        }
 388
 389        FUNCTION_PARSERS = {
 390            **parser.Parser.FUNCTION_PARSERS,
 391            "DATE_PART": lambda self: self._parse_date_part(),
 392            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
 393        }
 394        FUNCTION_PARSERS.pop("TRIM")
 395
 396        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 397
 398        RANGE_PARSERS = {
 399            **parser.Parser.RANGE_PARSERS,
 400            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
 401            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
 402        }
 403
 404        ALTER_PARSERS = {
 405            **parser.Parser.ALTER_PARSERS,
 406            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
 407            "UNSET": lambda self: self.expression(
 408                exp.Set,
 409                tag=self._match_text_seq("TAG"),
 410                expressions=self._parse_csv(self._parse_id_var),
 411                unset=True,
 412            ),
 413            "SWAP": lambda self: self._parse_alter_table_swap(),
 414        }
 415
 416        STATEMENT_PARSERS = {
 417            **parser.Parser.STATEMENT_PARSERS,
 418            TokenType.SHOW: lambda self: self._parse_show(),
 419        }
 420
 421        PROPERTY_PARSERS = {
 422            **parser.Parser.PROPERTY_PARSERS,
 423            "LOCATION": lambda self: self._parse_location(),
 424        }
 425
 426        SHOW_PARSERS = {
 427            "SCHEMAS": _show_parser("SCHEMAS"),
 428            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
 429            "OBJECTS": _show_parser("OBJECTS"),
 430            "TERSE OBJECTS": _show_parser("OBJECTS"),
 431            "TABLES": _show_parser("TABLES"),
 432            "TERSE TABLES": _show_parser("TABLES"),
 433            "VIEWS": _show_parser("VIEWS"),
 434            "TERSE VIEWS": _show_parser("VIEWS"),
 435            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 436            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 437            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 438            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 439            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 440            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 441            "SEQUENCES": _show_parser("SEQUENCES"),
 442            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
 443            "COLUMNS": _show_parser("COLUMNS"),
 444            "USERS": _show_parser("USERS"),
 445            "TERSE USERS": _show_parser("USERS"),
 446        }
 447
 448        STAGED_FILE_SINGLE_TOKENS = {
 449            TokenType.DOT,
 450            TokenType.MOD,
 451            TokenType.SLASH,
 452        }
 453
 454        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
 455
 456        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 457
 458        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 459            this = super()._parse_column_ops(this)
 460
 461            casts = []
 462            json_path = []
 463
 464            while self._match(TokenType.COLON):
 465                path = super()._parse_column_ops(self._parse_field(any_token=True))
 466
 467                # The cast :: operator has a lower precedence than the extraction operator :, so
 468                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
 469                while isinstance(path, exp.Cast):
 470                    casts.append(path.to)
 471                    path = path.this
 472
 473                if path:
 474                    json_path.append(path.sql(dialect="snowflake", copy=False))
 475
 476            if json_path:
 477                this = self.expression(
 478                    exp.JSONExtract,
 479                    this=this,
 480                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
 481                )
 482
 483                while casts:
 484                    this = self.expression(exp.Cast, this=this, to=casts.pop())
 485
 486            return this
 487
 488        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 489        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 490        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 491            this = self._parse_var() or self._parse_type()
 492
 493            if not this:
 494                return None
 495
 496            self._match(TokenType.COMMA)
 497            expression = self._parse_bitwise()
 498            this = _map_date_part(this)
 499            name = this.name.upper()
 500
 501            if name.startswith("EPOCH"):
 502                if name == "EPOCH_MILLISECOND":
 503                    scale = 10**3
 504                elif name == "EPOCH_MICROSECOND":
 505                    scale = 10**6
 506                elif name == "EPOCH_NANOSECOND":
 507                    scale = 10**9
 508                else:
 509                    scale = None
 510
 511                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 512                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 513
 514                if scale:
 515                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 516
 517                return to_unix
 518
 519            return self.expression(exp.Extract, this=this, expression=expression)
 520
 521        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 522            if is_map:
 523                # Keys are strings in Snowflake's objects, see also:
 524                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 525                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 526                return self._parse_slice(self._parse_string())
 527
 528            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 529
 530        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 531            lateral = super()._parse_lateral()
 532            if not lateral:
 533                return lateral
 534
 535            if isinstance(lateral.this, exp.Explode):
 536                table_alias = lateral.args.get("alias")
 537                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 538                if table_alias and not table_alias.args.get("columns"):
 539                    table_alias.set("columns", columns)
 540                elif not table_alias:
 541                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 542
 543            return lateral
 544
 545        def _parse_at_before(self, table: exp.Table) -> exp.Table:
 546            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
 547            index = self._index
 548            if self._match_texts(("AT", "BEFORE")):
 549                this = self._prev.text.upper()
 550                kind = (
 551                    self._match(TokenType.L_PAREN)
 552                    and self._match_texts(self.HISTORICAL_DATA_KIND)
 553                    and self._prev.text.upper()
 554                )
 555                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
 556
 557                if expression:
 558                    self._match_r_paren()
 559                    when = self.expression(
 560                        exp.HistoricalData, this=this, kind=kind, expression=expression
 561                    )
 562                    table.set("when", when)
 563                else:
 564                    self._retreat(index)
 565
 566            return table
 567
 568        def _parse_table_parts(
 569            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
 570        ) -> exp.Table:
 571            # https://docs.snowflake.com/en/user-guide/querying-stage
 572            if self._match(TokenType.STRING, advance=False):
 573                table = self._parse_string()
 574            elif self._match_text_seq("@", advance=False):
 575                table = self._parse_location_path()
 576            else:
 577                table = None
 578
 579            if table:
 580                file_format = None
 581                pattern = None
 582
 583                self._match(TokenType.L_PAREN)
 584                while self._curr and not self._match(TokenType.R_PAREN):
 585                    if self._match_text_seq("FILE_FORMAT", "=>"):
 586                        file_format = self._parse_string() or super()._parse_table_parts(
 587                            is_db_reference=is_db_reference
 588                        )
 589                    elif self._match_text_seq("PATTERN", "=>"):
 590                        pattern = self._parse_string()
 591                    else:
 592                        break
 593
 594                    self._match(TokenType.COMMA)
 595
 596                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
 597            else:
 598                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
 599
 600            return self._parse_at_before(table)
 601
 602        def _parse_id_var(
 603            self,
 604            any_token: bool = True,
 605            tokens: t.Optional[t.Collection[TokenType]] = None,
 606        ) -> t.Optional[exp.Expression]:
 607            if self._match_text_seq("IDENTIFIER", "("):
 608                identifier = (
 609                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 610                    or self._parse_string()
 611                )
 612                self._match_r_paren()
 613                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 614
 615            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 616
 617        def _parse_show_snowflake(self, this: str) -> exp.Show:
 618            scope = None
 619            scope_kind = None
 620
 621            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
 622            # which is syntactically valid but has no effect on the output
 623            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
 624
 625            history = self._match_text_seq("HISTORY")
 626
 627            like = self._parse_string() if self._match(TokenType.LIKE) else None
 628
 629            if self._match(TokenType.IN):
 630                if self._match_text_seq("ACCOUNT"):
 631                    scope_kind = "ACCOUNT"
 632                elif self._match_set(self.DB_CREATABLES):
 633                    scope_kind = self._prev.text.upper()
 634                    if self._curr:
 635                        scope = self._parse_table_parts()
 636                elif self._curr:
 637                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
 638                    scope = self._parse_table_parts()
 639
 640            return self.expression(
 641                exp.Show,
 642                **{
 643                    "terse": terse,
 644                    "this": this,
 645                    "history": history,
 646                    "like": like,
 647                    "scope": scope,
 648                    "scope_kind": scope_kind,
 649                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
 650                    "limit": self._parse_limit(),
 651                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
 652                },
 653            )
 654
 655        def _parse_alter_table_swap(self) -> exp.SwapTable:
 656            self._match_text_seq("WITH")
 657            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 658
 659        def _parse_location(self) -> exp.LocationProperty:
 660            self._match(TokenType.EQ)
 661            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 662
 663        def _parse_location_path(self) -> exp.Var:
 664            parts = [self._advance_any(ignore_reserved=True)]
 665
 666            # We avoid consuming a comma token because external tables like @foo and @bar
 667            # can be joined in a query with a comma separator.
 668            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
 669                parts.append(self._advance_any(ignore_reserved=True))
 670
 671            return exp.var("".join(part.text for part in parts if part))
 672
 673    class Tokenizer(tokens.Tokenizer):
 674        STRING_ESCAPES = ["\\", "'"]
 675        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
 676        RAW_STRINGS = ["$$"]
 677        COMMENTS = ["--", "//", ("/*", "*/")]
 678
 679        KEYWORDS = {
 680            **tokens.Tokenizer.KEYWORDS,
 681            "BYTEINT": TokenType.INT,
 682            "CHAR VARYING": TokenType.VARCHAR,
 683            "CHARACTER VARYING": TokenType.VARCHAR,
 684            "EXCLUDE": TokenType.EXCEPT,
 685            "ILIKE ANY": TokenType.ILIKE_ANY,
 686            "LIKE ANY": TokenType.LIKE_ANY,
 687            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
 688            "MINUS": TokenType.EXCEPT,
 689            "NCHAR VARYING": TokenType.VARCHAR,
 690            "PUT": TokenType.COMMAND,
 691            "REMOVE": TokenType.COMMAND,
 692            "RENAME": TokenType.REPLACE,
 693            "RM": TokenType.COMMAND,
 694            "SAMPLE": TokenType.TABLE_SAMPLE,
 695            "SQL_DOUBLE": TokenType.DOUBLE,
 696            "SQL_VARCHAR": TokenType.VARCHAR,
 697            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
 698            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
 699            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
 700            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
 701            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
 702            "TOP": TokenType.TOP,
 703        }
 704
 705        SINGLE_TOKENS = {
 706            **tokens.Tokenizer.SINGLE_TOKENS,
 707            "$": TokenType.PARAMETER,
 708        }
 709
 710        VAR_SINGLE_TOKENS = {"$"}
 711
 712        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 713
 714    class Generator(generator.Generator):
 715        PARAMETER_TOKEN = "$"
 716        MATCHED_BY_SOURCE = False
 717        SINGLE_STRING_INTERVAL = True
 718        JOIN_HINTS = False
 719        TABLE_HINTS = False
 720        QUERY_HINTS = False
 721        AGGREGATE_FILTER_SUPPORTED = False
 722        SUPPORTS_TABLE_COPY = False
 723        COLLATE_IS_FUNC = True
 724        LIMIT_ONLY_LITERALS = True
 725        JSON_KEY_VALUE_PAIR_SEP = ","
 726        INSERT_OVERWRITE = " OVERWRITE INTO"
 727        STRUCT_DELIMITER = ("(", ")")
 728
        # AST node -> SQL rendering overrides for Snowflake.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive while ARRAY_GENERATE_RANGE's end is
            # exclusive, hence the + 1 on the end bound
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # the JSON path root ($) has no textual representation in GET_PATH paths
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            # TIMESTAMPDIFF argument order is (unit, start, end)
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 815
        # JSON path components that can be transpiled into Snowflake's path syntax.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # structured types render as the semi-structured OBJECT type
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * modifiers: Snowflake spells EXCEPT as EXCLUDE and REPLACE as RENAME
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # SET and VOLATILE have no Snowflake DDL equivalent
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
 839
 840        def datatype_sql(self, expression: exp.DataType) -> str:
 841            expressions = expression.expressions
 842            if (
 843                expressions
 844                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 845                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 846            ):
 847                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 848                return "OBJECT"
 849
 850            return super().datatype_sql(expression)
 851
 852        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 853            return self.func(
 854                "TO_NUMBER",
 855                expression.this,
 856                expression.args.get("format"),
 857                expression.args.get("precision"),
 858                expression.args.get("scale"),
 859            )
 860
 861        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 862            milli = expression.args.get("milli")
 863            if milli is not None:
 864                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 865                expression.set("nano", milli_to_nano)
 866
 867            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 868
 869        def trycast_sql(self, expression: exp.TryCast) -> str:
 870            value = expression.this
 871
 872            if value.type is None:
 873                from sqlglot.optimizer.annotate_types import annotate_types
 874
 875                value = annotate_types(value)
 876
 877            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 878                return super().trycast_sql(expression)
 879
 880            # TRY_CAST only works for string values in Snowflake
 881            return self.cast_sql(expression)
 882
 883        def log_sql(self, expression: exp.Log) -> str:
 884            if not expression.expression:
 885                return self.func("LN", expression.this)
 886
 887            return super().log_sql(expression)
 888
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST(...) into Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes six fixed output columns (seq, key, path, index,
            value, this), so an alias column list is built that maps the
            UNNEST's offset/value aliases onto the matching positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # a WITH OFFSET alias (if any) lands on FLATTEN's INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # the first alias column (if any) names FLATTEN's VALUE column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # no alias given: synthesize one so the columns are addressable
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 912
 913        def show_sql(self, expression: exp.Show) -> str:
 914            terse = "TERSE " if expression.args.get("terse") else ""
 915            history = " HISTORY" if expression.args.get("history") else ""
 916            like = self.sql(expression, "like")
 917            like = f" LIKE {like}" if like else ""
 918
 919            scope = self.sql(expression, "scope")
 920            scope = f" {scope}" if scope else ""
 921
 922            scope_kind = self.sql(expression, "scope_kind")
 923            if scope_kind:
 924                scope_kind = f" IN {scope_kind}"
 925
 926            starts_with = self.sql(expression, "starts_with")
 927            if starts_with:
 928                starts_with = f" STARTS WITH {starts_with}"
 929
 930            limit = self.sql(expression, "limit")
 931
 932            from_ = self.sql(expression, "from")
 933            if from_:
 934                from_ = f" FROM {from_}"
 935
 936            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 937
 938        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 939            # Other dialects don't support all of the following parameters, so we need to
 940            # generate default values as necessary to ensure the transpilation is correct
 941            group = expression.args.get("group")
 942            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 943            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 944            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 945
 946            return self.func(
 947                "REGEXP_SUBSTR",
 948                expression.this,
 949                expression.expression,
 950                position,
 951                occurrence,
 952                parameters,
 953                group,
 954            )
 955
 956        def except_op(self, expression: exp.Except) -> str:
 957            if not expression.args.get("distinct"):
 958                self.unsupported("EXCEPT with All is not supported in Snowflake")
 959            return super().except_op(expression)
 960
 961        def intersect_op(self, expression: exp.Intersect) -> str:
 962            if not expression.args.get("distinct"):
 963                self.unsupported("INTERSECT with All is not supported in Snowflake")
 964            return super().intersect_op(expression)
 965
 966        def describe_sql(self, expression: exp.Describe) -> str:
 967            # Default to table if kind is unknown
 968            kind_value = expression.args.get("kind") or "TABLE"
 969            kind = f" {kind_value}" if kind_value else ""
 970            this = f" {self.sql(expression, 'this')}"
 971            expressions = self.expressions(expression, flat=True)
 972            expressions = f" {expressions}" if expressions else ""
 973            return f"DESCRIBE{kind}{this}{expressions}"
 974
 975        def generatedasidentitycolumnconstraint_sql(
 976            self, expression: exp.GeneratedAsIdentityColumnConstraint
 977        ) -> str:
 978            start = expression.args.get("start")
 979            start = f" START {start}" if start else ""
 980            increment = expression.args.get("increment")
 981            increment = f" INCREMENT {increment}" if increment else ""
 982            return f"AUTOINCREMENT{start}{increment}"
 983
 984        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 985            this = self.sql(expression, "this")
 986            return f"SWAP WITH {this}"
 987
 988        def with_properties(self, properties: exp.Properties) -> str:
 989            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
 990
 991        def cluster_sql(self, expression: exp.Cluster) -> str:
 992            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 993
 994        def struct_sql(self, expression: exp.Struct) -> str:
 995            keys = []
 996            values = []
 997
 998            for i, e in enumerate(expression.expressions):
 999                if isinstance(e, exp.PropertyEQ):
1000                    keys.append(
1001                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1002                    )
1003                    values.append(e.expression)
1004                else:
1005                    keys.append(exp.Literal.string(f"_{i}"))
1006                    values.append(e)
1007
1008            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
# Maps Snowflake's many date-part aliases onto their canonical part names,
# grouped by the canonical part for readability.
DATE_PART_MAPPING = {
    # year
    "Y": "YEAR", "YY": "YEAR", "YYY": "YEAR", "YYYY": "YEAR",
    "YR": "YEAR", "YEARS": "YEAR", "YRS": "YEAR",
    # month
    "MM": "MONTH", "MON": "MONTH", "MONS": "MONTH", "MONTHS": "MONTH",
    # day
    "D": "DAY", "DD": "DAY", "DAYS": "DAY", "DAYOFMONTH": "DAY",
    # day of week
    "WEEKDAY": "DAYOFWEEK", "DOW": "DAYOFWEEK", "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO", "DOW_ISO": "DAYOFWEEKISO", "DW_ISO": "DAYOFWEEKISO",
    # day of year
    "YEARDAY": "DAYOFYEAR", "DOY": "DAYOFYEAR", "DY": "DAYOFYEAR",
    # week
    "W": "WEEK", "WK": "WEEK", "WEEKOFYEAR": "WEEK", "WOY": "WEEK", "WY": "WEEK",
    "WEEK_ISO": "WEEKISO", "WEEKOFYEARISO": "WEEKISO", "WEEKOFYEAR_ISO": "WEEKISO",
    # quarter
    "Q": "QUARTER", "QTR": "QUARTER", "QTRS": "QUARTER", "QUARTERS": "QUARTER",
    # hour
    "H": "HOUR", "HH": "HOUR", "HR": "HOUR", "HOURS": "HOUR", "HRS": "HOUR",
    # minute
    "M": "MINUTE", "MI": "MINUTE", "MIN": "MINUTE", "MINUTES": "MINUTE", "MINS": "MINUTE",
    # second
    "S": "SECOND", "SEC": "SECOND", "SECONDS": "SECOND", "SECS": "SECOND",
    # sub-second
    "MS": "MILLISECOND", "MSEC": "MILLISECOND", "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND", "USEC": "MICROSECOND", "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND", "NSEC": "NANOSECOND", "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND", "NSECONDS": "NANOSECOND", "NANOSECS": "NANOSECOND",
    # epoch
    "EPOCH": "EPOCH_SECOND", "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    # timezone
    "TZH": "TIMEZONE_HOUR", "TZM": "TIMEZONE_MINUTE",
}
class Snowflake(sqlglot.dialects.dialect.Dialect):
 272class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers are normalized to uppercase.
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"  # NULLs sort as the largest values
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Maps Snowflake time-format tokens to strftime-style directives
    # (both upper- and lowercase spellings are accepted).
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 310
 311    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 312        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 313        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 314        if (
 315            isinstance(expression, exp.Identifier)
 316            and isinstance(expression.parent, exp.Table)
 317            and expression.name.lower() == "dual"
 318        ):
 319            return expression  # type: ignore
 320
 321        return super().quote_identifier(expression, identify=identify)
 322
    class Parser(parser.Parser):
        """Parser for Snowflake-specific syntax: staged files (@stage paths),
        SHOW commands, FLATTEN, colon-based JSON extraction, AT/BEFORE time
        travel, and Snowflake's function spellings."""

        IDENTIFY_PIVOT_STRINGS = True

        # WINDOW is usable as a table alias in Snowflake
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # ARRAY_CONTAINS(value, array) -> ArrayContains(this=array, expression=value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(unit, increment, date) -- normalize the unit alias
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function rather than with the special TRIM syntax
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is handled separately (see TO_TIME above), not as a timestamp type
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # SET/UNSET may be followed by TAG for tag assignments
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # Object kinds understood after SHOW [TERSE] ...
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Tokens that may appear inside a staged file path (e.g. @stage/dir%file)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN <scope> defaults to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding `:`-path segments into a JSONExtract."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # re-apply any casts that were peeled off the path, outermost last
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), normalizing the part name and
            rewriting EPOCH* parts into a scaled TimeToUnix expression."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # scale factor relative to epoch seconds
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a key (or key:value slice) inside brackets/braces."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's fixed column names to its alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # no alias given: synthesize one so the columns are addressable
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an AT/BEFORE time-travel clause and attach it to the table."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # not a complete AT/BEFORE clause: rewind and leave the table as-is
                    self._retreat(index)

            return table

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged file references.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                # staged file: optionally parse (FILE_FORMAT => ..., PATTERN => ...)
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, including IDENTIFIER(...) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the clauses of a SHOW <kind> command into an exp.Show."""
            scope = None
            scope_kind = None

            # this identifies SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # bare IN <name>: default the scope kind based on the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP [WITH] <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] @stage/path property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path (@stage/dir/...) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 673
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer for Snowflake-specific lexical syntax."""

        STRING_ESCAPES = ["\\", "'"]  # backslash escapes and doubled single quotes
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]  # $$-delimited strings take no escapes
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,  # SELECT * EXCLUDE (...)
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # stage file-management statements are passed through as commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,  # SELECT * RENAME (...)
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement, not passed through as a command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 714
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot expression trees."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed argument order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive of the end; ARRAY_GENERATE_RANGE is exclusive,
            # hence the `+ 1` on the end bound
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            # TIMESTAMPDIFF(unit, start, end) — note the operand order swap
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # Snowflake spells SELECT * EXCEPT/REPLACE as EXCLUDE/RENAME
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render data types, degrading typed struct fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision, and scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake only accepts nanoseconds, so convert ms -> ns (x 1e6)
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render single-argument LOG as the natural logarithm LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(...)), aliasing FLATTEN's fixed output columns."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN always produces these six columns; slot the requested aliases
            # into the INDEX and VALUE positions
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render RegexpExtract as REGEXP_SUBSTR, filling in positional defaults."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT, warning when the unsupported ALL modifier is present."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT, warning when the unsupported ALL modifier is present."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            # NOTE(review): kind_value is always truthy here, so the conditional below
            # never yields "" — dead branch kept for symmetry with sibling methods
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the SWAP WITH clause of ALTER TABLE."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Render table properties unwrapped and space-separated."""
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (<exprs>)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render struct literals as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys: "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Add quotes to `expression` when it is an Identifier, honoring Snowflake's DUAL rule."""
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Parses Snowflake SQL into sqlglot expression trees."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's ARRAY_CONTAINS takes (value, array); normalize to (array, value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(unit, increment, expr) — note the reversed operand order
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake, not with FROM syntax
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Single tokens that may appear inside a staged-file path (@stage/%table/dir)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Fixed output columns of the FLATTEN table function, in order
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope may be a schema (vs a table)
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
458
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's `:` JSON-extraction chains into a GET_PATH."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the peeled-off casts around the extraction, innermost first
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
488
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(<part>, <expr>), normalizing EPOCH_* parts into unix-time expressions."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH_* becomes CAST(expr AS TIMESTAMP) -> TimeToUnix, scaled up
                # for milli/micro/nano precision
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
521
        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key/value entry inside a bracketed map or object literal."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
530
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's fixed output column names when needed."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    # Alias exists but has no column list: supply FLATTEN's columns
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the columns are addressable
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
545
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause and attach it to `table`."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not actually a time-travel clause: rewind to before AT/BEFORE
                    self._retreat(index)

            return table
568
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file locations and their options."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional staged-file options: (FILE_FORMAT => ..., PATTERN => ...)
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)
602
603        def _parse_id_var(
604            self,
605            any_token: bool = True,
606            tokens: t.Optional[t.Collection[TokenType]] = None,
607        ) -> t.Optional[exp.Expression]:
608            if self._match_text_seq("IDENTIFIER", "("):
609                identifier = (
610                    super()._parse_id_var(any_token=any_token, tokens=tokens)
611                    or self._parse_string()
612                )
613                self._match_r_paren()
614                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
615
616            return super()._parse_id_var(any_token=any_token, tokens=tokens)
617
618        def _parse_show_snowflake(self, this: str) -> exp.Show:
619            scope = None
620            scope_kind = None
621
622            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
623            # which is syntactically valid but has no effect on the output
624            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
625
626            history = self._match_text_seq("HISTORY")
627
628            like = self._parse_string() if self._match(TokenType.LIKE) else None
629
630            if self._match(TokenType.IN):
631                if self._match_text_seq("ACCOUNT"):
632                    scope_kind = "ACCOUNT"
633                elif self._match_set(self.DB_CREATABLES):
634                    scope_kind = self._prev.text.upper()
635                    if self._curr:
636                        scope = self._parse_table_parts()
637                elif self._curr:
638                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
639                    scope = self._parse_table_parts()
640
641            return self.expression(
642                exp.Show,
643                **{
644                    "terse": terse,
645                    "this": this,
646                    "history": history,
647                    "like": like,
648                    "scope": scope,
649                    "scope_kind": scope_kind,
650                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
651                    "limit": self._parse_limit(),
652                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
653                },
654            )
655
656        def _parse_alter_table_swap(self) -> exp.SwapTable:
657            self._match_text_seq("WITH")
658            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
659
660        def _parse_location(self) -> exp.LocationProperty:
661            self._match(TokenType.EQ)
662            return self.expression(exp.LocationProperty, this=self._parse_location_path())
663
664        def _parse_location_path(self) -> exp.Var:
665            parts = [self._advance_any(ignore_reserved=True)]
666
667            # We avoid consuming a comma token because external tables like @foo and @bar
668            # can be joined in a query with a comma separator.
669            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
670                parts.append(self._advance_any(ignore_reserved=True))
671
672            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.SMALLINT: 'SMALLINT'>, <TokenType.SOME: 'SOME'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.INET: 'INET'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.END: 'END'>, <TokenType.DIV: 'DIV'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FALSE: 'FALSE'>, <TokenType.INT: 'INT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.IPV6: 'IPV6'>, <TokenType.MAP: 'MAP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.DATE32: 'DATE32'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.FILTER: 'FILTER'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DELETE: 'DELETE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.USE: 'USE'>, <TokenType.TOP: 'TOP'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.BIT: 'BIT'>, <TokenType.XML: 'XML'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CASE: 'CASE'>, <TokenType.VARCHAR: 'VARCHAR'>, 
<TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.NULL: 'NULL'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.UINT: 'UINT'>, <TokenType.TIME: 'TIME'>, <TokenType.KEEP: 'KEEP'>, <TokenType.JSON: 'JSON'>, <TokenType.DATE: 'DATE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SEMI: 'SEMI'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.ALL: 'ALL'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.IS: 'IS'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.INT256: 'INT256'>, <TokenType.INT128: 'INT128'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TRUE: 'TRUE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.NAME: 'NAME'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.VAR: 'VAR'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.SET: 'SET'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.FORMAT: 
'FORMAT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.TEXT: 'TEXT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.CACHE: 'CACHE'>, <TokenType.DESC: 'DESC'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.ANY: 'ANY'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.KILL: 'KILL'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.FIRST: 'FIRST'>, <TokenType.FINAL: 'FINAL'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.ROWS: 'ROWS'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.SHOW: 'SHOW'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.ASC: 'ASC'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MODEL: 'MODEL'>, <TokenType.ENUM: 'ENUM'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.ROW: 'ROW'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.UUID: 'UUID'>, <TokenType.MONEY: 'MONEY'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.ISNULL: 
'ISNULL'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': 
<function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': 
<function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function 
build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function Parser.<lambda>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _build_datediff>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': 
<function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'IMPORTED KEYS', 'TABLES', 'SEQUENCES', 'UNIQUE KEYS', 'VIEWS', 'OBJECTS'}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ID_VAR_TOKENS
INTERVAL_VARS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
CONSTRAINT_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
674    class Tokenizer(tokens.Tokenizer):
675        STRING_ESCAPES = ["\\", "'"]
676        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
677        RAW_STRINGS = ["$$"]
678        COMMENTS = ["--", "//", ("/*", "*/")]
679
680        KEYWORDS = {
681            **tokens.Tokenizer.KEYWORDS,
682            "BYTEINT": TokenType.INT,
683            "CHAR VARYING": TokenType.VARCHAR,
684            "CHARACTER VARYING": TokenType.VARCHAR,
685            "EXCLUDE": TokenType.EXCEPT,
686            "ILIKE ANY": TokenType.ILIKE_ANY,
687            "LIKE ANY": TokenType.LIKE_ANY,
688            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
689            "MINUS": TokenType.EXCEPT,
690            "NCHAR VARYING": TokenType.VARCHAR,
691            "PUT": TokenType.COMMAND,
692            "REMOVE": TokenType.COMMAND,
693            "RENAME": TokenType.REPLACE,
694            "RM": TokenType.COMMAND,
695            "SAMPLE": TokenType.TABLE_SAMPLE,
696            "SQL_DOUBLE": TokenType.DOUBLE,
697            "SQL_VARCHAR": TokenType.VARCHAR,
698            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
699            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
700            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
701            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
702            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
703            "TOP": TokenType.TOP,
704        }
705
706        SINGLE_TOKENS = {
707            **tokens.Tokenizer.SINGLE_TOKENS,
708            "$": TokenType.PARAMETER,
709        }
710
711        VAR_SINGLE_TOKENS = {"$"}
712
713        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 
'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 
'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': 
<TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 
'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 715    class Generator(generator.Generator):
 716        PARAMETER_TOKEN = "$"
 717        MATCHED_BY_SOURCE = False
 718        SINGLE_STRING_INTERVAL = True
 719        JOIN_HINTS = False
 720        TABLE_HINTS = False
 721        QUERY_HINTS = False
 722        AGGREGATE_FILTER_SUPPORTED = False
 723        SUPPORTS_TABLE_COPY = False
 724        COLLATE_IS_FUNC = True
 725        LIMIT_ONLY_LITERALS = True
 726        JSON_KEY_VALUE_PAIR_SEP = ","
 727        INSERT_OVERWRITE = " OVERWRITE INTO"
 728        STRUCT_DELIMITER = ("(", ")")
 729
 730        TRANSFORMS = {
 731            **generator.Generator.TRANSFORMS,
 732            exp.ArgMax: rename_func("MAX_BY"),
 733            exp.ArgMin: rename_func("MIN_BY"),
 734            exp.Array: inline_array_sql,
 735            exp.ArrayConcat: rename_func("ARRAY_CAT"),
 736            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 737            exp.AtTimeZone: lambda self, e: self.func(
 738                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 739            ),
 740            exp.BitwiseXor: rename_func("BITXOR"),
 741            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 742            exp.DateAdd: date_delta_sql("DATEADD"),
 743            exp.DateDiff: date_delta_sql("DATEDIFF"),
 744            exp.DateStrToDate: datestrtodate_sql,
 745            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 746            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 747            exp.DayOfYear: rename_func("DAYOFYEAR"),
 748            exp.Explode: rename_func("FLATTEN"),
 749            exp.Extract: rename_func("DATE_PART"),
 750            exp.FromTimeZone: lambda self, e: self.func(
 751                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 752            ),
 753            exp.GenerateSeries: lambda self, e: self.func(
 754                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 755            ),
 756            exp.GroupConcat: rename_func("LISTAGG"),
 757            exp.If: if_sql(name="IFF", false_value="NULL"),
 758            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 759            exp.JSONExtractScalar: lambda self, e: self.func(
 760                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 761            ),
 762            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 763            exp.JSONPathRoot: lambda *_: "",
 764            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 765            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 766            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 767            exp.Max: max_or_greatest,
 768            exp.Min: min_or_least,
 769            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 770            exp.PercentileCont: transforms.preprocess(
 771                [transforms.add_within_group_for_percentiles]
 772            ),
 773            exp.PercentileDisc: transforms.preprocess(
 774                [transforms.add_within_group_for_percentiles]
 775            ),
 776            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 777            exp.RegexpILike: _regexpilike_sql,
 778            exp.Rand: rename_func("RANDOM"),
 779            exp.Select: transforms.preprocess(
 780                [
 781                    transforms.eliminate_distinct_on,
 782                    transforms.explode_to_unnest(),
 783                    transforms.eliminate_semi_and_anti_joins,
 784                ]
 785            ),
 786            exp.SHA: rename_func("SHA1"),
 787            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 788            exp.StartsWith: rename_func("STARTSWITH"),
 789            exp.StrPosition: lambda self, e: self.func(
 790                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 791            ),
 792            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 793            exp.Stuff: rename_func("INSERT"),
 794            exp.TimestampDiff: lambda self, e: self.func(
 795                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 796            ),
 797            exp.TimestampTrunc: timestamptrunc_sql,
 798            exp.TimeStrToTime: timestrtotime_sql,
 799            exp.TimeToStr: lambda self, e: self.func(
 800                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
 801            ),
 802            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 803            exp.ToArray: rename_func("TO_ARRAY"),
 804            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 805            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 806            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 807            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 808            exp.TsOrDsToDate: lambda self, e: self.func(
 809                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 810            ),
 811            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 812            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 813            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 814            exp.Xor: rename_func("BOOLXOR"),
 815        }
 816
 817        SUPPORTED_JSON_PATH_PARTS = {
 818            exp.JSONPathKey,
 819            exp.JSONPathRoot,
 820            exp.JSONPathSubscript,
 821        }
 822
 823        TYPE_MAPPING = {
 824            **generator.Generator.TYPE_MAPPING,
 825            exp.DataType.Type.NESTED: "OBJECT",
 826            exp.DataType.Type.STRUCT: "OBJECT",
 827            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
 828        }
 829
 830        STAR_MAPPING = {
 831            "except": "EXCLUDE",
 832            "replace": "RENAME",
 833        }
 834
 835        PROPERTIES_LOCATION = {
 836            **generator.Generator.PROPERTIES_LOCATION,
 837            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 838            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 839        }
 840
 841        def datatype_sql(self, expression: exp.DataType) -> str:
 842            expressions = expression.expressions
 843            if (
 844                expressions
 845                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 846                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 847            ):
 848                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 849                return "OBJECT"
 850
 851            return super().datatype_sql(expression)
 852
 853        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 854            return self.func(
 855                "TO_NUMBER",
 856                expression.this,
 857                expression.args.get("format"),
 858                expression.args.get("precision"),
 859                expression.args.get("scale"),
 860            )
 861
 862        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 863            milli = expression.args.get("milli")
 864            if milli is not None:
 865                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 866                expression.set("nano", milli_to_nano)
 867
 868            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 869
 870        def trycast_sql(self, expression: exp.TryCast) -> str:
 871            value = expression.this
 872
 873            if value.type is None:
 874                from sqlglot.optimizer.annotate_types import annotate_types
 875
 876                value = annotate_types(value)
 877
 878            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 879                return super().trycast_sql(expression)
 880
 881            # TRY_CAST only works for string values in Snowflake
 882            return self.cast_sql(expression)
 883
 884        def log_sql(self, expression: exp.Log) -> str:
 885            if not expression.expression:
 886                return self.func("LN", expression.this)
 887
 888            return super().log_sql(expression)
 889
 890        def unnest_sql(self, expression: exp.Unnest) -> str:
 891            unnest_alias = expression.args.get("alias")
 892            offset = expression.args.get("offset")
 893
 894            columns = [
 895                exp.to_identifier("seq"),
 896                exp.to_identifier("key"),
 897                exp.to_identifier("path"),
 898                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 899                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 900                or exp.to_identifier("value"),
 901                exp.to_identifier("this"),
 902            ]
 903
 904            if unnest_alias:
 905                unnest_alias.set("columns", columns)
 906            else:
 907                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 908
 909            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 910            alias = self.sql(unnest_alias)
 911            alias = f" AS {alias}" if alias else ""
 912            return f"{explode}{alias}"
 913
 914        def show_sql(self, expression: exp.Show) -> str:
 915            terse = "TERSE " if expression.args.get("terse") else ""
 916            history = " HISTORY" if expression.args.get("history") else ""
 917            like = self.sql(expression, "like")
 918            like = f" LIKE {like}" if like else ""
 919
 920            scope = self.sql(expression, "scope")
 921            scope = f" {scope}" if scope else ""
 922
 923            scope_kind = self.sql(expression, "scope_kind")
 924            if scope_kind:
 925                scope_kind = f" IN {scope_kind}"
 926
 927            starts_with = self.sql(expression, "starts_with")
 928            if starts_with:
 929                starts_with = f" STARTS WITH {starts_with}"
 930
 931            limit = self.sql(expression, "limit")
 932
 933            from_ = self.sql(expression, "from")
 934            if from_:
 935                from_ = f" FROM {from_}"
 936
 937            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 938
 939        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 940            # Other dialects don't support all of the following parameters, so we need to
 941            # generate default values as necessary to ensure the transpilation is correct
 942            group = expression.args.get("group")
 943            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 944            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 945            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 946
 947            return self.func(
 948                "REGEXP_SUBSTR",
 949                expression.this,
 950                expression.expression,
 951                position,
 952                occurrence,
 953                parameters,
 954                group,
 955            )
 956
 957        def except_op(self, expression: exp.Except) -> str:
 958            if not expression.args.get("distinct"):
 959                self.unsupported("EXCEPT with All is not supported in Snowflake")
 960            return super().except_op(expression)
 961
 962        def intersect_op(self, expression: exp.Intersect) -> str:
 963            if not expression.args.get("distinct"):
 964                self.unsupported("INTERSECT with All is not supported in Snowflake")
 965            return super().intersect_op(expression)
 966
 967        def describe_sql(self, expression: exp.Describe) -> str:
 968            # Default to table if kind is unknown
 969            kind_value = expression.args.get("kind") or "TABLE"
 970            kind = f" {kind_value}" if kind_value else ""
 971            this = f" {self.sql(expression, 'this')}"
 972            expressions = self.expressions(expression, flat=True)
 973            expressions = f" {expressions}" if expressions else ""
 974            return f"DESCRIBE{kind}{this}{expressions}"
 975
 976        def generatedasidentitycolumnconstraint_sql(
 977            self, expression: exp.GeneratedAsIdentityColumnConstraint
 978        ) -> str:
 979            start = expression.args.get("start")
 980            start = f" START {start}" if start else ""
 981            increment = expression.args.get("increment")
 982            increment = f" INCREMENT {increment}" if increment else ""
 983            return f"AUTOINCREMENT{start}{increment}"
 984
 985        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 986            this = self.sql(expression, "this")
 987            return f"SWAP WITH {this}"
 988
 989        def with_properties(self, properties: exp.Properties) -> str:
 990            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
 991
 992        def cluster_sql(self, expression: exp.Cluster) -> str:
 993            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 994
 995        def struct_sql(self, expression: exp.Struct) -> str:
 996            keys = []
 997            values = []
 998
 999            for i, e in enumerate(expression.expressions):
1000                if isinstance(e, exp.PropertyEQ):
1001                    keys.append(
1002                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1003                    )
1004                    values.append(e.expression)
1005                else:
1006                    keys.append(exp.Literal.string(f"_{i}"))
1007                    values.append(e)
1008
1009            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. Default: 2.
  • indent: The indentation size in a formatted string. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing typed struct definitions to a bare OBJECT.

    Snowflake's correct syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ],
    but when a struct-like type carries explicitly typed fields we emit plain
    OBJECT instead of the field list.
    """
    fields = expression.expressions
    has_typed_field = any(isinstance(field, exp.DataType) for field in fields)

    if fields and has_typed_field and expression.is_type(*exp.DataType.STRUCT_TYPES):
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(expr[, format[, precision[, scale]]]) for Snowflake."""
    optional_args = [expression.args.get(key) for key in ("format", "precision", "scale")]
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding a `milli` argument into `nano`.

    Snowflake has no milliseconds slot in TIMESTAMP_FROM_PARTS, so milliseconds
    are scaled by 1,000,000 and moved into the nanoseconds argument.
    """
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST only for string-typed operands.

    Snowflake restricts TRY_CAST to string inputs, so any operand whose
    (possibly freshly annotated) type is neither text nor unknown falls
    back to a plain CAST.
    """
    value = expression.this

    if value.type is None:
        # Lazily annotate so untyped trees still get a best-effort decision.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; the single-argument form becomes the natural log LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN exposes fixed output columns (seq, key, path, index, value, this),
    so an alias column list is synthesized to name them: a caller-supplied
    offset replaces `index`, and the first caller-supplied alias column
    replaces `value`.
    """
    table_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    if isinstance(offset, exp.Expression):
        index_col = offset.pop()
    else:
        index_col = exp.to_identifier("index")

    caller_cols = table_alias.columns if table_alias else []
    value_col = seq_get(caller_cols, 0) or exp.to_identifier("value")

    flatten_cols = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_col,
        value_col,
        exp.to_identifier("this"),
    ]

    if table_alias:
        table_alias.set("columns", flatten_cols)
    else:
        table_alias = exp.TableAlias(this="_u", columns=flatten_cols)

    flatten_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(table_alias)
    return f"{flatten_sql} AS {alias_sql}" if alias_sql else flatten_sql
def show_sql(self, expression: exp.Show) -> str:
    """Assemble a SHOW statement from whichever optional modifiers are set.

    Output order is:
    SHOW [TERSE] <kind> [HISTORY] [LIKE ...] [IN <scope_kind>] [<scope>]
    [STARTS WITH ...] [<limit>] [FROM ...]
    """
    parts = [
        "SHOW ",
        "TERSE " if expression.args.get("terse") else "",
        expression.name,
    ]

    if expression.args.get("history"):
        parts.append(" HISTORY")

    like = self.sql(expression, "like")
    if like:
        parts.append(f" LIKE {like}")

    scope_kind = self.sql(expression, "scope_kind")
    if scope_kind:
        parts.append(f" IN {scope_kind}")

    scope = self.sql(expression, "scope")
    if scope:
        parts.append(f" {scope}")

    starts_with = self.sql(expression, "starts_with")
    if starts_with:
        parts.append(f" STARTS WITH {starts_with}")

    # The limit clause already carries its own leading text when present.
    parts.append(self.sql(expression, "limit"))

    from_ = self.sql(expression, "from")
    if from_:
        parts.append(f" FROM {from_}")

    return "".join(parts)
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR, backfilling defaults for optional arguments.

    REGEXP_SUBSTR's arguments are positional (position, occurrence,
    parameters, group), so whenever a later argument is present every
    earlier one must also be emitted; other dialects don't supply them all,
    hence the cascading defaults.
    """
    group = expression.args.get("group")

    parameters = expression.args.get("parameters")
    if not parameters and group:
        parameters = exp.Literal.string("c")

    occurrence = expression.args.get("occurrence")
    if not occurrence and parameters:
        occurrence = exp.Literal.number(1)

    position = expression.args.get("position")
    if not position and occurrence:
        position = exp.Literal.number(1)

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Render EXCEPT; Snowflake only supports the DISTINCT form, so the
    ALL variant is flagged as unsupported before delegating upstream."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")

    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render INTERSECT; Snowflake only supports the DISTINCT form, so the
    ALL variant is flagged as unsupported before delegating upstream."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")

    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when unknown.

    Note: the `or "TABLE"` fallback means `kind` is always a non-empty
    string, so the previous `if kind_value else ""` guard was dead code
    and has been removed.
    """
    kind = expression.args.get("kind") or "TABLE"
    this = self.sql(expression, "this")
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE {kind} {this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column as Snowflake's AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render ALTER TABLE's SWAP WITH <target> clause."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties unwrapped and space-separated, prefixed by a segment break."""
    prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render CLUSTER BY with a parenthesized expression list."""
    cols = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({cols})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile a struct literal into OBJECT_CONSTRUCT(key1, value1, ...).

    Named fields (PropertyEQ) keep their names as string-literal keys;
    positional fields are keyed "_<index>" to mirror anonymous struct
    fields in other dialects.
    """
    pairs = []
    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            pairs.append((key, field.expression))
        else:
            pairs.append((exp.Literal.string(f"_{i}"), field))

    # Interleave keys and values: (k1, v1), (k2, v2) -> k1, v1, k2, v2.
    interleaved = [item for pair in pairs for item in pair]
    return self.func("OBJECT_CONSTRUCT", *interleaved)
SELECT_KINDS: Tuple[str, ...] = ()
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql