Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    date_delta_sql,
  11    date_trunc_to_time,
  12    datestrtodate_sql,
  13    build_formatted_time,
  14    if_sql,
  15    inline_array_sql,
  16    max_or_greatest,
  17    min_or_least,
  18    rename_func,
  19    timestamptrunc_sql,
  20    timestrtotime_sql,
  21    var_map_sql,
  22)
  23from sqlglot.helper import flatten, is_float, is_int, seq_get
  24from sqlglot.tokens import TokenType
  25
  26if t.TYPE_CHECKING:
  27    from sqlglot._typing import E
  28
  29
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    The builder inspects the literal argument (when there is one) to produce a
    transpilable node (cast, UnixToTime, StrToTime, TsOrDsToDate); anything it
    can't classify is kept as an Anonymous function call named ``name``.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)

        if isinstance(value, exp.Literal):
            # True if the literal's text is an integer, e.g. TO_TIMESTAMP(1659981729)
            int_value = is_int(value.this)

            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    # Numeric input is an epoch value; the optional 2nd arg is its scale
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    # String input with a format: parse as STR_TO_TIME
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if len(args) == 2 and kind == exp.DataType.Type.DATE:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            # `safe` marks TRY_TO_DATE so generators can emit the error-tolerant variant
            formatted_exp.set("safe", safe)
            return formatted_exp

        return exp.Anonymous(this=name, expressions=args)

    return _builder
  60
  61
  62def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  63    expression = parser.build_var_map(args)
  64
  65    if isinstance(expression, exp.StarMap):
  66        return expression
  67
  68    return exp.Struct(
  69        expressions=[
  70            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  71        ]
  72    )
  73
  74
  75def _build_datediff(args: t.List) -> exp.DateDiff:
  76    return exp.DateDiff(
  77        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
  78    )
  79
  80
  81# https://docs.snowflake.com/en/sql-reference/functions/div0
  82def _build_if_from_div0(args: t.List) -> exp.If:
  83    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  84    true = exp.Literal.number(0)
  85    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  86    return exp.If(this=cond, true=true, false=false)
  87
  88
  89# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
  90def _build_if_from_zeroifnull(args: t.List) -> exp.If:
  91    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
  92    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
  93
  94
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    # NULLIFZERO(x) is transpiled as IF(x = 0, NULL, x)
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
  99
 100
 101def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 102    flag = expression.text("flag")
 103
 104    if "i" not in flag:
 105        flag += "i"
 106
 107    return self.func(
 108        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 109    )
 110
 111
 112def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 113    if len(args) == 3:
 114        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 115    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 116
 117
 118def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 119    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 120
 121    if not regexp_replace.args.get("replacement"):
 122        regexp_replace.set("replacement", exp.Literal.string(""))
 123
 124    return regexp_replace
 125
 126
 127def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 128    def _parse(self: Snowflake.Parser) -> exp.Show:
 129        return self._parse_show_snowflake(*args, **kwargs)
 130
 131    return _parse
 132
 133
# Canonical names for Snowflake's date/time part aliases, used by _map_date_part.
# See https://docs.snowflake.com/en/sql-reference/functions-date-time (supported
# date and time parts).
DATE_PART_MAPPING = {
    # Year
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    # Month
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    # Day / day-of-week / day-of-year
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    # Week / quarter
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    # Time-of-day
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    # Sub-second
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    # Epoch and timezone parts
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
 205
 206
 207@t.overload
 208def _map_date_part(part: exp.Expression) -> exp.Var:
 209    pass
 210
 211
 212@t.overload
 213def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 214    pass
 215
 216
 217def _map_date_part(part):
 218    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
 219    return exp.var(mapped) if mapped else part
 220
 221
 222def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 223    trunc = date_trunc_to_time(args)
 224    trunc.set("unit", _map_date_part(trunc.args["unit"]))
 225    return trunc
 226
 227
 228def _build_timestamp_from_parts(args: t.List) -> exp.Func:
 229    if len(args) == 2:
 230        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
 231        # so we parse this into Anonymous for now instead of introducing complexity
 232        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
 233
 234    return exp.TimestampFromParts.from_arg_list(args)
 235
 236
 237def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 238    """
 239    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 240    so we need to unqualify them.
 241
 242    Example:
 243        >>> from sqlglot import parse_one
 244        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 245        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 246        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 247    """
 248    if isinstance(expression, exp.Pivot) and expression.unpivot:
 249        expression = transforms.unqualify_columns(expression)
 250
 251    return expression
 252
 253
 254def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 255    assert isinstance(expression, exp.Create)
 256
 257    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 258        if expression.this in exp.DataType.NESTED_TYPES:
 259            expression.set("expressions", None)
 260        return expression
 261
 262    props = expression.args.get("properties")
 263    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 264        for schema_expression in expression.this.expressions:
 265            if isinstance(schema_expression, exp.ColumnDef):
 266                column_type = schema_expression.kind
 267                if isinstance(column_type, exp.DataType):
 268                    column_type.transform(_flatten_structured_type, copy=False)
 269
 270    return expression
 271
 272
class Snowflake(Dialect):
    """Dialect definition for Snowflake SQL."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens mapped to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 311
 312    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 313        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 314        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 315        if (
 316            isinstance(expression, exp.Identifier)
 317            and isinstance(expression.parent, exp.Table)
 318            and expression.name.lower() == "dual"
 319        ):
 320            return expression  # type: ignore
 321
 322        return super().quote_identifier(expression, identify=identify)
 323
    class Parser(parser.Parser):
        """Snowflake-specific parser: function builders, SHOW statements, staged
        files (@stage paths), AT/BEFORE time travel, and the ':' JSON operator."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Tokens that may appear inside a staged file path, e.g. @stage/dir/file
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output column names of the FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN <scope> clause refers to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's ':' JSON extraction
            segments into a single GET_PATH-style JSONExtract node."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the casts that were peeled off the path, outermost last
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr); EPOCH* parts become unix-time expressions."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Scale factor relative to seconds for the sub-second epoch parts
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a key inside brackets; map keys must be strings in Snowflake."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default column names when the
            alias doesn't name columns explicitly."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause following a table."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind so the tokens can be
                    # reinterpreted by the caller
                    self._retreat(index)

            return table

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse table parts, including staged file references and their
            optional (FILE_FORMAT => ..., PATTERN => ...) options."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(...) syntax."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement (LIKE / IN scope / STARTS WITH /
            LIMIT / FROM) into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP WITH <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] <stage path> property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Parse a stage location path (e.g. @namespace.stage/dir) as a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 674
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer settings."""

        # A quote inside a string may be escaped with a backslash or a doubled quote
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits a raw (dollar-quoted) string
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters (and is allowed inside identifiers, see below)
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a proper statement rather than an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 715
    class Generator(generator.Generator):
        """Snowflake-specific SQL generator settings."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        # Maps sqlglot expression nodes to their Snowflake SQL renderings,
        # layered on top of the base generator's transforms.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive; +1 makes it inclusive
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root is implicit in Snowflake's path syntax
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            # TIMESTAMPDIFF args are (unit, start, end) in Snowflake
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 817
        # JSON path node types this dialect can render natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }
 823
        # Dialect-specific type spellings layered on the base mapping.
        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            # Plain TIMESTAMP maps to the no-timezone variant
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }
 830
        # Keyword spellings for SELECT * modifiers (EXCLUDE/RENAME in Snowflake).
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }
 835
        # Where DDL properties are placed; these two are not supported at all.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
 841
 842        def datatype_sql(self, expression: exp.DataType) -> str:
 843            expressions = expression.expressions
 844            if (
 845                expressions
 846                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 847                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 848            ):
 849                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 850                return "OBJECT"
 851
 852            return super().datatype_sql(expression)
 853
 854        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 855            return self.func(
 856                "TO_NUMBER",
 857                expression.this,
 858                expression.args.get("format"),
 859                expression.args.get("precision"),
 860                expression.args.get("scale"),
 861            )
 862
 863        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 864            milli = expression.args.get("milli")
 865            if milli is not None:
 866                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 867                expression.set("nano", milli_to_nano)
 868
 869            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 870
 871        def trycast_sql(self, expression: exp.TryCast) -> str:
 872            value = expression.this
 873
 874            if value.type is None:
 875                from sqlglot.optimizer.annotate_types import annotate_types
 876
 877                value = annotate_types(value)
 878
 879            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 880                return super().trycast_sql(expression)
 881
 882            # TRY_CAST only works for string values in Snowflake
 883            return self.cast_sql(expression)
 884
 885        def log_sql(self, expression: exp.Log) -> str:
 886            if not expression.expression:
 887                return self.func("LN", expression.this)
 888
 889            return super().log_sql(expression)
 890
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes six fixed output columns (SEQ, KEY, PATH, INDEX,
            VALUE, THIS), so the caller's aliases must be mapped onto the
            matching positions in the table alias's column list.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An UNNEST offset alias maps onto FLATTEN's INDEX column
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied column alias names the VALUE column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias given: synthesize one so the columns are addressable
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 914
 915        def show_sql(self, expression: exp.Show) -> str:
 916            terse = "TERSE " if expression.args.get("terse") else ""
 917            history = " HISTORY" if expression.args.get("history") else ""
 918            like = self.sql(expression, "like")
 919            like = f" LIKE {like}" if like else ""
 920
 921            scope = self.sql(expression, "scope")
 922            scope = f" {scope}" if scope else ""
 923
 924            scope_kind = self.sql(expression, "scope_kind")
 925            if scope_kind:
 926                scope_kind = f" IN {scope_kind}"
 927
 928            starts_with = self.sql(expression, "starts_with")
 929            if starts_with:
 930                starts_with = f" STARTS WITH {starts_with}"
 931
 932            limit = self.sql(expression, "limit")
 933
 934            from_ = self.sql(expression, "from")
 935            if from_:
 936                from_ = f" FROM {from_}"
 937
 938            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 939
 940        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 941            # Other dialects don't support all of the following parameters, so we need to
 942            # generate default values as necessary to ensure the transpilation is correct
 943            group = expression.args.get("group")
 944            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 945            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 946            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 947
 948            return self.func(
 949                "REGEXP_SUBSTR",
 950                expression.this,
 951                expression.expression,
 952                position,
 953                occurrence,
 954                parameters,
 955                group,
 956            )
 957
 958        def except_op(self, expression: exp.Except) -> str:
 959            if not expression.args.get("distinct"):
 960                self.unsupported("EXCEPT with All is not supported in Snowflake")
 961            return super().except_op(expression)
 962
 963        def intersect_op(self, expression: exp.Intersect) -> str:
 964            if not expression.args.get("distinct"):
 965                self.unsupported("INTERSECT with All is not supported in Snowflake")
 966            return super().intersect_op(expression)
 967
 968        def describe_sql(self, expression: exp.Describe) -> str:
 969            # Default to table if kind is unknown
 970            kind_value = expression.args.get("kind") or "TABLE"
 971            kind = f" {kind_value}" if kind_value else ""
 972            this = f" {self.sql(expression, 'this')}"
 973            expressions = self.expressions(expression, flat=True)
 974            expressions = f" {expressions}" if expressions else ""
 975            return f"DESCRIBE{kind}{this}{expressions}"
 976
 977        def generatedasidentitycolumnconstraint_sql(
 978            self, expression: exp.GeneratedAsIdentityColumnConstraint
 979        ) -> str:
 980            start = expression.args.get("start")
 981            start = f" START {start}" if start else ""
 982            increment = expression.args.get("increment")
 983            increment = f" INCREMENT {increment}" if increment else ""
 984            return f"AUTOINCREMENT{start}{increment}"
 985
 986        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 987            this = self.sql(expression, "this")
 988            return f"SWAP WITH {this}"
 989
 990        def with_properties(self, properties: exp.Properties) -> str:
 991            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
 992
 993        def cluster_sql(self, expression: exp.Cluster) -> str:
 994            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 995
 996        def struct_sql(self, expression: exp.Struct) -> str:
 997            keys = []
 998            values = []
 999
1000            for i, e in enumerate(expression.expressions):
1001                if isinstance(e, exp.PropertyEQ):
1002                    keys.append(
1003                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1004                    )
1005                    values.append(e.expression)
1006                else:
1007                    keys.append(exp.Literal.string(f"_{i}"))
1008                    values.append(e)
1009
1010            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
# Maps Snowflake date/time-part aliases and abbreviations to their canonical
# part names, grouped by the canonical target for readability.
DATE_PART_MAPPING = {
    # year
    "Y": "YEAR", "YY": "YEAR", "YYY": "YEAR", "YYYY": "YEAR",
    "YR": "YEAR", "YEARS": "YEAR", "YRS": "YEAR",
    # month
    "MM": "MONTH", "MON": "MONTH", "MONS": "MONTH", "MONTHS": "MONTH",
    # day of month
    "D": "DAY", "DD": "DAY", "DAYS": "DAY", "DAYOFMONTH": "DAY",
    # day of week (plain and ISO)
    "WEEKDAY": "DAYOFWEEK", "DOW": "DAYOFWEEK", "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO", "DOW_ISO": "DAYOFWEEKISO", "DW_ISO": "DAYOFWEEKISO",
    # day of year
    "YEARDAY": "DAYOFYEAR", "DOY": "DAYOFYEAR", "DY": "DAYOFYEAR",
    # week (plain and ISO)
    "W": "WEEK", "WK": "WEEK", "WEEKOFYEAR": "WEEK", "WOY": "WEEK", "WY": "WEEK",
    "WEEK_ISO": "WEEKISO", "WEEKOFYEARISO": "WEEKISO", "WEEKOFYEAR_ISO": "WEEKISO",
    # quarter
    "Q": "QUARTER", "QTR": "QUARTER", "QTRS": "QUARTER", "QUARTERS": "QUARTER",
    # hour / minute / second
    "H": "HOUR", "HH": "HOUR", "HR": "HOUR", "HOURS": "HOUR", "HRS": "HOUR",
    "M": "MINUTE", "MI": "MINUTE", "MIN": "MINUTE", "MINUTES": "MINUTE", "MINS": "MINUTE",
    "S": "SECOND", "SEC": "SECOND", "SECONDS": "SECOND", "SECS": "SECOND",
    # sub-second
    "MS": "MILLISECOND", "MSEC": "MILLISECOND", "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND", "USEC": "MICROSECOND", "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND", "NSEC": "NANOSECOND", "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND", "NSECONDS": "NANOSECOND", "NANOSECS": "NANOSECOND",
    # epoch variants
    "EPOCH": "EPOCH_SECOND", "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    # timezone offset components
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
class Snowflake(sqlglot.dialects.dialect.Dialect):
 274class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers normalize to uppercase in Snowflake.
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values by default.
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # A CTE's column alias takes precedence over the projection's alias.
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE sizes are interpreted as percentages.
    TABLESAMPLE_SIZE_IS_PERCENT = True
 283
    # Maps Snowflake time-format tokens to strftime-style directives,
    # covering both upper- and lower-case spellings.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 312
 313    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 314        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 315        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 316        if (
 317            isinstance(expression, exp.Identifier)
 318            and isinstance(expression.parent, exp.Table)
 319            and expression.name.lower() == "dual"
 320        ):
 321            return expression  # type: ignore
 322
 323        return super().quote_identifier(expression, identify=identify)
 324
    class Parser(parser.Parser):
        """Parser for Snowflake-specific SQL syntax."""

        # Treat quoted pivot values as identifiers rather than strings.
        IDENTIFY_PIVOT_STRINGS = True

        # WINDOW is a usable table alias in Snowflake.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped to expression builders, layered on
        # top of the base parser's function table.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # ARRAY_CONTAINS takes (value, array); swap into (array, value)
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD args are (unit, amount, target)
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake, not special-cased.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not a timestamp-producing token here.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW <object> variants, including their TERSE forms.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Single-character tokens that may appear in staged-file references.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose scope defaults to SCHEMA rather than TABLE.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding `:`-path segments into GET_PATH."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the casts that were peeled off the path, innermost first
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), special-casing EPOCH* parts."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH parts become a unix-time conversion, scaled per unit
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a bracketed key/value; map keys must be strings."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's fixed column names when needed."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an AT/BEFORE time-travel clause onto `table`, if present."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete time-travel clause; rewind and leave as-is
                    self._retreat(index)

            return table

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file (@stage) sources."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting IDENTIFIER(...) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the clauses of a SHOW <kind> statement into an exp.Show."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare name: infer SCHEMA vs TABLE from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP WITH <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] <path> property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume a stage/location path into a single Var token."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 675
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer for Snowflake's lexical syntax."""

        # Both backslash and a doubled quote escape within string literals.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$-delimited (dollar-quoted) raw strings.
        RAW_STRINGS = ["$$"]
        # Snowflake also supports // line comments.
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-management statements are passed through as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session/bind parameters
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed rather than treated as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 716
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot expression trees."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array), i.e. reversed args.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive,
            # hence the + 1 (mirrors the - 1 applied at parse time).
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root ($) has no textual counterpart in GET_PATH paths.
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            # Snowflake's TIMESTAMPDIFF argument order is (unit, start, end).
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * EXCEPT/REPLACE spellings in Snowflake.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing typed structs to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            # TO_NUMBER(expr [, format] [, precision [, scale]]); self.func drops
            # trailing None arguments.
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            # TIMESTAMP_FROM_PARTS has no milliseconds argument, only nanoseconds,
            # so fold milli into nano (1 ms = 1_000_000 ns) before rendering.
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                # Lazily annotate so we can decide based on the operand's type.
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            # Single-argument LOG is the natural logarithm, spelled LN in Snowflake.
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(...)) with FLATTEN's fixed output columns."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN always produces (seq, key, path, index, value, this); map the
            # user's aliases onto the index/value slots where provided.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement with its optional clauses."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            # (REGEXP_SUBSTR's arguments are positional, so earlier ones must be filled
            # whenever a later one is present).
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            # Snowflake only supports set operations with implicit DISTINCT semantics.
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            # Snowflake only supports set operations with implicit DISTINCT semantics.
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            # Default to table if kind is unknown
            # NOTE(review): kind_value is always truthy because of the "TABLE"
            # fallback, so the conditional below can never yield "".
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            # Snowflake spells identity columns as AUTOINCREMENT [START n] [INCREMENT n].
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            # Rendered as part of ALTER TABLE ... SWAP WITH <table>.
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def with_properties(self, properties: exp.Properties) -> str:
            # Properties are rendered space-separated, without surrounding parens.
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

        def cluster_sql(self, expression: exp.Cluster) -> str:
            # Snowflake requires parentheses around the clustering keys.
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys: "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents a percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        """Add quotes to an identifier, leaving Snowflake's special DUAL untouched."""
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        # Everything else follows the default dialect quoting rules.
        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's ARRAY_CONTAINS takes (value, array); exp.ArrayContains
            # stores (array, value), hence the swapped seq_get indices.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(unit, increment, expr) -> exp.DateAdd(this=expr, ...).
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM is a plain function call, not the SQL-standard
        # TRIM(... FROM ...) construct handled by the base parser.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Punctuation allowed inside staged file paths, e.g. @stage/dir/file.csv.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of the FLATTEN table function, in order.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope is a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
460
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, including Snowflake's `:` JSON extraction syntax.

            Consecutive `col:a:b` segments are folded into a single GET_PATH-style
            exp.JSONExtract with a dotted path.
            """
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply any casts that were peeled off, innermost first.
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
490
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), normalizing EPOCH_* parts to unix-time math."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            # Normalize date-part aliases (e.g. "yr" -> "year").
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH_* parts are unix timestamps at different resolutions; model
                # them as TimeToUnix scaled by the appropriate power of ten.
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
523
524        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
525            if is_map:
526                # Keys are strings in Snowflake's objects, see also:
527                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
528                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
529                return self._parse_slice(self._parse_string())
530
531            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
532
533        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
534            lateral = super()._parse_lateral()
535            if not lateral:
536                return lateral
537
538            if isinstance(lateral.this, exp.Explode):
539                table_alias = lateral.args.get("alias")
540                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
541                if table_alias and not table_alias.args.get("columns"):
542                    table_alias.set("columns", columns)
543                elif not table_alias:
544                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
545
546            return lateral
547
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE (Time Travel) clause onto `table`."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            # Remember where we started so we can backtrack if the clause is malformed.
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # kind is e.g. TIMESTAMP, OFFSET or STATEMENT; falsy if missing.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a valid AT/BEFORE clause; rewind so the tokens can be
                    # re-parsed as something else (e.g. an alias named AT).
                    self._retreat(index)

            return table
570
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file references."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                # @stage/path style reference.
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options list.
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            # Either path may be followed by an AT/BEFORE time-travel clause.
            return self._parse_at_before(table)
604
605        def _parse_id_var(
606            self,
607            any_token: bool = True,
608            tokens: t.Optional[t.Collection[TokenType]] = None,
609        ) -> t.Optional[exp.Expression]:
610            if self._match_text_seq("IDENTIFIER", "("):
611                identifier = (
612                    super()._parse_id_var(any_token=any_token, tokens=tokens)
613                    or self._parse_string()
614                )
615                self._match_r_paren()
616                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
617
618            return super()._parse_id_var(any_token=any_token, tokens=tokens)
619
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW command into an ``exp.Show``.

            ``this`` is the object kind being shown; the optional TERSE,
            HISTORY, LIKE, IN, STARTS WITH, LIMIT and FROM parts are consumed
            in that order.
            """
            scope = None
            scope_kind = None

            # Detects SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS,
            # which is syntactically valid but has no effect on the output.
            # TERSE sits two tokens back from the current position.
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # e.g. IN DATABASE db / IN SCHEMA s: the kind comes from the keyword
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare `IN name`: infer whether the scope is a schema or a table
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    # Evaluation order matters: STARTS WITH, then LIMIT, then FROM
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
657
658        def _parse_alter_table_swap(self) -> exp.SwapTable:
659            self._match_text_seq("WITH")
660            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
661
662        def _parse_location(self) -> exp.LocationProperty:
663            self._match(TokenType.EQ)
664            return self.expression(exp.LocationProperty, this=self._parse_location_path())
665
666        def _parse_location_path(self) -> exp.Var:
667            parts = [self._advance_any(ignore_reserved=True)]
668
669            # We avoid consuming a comma token because external tables like @foo and @bar
670            # can be joined in a query with a comma separator.
671            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
672                parts.append(self._advance_any(ignore_reserved=True))
673
674            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.INT128: 'INT128'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.KILL: 'KILL'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.TRUE: 'TRUE'>, <TokenType.END: 'END'>, <TokenType.NAME: 'NAME'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.JSON: 'JSON'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATE: 'DATE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.DESC: 'DESC'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.NESTED: 'NESTED'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.ALL: 'ALL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DATE32: 'DATE32'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UNIQUEIDENTIFIER: 
'UNIQUEIDENTIFIER'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.VAR: 'VAR'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.USE: 'USE'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.VIEW: 'VIEW'>, <TokenType.ENUM: 'ENUM'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TABLE: 'TABLE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.MERGE: 'MERGE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TIME: 'TIME'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DIV: 'DIV'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.ANY: 'ANY'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INT: 'INT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.UINT128: 'UINT128'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SUPER: 'SUPER'>, <TokenType.BIT: 'BIT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.DELETE: 'DELETE'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.ANTI: 'ANTI'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.UINT: 'UINT'>, <TokenType.IS: 'IS'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.MODEL: 'MODEL'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.CACHE: 'CACHE'>, 
<TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.IPV6: 'IPV6'>, <TokenType.INT256: 'INT256'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.FALSE: 'FALSE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SOME: 'SOME'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.INET: 'INET'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.SET: 'SET'>, <TokenType.ASC: 'ASC'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.ROWS: 'ROWS'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.CASE: 'CASE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.FIRST: 'FIRST'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.TEXT: 'TEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.NEXT: 'NEXT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.NULL: 'NULL'>, <TokenType.JSONB: 'JSONB'>, <TokenType.MAP: 'MAP'>, <TokenType.ROW: 'ROW'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.XML: 'XML'>, <TokenType.BINARY: 'BINARY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.TOP: 'TOP'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': 
<function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': 
<function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function 
build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function Parser.<lambda>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _build_datediff>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': 
<function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'UNIQUE KEYS', 'SEQUENCES', 'IMPORTED KEYS', 'VIEWS', 'TABLES'}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ID_VAR_TOKENS
INTERVAL_VARS
ALIAS_TOKENS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
CONSTRAINT_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
676    class Tokenizer(tokens.Tokenizer):
677        STRING_ESCAPES = ["\\", "'"]
678        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
679        RAW_STRINGS = ["$$"]
680        COMMENTS = ["--", "//", ("/*", "*/")]
681
682        KEYWORDS = {
683            **tokens.Tokenizer.KEYWORDS,
684            "BYTEINT": TokenType.INT,
685            "CHAR VARYING": TokenType.VARCHAR,
686            "CHARACTER VARYING": TokenType.VARCHAR,
687            "EXCLUDE": TokenType.EXCEPT,
688            "ILIKE ANY": TokenType.ILIKE_ANY,
689            "LIKE ANY": TokenType.LIKE_ANY,
690            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
691            "MINUS": TokenType.EXCEPT,
692            "NCHAR VARYING": TokenType.VARCHAR,
693            "PUT": TokenType.COMMAND,
694            "REMOVE": TokenType.COMMAND,
695            "RENAME": TokenType.REPLACE,
696            "RM": TokenType.COMMAND,
697            "SAMPLE": TokenType.TABLE_SAMPLE,
698            "SQL_DOUBLE": TokenType.DOUBLE,
699            "SQL_VARCHAR": TokenType.VARCHAR,
700            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
701            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
702            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
703            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
704            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
705            "TOP": TokenType.TOP,
706        }
707
708        SINGLE_TOKENS = {
709            **tokens.Tokenizer.SINGLE_TOKENS,
710            "$": TokenType.PARAMETER,
711        }
712
713        VAR_SINGLE_TOKENS = {"$"}
714
715        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 
'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 
'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': 
<TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 
'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>}
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect."""

        # Bind/session parameters are written with a `$` sigil.
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        # Intervals take a single quoted string, e.g. INTERVAL '1 day'.
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is invoked as a function call rather than an infix clause.
        COLLATE_IS_FUNC = True
        # LIMIT/FETCH arguments must be literals.
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        # Rendered as `INSERT OVERWRITE INTO` (note the leading space).
        INSERT_OVERWRITE = " OVERWRITE INTO"
        # Struct type parameters are wrapped in parentheses, not angle brackets.
        STRUCT_DELIMITER = ("(", ")")
 731
        # Per-expression SQL renderers: start from the base generator's
        # transforms, then override/extend for Snowflake-specific spellings.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array): the AST operands are swapped here.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # `end + 1`: presumably ARRAY_GENERATE_RANGE's upper bound is
            # exclusive while GenerateSeries' is inclusive — confirm against docs.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            # POSITION(substr, str[, pos]): substring argument comes first.
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            # TIMESTAMPDIFF(unit, start, end): operand order is reversed vs. the AST.
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }
 818
        # JSON path node types this generator can render natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # NESTED and STRUCT are both rendered as OBJECT.
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            # Plain TIMESTAMP renders as the no-time-zone variant.
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * EXCEPT/REPLACE are spelled EXCLUDE/RENAME in Snowflake.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # These properties have no Snowflake DDL equivalent.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
 842
 843        def datatype_sql(self, expression: exp.DataType) -> str:
 844            expressions = expression.expressions
 845            if (
 846                expressions
 847                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 848                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 849            ):
 850                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 851                return "OBJECT"
 852
 853            return super().datatype_sql(expression)
 854
 855        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 856            return self.func(
 857                "TO_NUMBER",
 858                expression.this,
 859                expression.args.get("format"),
 860                expression.args.get("precision"),
 861                expression.args.get("scale"),
 862            )
 863
 864        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 865            milli = expression.args.get("milli")
 866            if milli is not None:
 867                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 868                expression.set("nano", milli_to_nano)
 869
 870            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 871
 872        def trycast_sql(self, expression: exp.TryCast) -> str:
 873            value = expression.this
 874
 875            if value.type is None:
 876                from sqlglot.optimizer.annotate_types import annotate_types
 877
 878                value = annotate_types(value)
 879
 880            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 881                return super().trycast_sql(expression)
 882
 883            # TRY_CAST only works for string values in Snowflake
 884            return self.cast_sql(expression)
 885
 886        def log_sql(self, expression: exp.Log) -> str:
 887            if not expression.expression:
 888                return self.func("LN", expression.this)
 889
 890            return super().log_sql(expression)
 891
 892        def unnest_sql(self, expression: exp.Unnest) -> str:
 893            unnest_alias = expression.args.get("alias")
 894            offset = expression.args.get("offset")
 895
 896            columns = [
 897                exp.to_identifier("seq"),
 898                exp.to_identifier("key"),
 899                exp.to_identifier("path"),
 900                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 901                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 902                or exp.to_identifier("value"),
 903                exp.to_identifier("this"),
 904            ]
 905
 906            if unnest_alias:
 907                unnest_alias.set("columns", columns)
 908            else:
 909                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 910
 911            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 912            alias = self.sql(unnest_alias)
 913            alias = f" AS {alias}" if alias else ""
 914            return f"{explode}{alias}"
 915
 916        def show_sql(self, expression: exp.Show) -> str:
 917            terse = "TERSE " if expression.args.get("terse") else ""
 918            history = " HISTORY" if expression.args.get("history") else ""
 919            like = self.sql(expression, "like")
 920            like = f" LIKE {like}" if like else ""
 921
 922            scope = self.sql(expression, "scope")
 923            scope = f" {scope}" if scope else ""
 924
 925            scope_kind = self.sql(expression, "scope_kind")
 926            if scope_kind:
 927                scope_kind = f" IN {scope_kind}"
 928
 929            starts_with = self.sql(expression, "starts_with")
 930            if starts_with:
 931                starts_with = f" STARTS WITH {starts_with}"
 932
 933            limit = self.sql(expression, "limit")
 934
 935            from_ = self.sql(expression, "from")
 936            if from_:
 937                from_ = f" FROM {from_}"
 938
 939            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 940
 941        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 942            # Other dialects don't support all of the following parameters, so we need to
 943            # generate default values as necessary to ensure the transpilation is correct
 944            group = expression.args.get("group")
 945            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 946            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 947            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 948
 949            return self.func(
 950                "REGEXP_SUBSTR",
 951                expression.this,
 952                expression.expression,
 953                position,
 954                occurrence,
 955                parameters,
 956                group,
 957            )
 958
 959        def except_op(self, expression: exp.Except) -> str:
 960            if not expression.args.get("distinct"):
 961                self.unsupported("EXCEPT with All is not supported in Snowflake")
 962            return super().except_op(expression)
 963
 964        def intersect_op(self, expression: exp.Intersect) -> str:
 965            if not expression.args.get("distinct"):
 966                self.unsupported("INTERSECT with All is not supported in Snowflake")
 967            return super().intersect_op(expression)
 968
 969        def describe_sql(self, expression: exp.Describe) -> str:
 970            # Default to table if kind is unknown
 971            kind_value = expression.args.get("kind") or "TABLE"
 972            kind = f" {kind_value}" if kind_value else ""
 973            this = f" {self.sql(expression, 'this')}"
 974            expressions = self.expressions(expression, flat=True)
 975            expressions = f" {expressions}" if expressions else ""
 976            return f"DESCRIBE{kind}{this}{expressions}"
 977
 978        def generatedasidentitycolumnconstraint_sql(
 979            self, expression: exp.GeneratedAsIdentityColumnConstraint
 980        ) -> str:
 981            start = expression.args.get("start")
 982            start = f" START {start}" if start else ""
 983            increment = expression.args.get("increment")
 984            increment = f" INCREMENT {increment}" if increment else ""
 985            return f"AUTOINCREMENT{start}{increment}"
 986
 987        def swaptable_sql(self, expression: exp.SwapTable) -> str:
 988            this = self.sql(expression, "this")
 989            return f"SWAP WITH {this}"
 990
 991        def with_properties(self, properties: exp.Properties) -> str:
 992            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
 993
 994        def cluster_sql(self, expression: exp.Cluster) -> str:
 995            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
 996
 997        def struct_sql(self, expression: exp.Struct) -> str:
 998            keys = []
 999            values = []
1000
1001            for i, e in enumerate(expression.expressions):
1002                if isinstance(e, exp.PropertyEQ):
1003                    keys.append(
1004                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1005                    )
1006                    values.append(e.expression)
1007                else:
1008                    keys.append(exp.Literal.string(f"_{i}"))
1009                    values.append(e)
1010
1011            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. Default: 2.
  • indent: The indentation size in a formatted string. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a DataType, collapsing structs with typed fields to OBJECT."""
    fields = expression.expressions
    if fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        if any(isinstance(field, exp.DataType) for field in fields):
            # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
            return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Generate Snowflake's TO_NUMBER(value [, format, precision, scale])."""
    optional_args = (expression.args.get(key) for key in ("format", "precision", "scale"))
    return self.func("TO_NUMBER", expression.this, *optional_args)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    # Snowflake's TIMESTAMP_FROM_PARTS has no milliseconds argument, so the
    # `milli` arg is popped (detaching it from the tree) and folded into the
    # nanoseconds argument instead (1 ms == 1_000_000 ns).
    milli = expression.args.get("milli")
    if milli is not None:
        milli_to_nano = milli.pop() * exp.Literal.number(1000000)
        expression.set("nano", milli_to_nano)

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
    # Snowflake's TRY_CAST only accepts string inputs; anything else falls
    # back to a plain CAST so the generated SQL stays valid.
    value = expression.this

    if value.type is None:
        # Local import to avoid a circular import with the optimizer package.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return super().trycast_sql(expression)

    # TRY_CAST only works for string values in Snowflake
    return self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Single-argument LOG means natural log, which Snowflake spells LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    # UNNEST has no direct Snowflake equivalent, so it is rendered as
    # TABLE(FLATTEN(INPUT => ...)). FLATTEN produces a fixed set of output
    # columns (seq, key, path, index, value, this); the caller's alias
    # columns are mapped onto them where provided.
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # A WITH OFFSET alias replaces the default "index" column; popping
        # detaches it from the tree so it is not rendered twice.
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        # The first user-supplied alias column names FLATTEN's "value" output.
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with its optional modifier clauses."""

    def clause(arg: str, template: str) -> str:
        # Render an optional arg, wrapping it in its keyword when present.
        rendered = self.sql(expression, arg)
        return template.format(rendered) if rendered else ""

    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    pieces = (
        f"SHOW {terse}{expression.name}{history}",
        clause("like", " LIKE {}"),
        clause("scope_kind", " IN {}"),
        clause("scope", " {}"),
        clause("starts_with", " STARTS WITH {}"),
        self.sql(expression, "limit"),
        clause("from", " FROM {}"),
    )
    return "".join(pieces)
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct.
    # REGEXP_SUBSTR's arguments are positional, so whenever a later argument
    # (e.g. `group`) is present, each earlier optional argument is back-filled
    # with its Snowflake default.
    group = expression.args.get("group")
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Warn on EXCEPT ALL (unavailable in Snowflake), then defer to the base."""
    distinct = expression.args.get("distinct")
    if not distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Warn on INTERSECT ALL (unavailable in Snowflake), then defer to the base."""
    distinct = expression.args.get("distinct")
    if not distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE <kind> <target> [<expressions>].

    Snowflake's DESCRIBE requires an object kind, so TABLE is used as the
    default when the parsed expression does not carry one.
    """
    # Default to table if kind is unknown. The `or "TABLE"` fallback makes
    # kind_value always truthy, so the original trailing
    # `if kind_value else ""` branch was unreachable and has been removed.
    kind_value = expression.args.get("kind") or "TABLE"
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render ALTER TABLE's atomic SWAP WITH clause."""
    return f"SWAP WITH {self.sql(expression, 'this')}"
def with_properties(self, properties: exp.Properties) -> str:
    """Emit properties unwrapped and space-separated, each on its own segment."""
    prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys are parenthesized: CLUSTER BY (<exprs>)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT(...) to OBJECT_CONSTRUCT(key1, value1, key2, value2, ...)."""
    interleaved: t.List[exp.Expression] = []

    for idx, member in enumerate(expression.expressions):
        if isinstance(member, exp.PropertyEQ):
            # Named field: identifier keys become string literals, other
            # key expressions are passed through unchanged.
            if isinstance(member.this, exp.Identifier):
                key: exp.Expression = exp.Literal.string(member.name)
            else:
                key = member.this
            interleaved.extend((key, member.expression))
        else:
            # Positional (unnamed) field: synthesize a "_<index>" key.
            interleaved.extend((exp.Literal.string(f"_{idx}"), member))

    return self.func("OBJECT_CONSTRUCT", *interleaved)
SELECT_KINDS: Tuple[str, ...] = ()
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql