Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    date_delta_sql,
  11    date_trunc_to_time,
  12    datestrtodate_sql,
  13    build_formatted_time,
  14    if_sql,
  15    inline_array_sql,
  16    max_or_greatest,
  17    min_or_least,
  18    rename_func,
  19    timestamptrunc_sql,
  20    timestrtotime_sql,
  21    var_map_sql,
  22)
  23from sqlglot.helper import flatten, is_float, is_int, seq_get
  24from sqlglot.tokens import TokenType
  25
  26if t.TYPE_CHECKING:
  27    from sqlglot._typing import E
  28
  29
  30# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
  31def _build_datetime(
  32    name: str, kind: exp.DataType.Type, safe: bool = False
  33) -> t.Callable[[t.List], exp.Func]:
  34    def _builder(args: t.List) -> exp.Func:
  35        value = seq_get(args, 0)
  36
  37        if isinstance(value, exp.Literal):
  38            int_value = is_int(value.this)
  39
  40            # Converts calls like `TO_TIME('01:02:03')` into casts
  41            if len(args) == 1 and value.is_string and not int_value:
  42                return exp.cast(value, kind)
  43
  44            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
  45            # cases so we can transpile them, since they're relatively common
  46            if kind == exp.DataType.Type.TIMESTAMP:
  47                if int_value:
  48                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
  49                if not is_float(value.this):
  50                    return build_formatted_time(exp.StrToTime, "snowflake")(args)
  51
  52        if len(args) == 2 and kind == exp.DataType.Type.DATE:
  53            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
  54            formatted_exp.set("safe", safe)
  55            return formatted_exp
  56
  57        return exp.Anonymous(this=name, expressions=args)
  58
  59    return _builder
  60
  61
  62def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  63    expression = parser.build_var_map(args)
  64
  65    if isinstance(expression, exp.StarMap):
  66        return expression
  67
  68    return exp.Struct(
  69        expressions=[
  70            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  71        ]
  72    )
  73
  74
  75def _build_datediff(args: t.List) -> exp.DateDiff:
  76    return exp.DateDiff(
  77        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
  78    )
  79
  80
  81def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  82    def _builder(args: t.List) -> E:
  83        return expr_type(
  84            this=seq_get(args, 2),
  85            expression=seq_get(args, 1),
  86            unit=_map_date_part(seq_get(args, 0)),
  87        )
  88
  89    return _builder
  90
  91
  92# https://docs.snowflake.com/en/sql-reference/functions/div0
  93def _build_if_from_div0(args: t.List) -> exp.If:
  94    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
  95    true = exp.Literal.number(0)
  96    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
  97    return exp.If(this=cond, true=true, false=false)
  98
  99
 100# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 101def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 102    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 103    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 104
 105
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Transpile NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 110
 111
 112def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 113    flag = expression.text("flag")
 114
 115    if "i" not in flag:
 116        flag += "i"
 117
 118    return self.func(
 119        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 120    )
 121
 122
 123def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
 124    if len(args) == 3:
 125        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
 126    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
 127
 128
 129def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 130    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 131
 132    if not regexp_replace.args.get("replacement"):
 133        regexp_replace.set("replacement", exp.Literal.string(""))
 134
 135    return regexp_replace
 136
 137
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Return a SHOW-statement parser that forwards its arguments on to
    Snowflake.Parser._parse_show_snowflake (used to build SHOW_PARSERS)."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 143
 144
# Snowflake date/time part abbreviations mapped to their canonical names, see
# https://docs.snowflake.com/en/sql-reference/functions-date-time#label-supported-date-time-parts
DATE_PART_MAPPING: t.Dict[str, str] = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
 216
 217
@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    """Normalize a Snowflake date-part abbreviation (e.g. "YY") to its canonical
    name (e.g. "YEAR") via DATE_PART_MAPPING; unmapped or absent parts are
    returned unchanged."""
    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
    return exp.var(mapped) if mapped else part
 231
 232
 233def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 234    trunc = date_trunc_to_time(args)
 235    trunc.set("unit", _map_date_part(trunc.args["unit"]))
 236    return trunc
 237
 238
 239def _build_timestamp_from_parts(args: t.List) -> exp.Func:
 240    if len(args) == 2:
 241        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
 242        # so we parse this into Anonymous for now instead of introducing complexity
 243        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
 244
 245    return exp.TimestampFromParts.from_arg_list(args)
 246
 247
 248def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 249    """
 250    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 251    so we need to unqualify them.
 252
 253    Example:
 254        >>> from sqlglot import parse_one
 255        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 256        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 257        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 258    """
 259    if isinstance(expression, exp.Pivot) and expression.unpivot:
 260        expression = transforms.unqualify_columns(expression)
 261
 262    return expression
 263
 264
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Strip the inner type parameters from nested column types in a CREATE
    statement (e.g. ARRAY<INT> -> ARRAY), unless the table is an Iceberg
    table, which does support structured types."""
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Dropping the nested "expressions" removes the <...> type parameters.
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    # Mutate the column's type in place (copy=False).
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 282
 283
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values by default in Snowflake.
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # A CTE's column aliases take precedence when resolving names.
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE (n) samples n percent of rows, not n rows.
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens mapped to strftime-style directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 322
 323    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 324        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 325        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 326        if (
 327            isinstance(expression, exp.Identifier)
 328            and isinstance(expression.parent, exp.Table)
 329            and expression.name.lower() == "dual"
 330        ):
 331            return expression  # type: ignore
 332
 333        return super().quote_identifier(expression, identify=identify)
 334
    class Parser(parser.Parser):
        # String literals inside PIVOT clauses are treated as identifiers.
        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake-specific function names and how to build their AST nodes.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses regular function syntax in Snowflake, so drop the special parser.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW <object kind> statements supported by _parse_show_snowflake.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Single-char tokens that may appear inside staged file paths (@stage/...).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of a LATERAL FLATTEN(...) join.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope defaults to a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 471
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's ':' semi-structured
            path syntax (e.g. col:a.b::INT) into a JSONExtract node wrapped in
            any trailing casts."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the peeled-off casts around the extraction, innermost first.
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
 501
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), mapping EPOCH* parts onto a
            TimeToUnix conversion (scaled to the requested precision) and all
            other parts onto an Extract node."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            # Canonicalize abbreviations like "YY" to "YEAR" before inspecting.
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Sub-second epoch parts multiply the unix seconds by a scale factor.
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 534
 535        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 536            if is_map:
 537                # Keys are strings in Snowflake's objects, see also:
 538                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 539                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 540                return self._parse_slice(self._parse_string())
 541
 542            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 543
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's implicit output columns
            (SEQ, KEY, PATH, INDEX, VALUE, THIS) when the alias doesn't name
            any columns itself."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the columns are addressable.
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
 558
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse a trailing AT/BEFORE time-travel clause onto ``table``.

            On a partial/failed match, the token index is rewound so the
            consumed tokens can be re-parsed as something else.
            """
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: backtrack.
                    self._retreat(index)

            return table
 581
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, additionally supporting Snowflake staged
            files ('path' or @stage/path) with optional FILE_FORMAT / PATTERN
            options, and any trailing AT/BEFORE time-travel clause."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # The format is either a string literal or a named format object.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)
 615
 616        def _parse_id_var(
 617            self,
 618            any_token: bool = True,
 619            tokens: t.Optional[t.Collection[TokenType]] = None,
 620        ) -> t.Optional[exp.Expression]:
 621            if self._match_text_seq("IDENTIFIER", "("):
 622                identifier = (
 623                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 624                    or self._parse_string()
 625                )
 626                self._match_r_paren()
 627                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 628
 629            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 630
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the body of a SHOW <kind> statement into an exp.Show node,
            including TERSE/HISTORY flags, LIKE/IN filters, STARTS WITH, LIMIT
            and FROM clauses."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: the scope kind depends on what's being shown.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 668
 669        def _parse_alter_table_swap(self) -> exp.SwapTable:
 670            self._match_text_seq("WITH")
 671            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 672
 673        def _parse_location(self) -> exp.LocationProperty:
 674            self._match(TokenType.EQ)
 675            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 676
 677        def _parse_location_path(self) -> exp.Var:
 678            parts = [self._advance_any(ignore_reserved=True)]
 679
 680            # We avoid consuming a comma token because external tables like @foo and @bar
 681            # can be joined in a query with a comma separator.
 682            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
 683                parts.append(self._advance_any(ignore_reserved=True))
 684
 685            return exp.var("".join(part.text for part in parts if part))
 686
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled quote escape inside string literals.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings.
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session parameters / positional stage references.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not passed through as a command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 727
    class Generator(generator.Generator):
        """Generates Snowflake SQL text from sqlglot expression trees."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS(<value>, <array>): the searched value comes first, so swap args
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            # CONVERT_TIMEZONE(<target_tz>, <timestamp>)
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # CONVERT_TIMEZONE(<source_tz>, 'UTC', <timestamp>)
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE has an exclusive end, so add 1 to make it inclusive
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            # The JSON path root is implicit here, so it renders as an empty string
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            # POSITION(<substr>, <string> [, <position>])
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            # TIMESTAMPDIFF(<unit>, <start>, <end>): operands are emitted in reversed order
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that can't appear inside a tabular VALUES (...) clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Struct,
        }

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, disabling the tabular form when the rows contain
            expressions listed in ``UNSUPPORTED_VALUES_EXPRESSIONS``."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing struct types with typed fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER(<expr> [, <format>] [, <precision>] [, <scale>])."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a milliseconds argument
            into the nanoseconds argument the function accepts."""
            milli = expression.args.get("milli")
            if milli is not None:
                # 1 millisecond == 1_000_000 nanoseconds
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to a plain CAST for non-string operands."""
            value = expression.this

            # Annotate lazily so we only pay for type inference when needed
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; the single-argument form is the natural log, i.e. LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)), aliasing FLATTEN's
            six output columns (seq, key, path, index, value, this)."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A requested offset column replaces the default "index" name
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied column alias maps to FLATTEN's "value" column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW command with its optional modifier clauses."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in defaults for omitted positional args."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT, warning that EXCEPT ALL is unsupported in Snowflake."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT, warning that INTERSECT ALL is unsupported in Snowflake."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE; the object kind defaults to TABLE when unspecified."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the SWAP WITH clause of an ALTER TABLE statement."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Render table properties unwrapped and space-separated."""
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with its expressions parenthesized."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...).

            Named fields (PropertyEQ) keep their names as string keys; positional
            fields get synthetic "_<index>" keys.
            """
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            # Interleave keys and values: OBJECT_CONSTRUCT(k1, v1, k2, v2, ...)
            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
# Maps every Snowflake date/time-part abbreviation to its canonical part name.
# Grouped by canonical part; insertion order matches the flat original.
DATE_PART_MAPPING = {
    abbreviation: canonical
    for canonical, abbreviations in (
        ("YEAR", ("Y", "YY", "YYY", "YYYY", "YR", "YEARS", "YRS")),
        ("MONTH", ("MM", "MON", "MONS", "MONTHS")),
        ("DAY", ("D", "DD", "DAYS", "DAYOFMONTH")),
        ("DAYOFWEEK", ("WEEKDAY", "DOW", "DW")),
        ("DAYOFWEEKISO", ("WEEKDAY_ISO", "DOW_ISO", "DW_ISO")),
        ("DAYOFYEAR", ("YEARDAY", "DOY", "DY")),
        ("WEEK", ("W", "WK", "WEEKOFYEAR", "WOY", "WY")),
        ("WEEKISO", ("WEEK_ISO", "WEEKOFYEARISO", "WEEKOFYEAR_ISO")),
        ("QUARTER", ("Q", "QTR", "QTRS", "QUARTERS")),
        ("HOUR", ("H", "HH", "HR", "HOURS", "HRS")),
        ("MINUTE", ("M", "MI", "MIN", "MINUTES", "MINS")),
        ("SECOND", ("S", "SEC", "SECONDS", "SECS")),
        ("MILLISECOND", ("MS", "MSEC", "MILLISECONDS")),
        ("MICROSECOND", ("US", "USEC", "MICROSECONDS")),
        ("NANOSECOND", ("NS", "NSEC", "NANOSEC", "NSECOND", "NSECONDS", "NANOSECS")),
        ("EPOCH_SECOND", ("EPOCH", "EPOCH_SECONDS")),
        ("EPOCH_MILLISECOND", ("EPOCH_MILLISECONDS",)),
        ("EPOCH_MICROSECOND", ("EPOCH_MICROSECONDS",)),
        ("EPOCH_NANOSECOND", ("EPOCH_NANOSECONDS",)),
        ("TIMEZONE_HOUR", ("TZH",)),
        ("TIMEZONE_MINUTE", ("TZM",)),
    )
    for abbreviation in abbreviations
}
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Maps Snowflake time-format tokens (both cases) to the strftime-style
    # directives sqlglot uses internally.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 323
 324    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 325        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 326        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 327        if (
 328            isinstance(expression, exp.Identifier)
 329            and isinstance(expression.parent, exp.Table)
 330            and expression.name.lower() == "dual"
 331        ):
 332            return expression  # type: ignore
 333
 334        return super().quote_identifier(expression, identify=identify)
 335
 336    class Parser(parser.Parser):
 337        IDENTIFY_PIVOT_STRINGS = True
 338
 339        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
 340
 341        FUNCTIONS = {
 342            **parser.Parser.FUNCTIONS,
 343            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
 344            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
 345            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
 346                this=seq_get(args, 1), expression=seq_get(args, 0)
 347            ),
 348            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
 349                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
 350                start=seq_get(args, 0),
 351                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
 352                step=seq_get(args, 2),
 353            ),
 354            "BITXOR": binary_from_function(exp.BitwiseXor),
 355            "BIT_XOR": binary_from_function(exp.BitwiseXor),
 356            "BOOLXOR": binary_from_function(exp.Xor),
 357            "CONVERT_TIMEZONE": _build_convert_timezone,
 358            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
 359            "DATE_TRUNC": _date_trunc_to_time,
 360            "DATEADD": _build_date_time_add(exp.DateAdd),
 361            "DATEDIFF": _build_datediff,
 362            "DIV0": _build_if_from_div0,
 363            "FLATTEN": exp.Explode.from_arg_list,
 364            "GET_PATH": lambda args, dialect: exp.JSONExtract(
 365                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
 366            ),
 367            "IFF": exp.If.from_arg_list,
 368            "LAST_DAY": lambda args: exp.LastDay(
 369                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
 370            ),
 371            "LISTAGG": exp.GroupConcat.from_arg_list,
 372            "MEDIAN": lambda args: exp.PercentileCont(
 373                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
 374            ),
 375            "NULLIFZERO": _build_if_from_nullifzero,
 376            "OBJECT_CONSTRUCT": _build_object_construct,
 377            "REGEXP_REPLACE": _build_regexp_replace,
 378            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
 379            "RLIKE": exp.RegexpLike.from_arg_list,
 380            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
 381            "TIMEADD": _build_date_time_add(exp.TimeAdd),
 382            "TIMEDIFF": _build_datediff,
 383            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
 384            "TIMESTAMPDIFF": _build_datediff,
 385            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
 386            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
 387            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
 388            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
 389            "TO_NUMBER": lambda args: exp.ToNumber(
 390                this=seq_get(args, 0),
 391                format=seq_get(args, 1),
 392                precision=seq_get(args, 2),
 393                scale=seq_get(args, 3),
 394            ),
 395            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
 396            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
 397            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
 398            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
 399            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
 400            "TO_VARCHAR": exp.ToChar.from_arg_list,
 401            "ZEROIFNULL": _build_if_from_zeroifnull,
 402        }
 403
 404        FUNCTION_PARSERS = {
 405            **parser.Parser.FUNCTION_PARSERS,
 406            "DATE_PART": lambda self: self._parse_date_part(),
 407            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
 408        }
 409        FUNCTION_PARSERS.pop("TRIM")
 410
 411        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
 412
 413        RANGE_PARSERS = {
 414            **parser.Parser.RANGE_PARSERS,
 415            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
 416            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
 417        }
 418
 419        ALTER_PARSERS = {
 420            **parser.Parser.ALTER_PARSERS,
 421            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
 422            "UNSET": lambda self: self.expression(
 423                exp.Set,
 424                tag=self._match_text_seq("TAG"),
 425                expressions=self._parse_csv(self._parse_id_var),
 426                unset=True,
 427            ),
 428            "SWAP": lambda self: self._parse_alter_table_swap(),
 429        }
 430
 431        STATEMENT_PARSERS = {
 432            **parser.Parser.STATEMENT_PARSERS,
 433            TokenType.SHOW: lambda self: self._parse_show(),
 434        }
 435
 436        PROPERTY_PARSERS = {
 437            **parser.Parser.PROPERTY_PARSERS,
 438            "LOCATION": lambda self: self._parse_location(),
 439        }
 440
 441        SHOW_PARSERS = {
 442            "SCHEMAS": _show_parser("SCHEMAS"),
 443            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
 444            "OBJECTS": _show_parser("OBJECTS"),
 445            "TERSE OBJECTS": _show_parser("OBJECTS"),
 446            "TABLES": _show_parser("TABLES"),
 447            "TERSE TABLES": _show_parser("TABLES"),
 448            "VIEWS": _show_parser("VIEWS"),
 449            "TERSE VIEWS": _show_parser("VIEWS"),
 450            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 451            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
 452            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 453            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
 454            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 455            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
 456            "SEQUENCES": _show_parser("SEQUENCES"),
 457            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
 458            "COLUMNS": _show_parser("COLUMNS"),
 459            "USERS": _show_parser("USERS"),
 460            "TERSE USERS": _show_parser("USERS"),
 461        }
 462
 463        STAGED_FILE_SINGLE_TOKENS = {
 464            TokenType.DOT,
 465            TokenType.MOD,
 466            TokenType.SLASH,
 467        }
 468
 469        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
 470
 471        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
 472
 473        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
 474            this = super()._parse_column_ops(this)
 475
 476            casts = []
 477            json_path = []
 478
 479            while self._match(TokenType.COLON):
 480                path = super()._parse_column_ops(self._parse_field(any_token=True))
 481
 482                # The cast :: operator has a lower precedence than the extraction operator :, so
 483                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
 484                while isinstance(path, exp.Cast):
 485                    casts.append(path.to)
 486                    path = path.this
 487
 488                if path:
 489                    json_path.append(path.sql(dialect="snowflake", copy=False))
 490
 491            if json_path:
 492                this = self.expression(
 493                    exp.JSONExtract,
 494                    this=this,
 495                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
 496                )
 497
 498                while casts:
 499                    this = self.expression(exp.Cast, this=this, to=casts.pop())
 500
 501            return this
 502
 503        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 504        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 505        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 506            this = self._parse_var() or self._parse_type()
 507
 508            if not this:
 509                return None
 510
 511            self._match(TokenType.COMMA)
 512            expression = self._parse_bitwise()
 513            this = _map_date_part(this)
 514            name = this.name.upper()
 515
 516            if name.startswith("EPOCH"):
 517                if name == "EPOCH_MILLISECOND":
 518                    scale = 10**3
 519                elif name == "EPOCH_MICROSECOND":
 520                    scale = 10**6
 521                elif name == "EPOCH_NANOSECOND":
 522                    scale = 10**9
 523                else:
 524                    scale = None
 525
 526                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 527                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 528
 529                if scale:
 530                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 531
 532                return to_unix
 533
 534            return self.expression(exp.Extract, this=this, expression=expression)
 535
 536        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 537            if is_map:
 538                # Keys are strings in Snowflake's objects, see also:
 539                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 540                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 541                return self._parse_slice(self._parse_string())
 542
 543            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
 544
 545        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 546            lateral = super()._parse_lateral()
 547            if not lateral:
 548                return lateral
 549
 550            if isinstance(lateral.this, exp.Explode):
 551                table_alias = lateral.args.get("alias")
 552                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 553                if table_alias and not table_alias.args.get("columns"):
 554                    table_alias.set("columns", columns)
 555                elif not table_alias:
 556                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 557
 558            return lateral
 559
 560        def _parse_at_before(self, table: exp.Table) -> exp.Table:
 561            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
 562            index = self._index
 563            if self._match_texts(("AT", "BEFORE")):
 564                this = self._prev.text.upper()
 565                kind = (
 566                    self._match(TokenType.L_PAREN)
 567                    and self._match_texts(self.HISTORICAL_DATA_KIND)
 568                    and self._prev.text.upper()
 569                )
 570                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
 571
 572                if expression:
 573                    self._match_r_paren()
 574                    when = self.expression(
 575                        exp.HistoricalData, this=this, kind=kind, expression=expression
 576                    )
 577                    table.set("when", when)
 578                else:
 579                    self._retreat(index)
 580
 581            return table
 582
 583        def _parse_table_parts(
 584            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
 585        ) -> exp.Table:
 586            # https://docs.snowflake.com/en/user-guide/querying-stage
 587            if self._match(TokenType.STRING, advance=False):
 588                table = self._parse_string()
 589            elif self._match_text_seq("@", advance=False):
 590                table = self._parse_location_path()
 591            else:
 592                table = None
 593
 594            if table:
 595                file_format = None
 596                pattern = None
 597
 598                self._match(TokenType.L_PAREN)
 599                while self._curr and not self._match(TokenType.R_PAREN):
 600                    if self._match_text_seq("FILE_FORMAT", "=>"):
 601                        file_format = self._parse_string() or super()._parse_table_parts(
 602                            is_db_reference=is_db_reference
 603                        )
 604                    elif self._match_text_seq("PATTERN", "=>"):
 605                        pattern = self._parse_string()
 606                    else:
 607                        break
 608
 609                    self._match(TokenType.COMMA)
 610
 611                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
 612            else:
 613                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
 614
 615            return self._parse_at_before(table)
 616
 617        def _parse_id_var(
 618            self,
 619            any_token: bool = True,
 620            tokens: t.Optional[t.Collection[TokenType]] = None,
 621        ) -> t.Optional[exp.Expression]:
 622            if self._match_text_seq("IDENTIFIER", "("):
 623                identifier = (
 624                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 625                    or self._parse_string()
 626                )
 627                self._match_r_paren()
 628                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 629
 630            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 631
 632        def _parse_show_snowflake(self, this: str) -> exp.Show:
 633            scope = None
 634            scope_kind = None
 635
 636            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
 637            # which is syntactically valid but has no effect on the output
 638            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
 639
 640            history = self._match_text_seq("HISTORY")
 641
 642            like = self._parse_string() if self._match(TokenType.LIKE) else None
 643
 644            if self._match(TokenType.IN):
 645                if self._match_text_seq("ACCOUNT"):
 646                    scope_kind = "ACCOUNT"
 647                elif self._match_set(self.DB_CREATABLES):
 648                    scope_kind = self._prev.text.upper()
 649                    if self._curr:
 650                        scope = self._parse_table_parts()
 651                elif self._curr:
 652                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
 653                    scope = self._parse_table_parts()
 654
 655            return self.expression(
 656                exp.Show,
 657                **{
 658                    "terse": terse,
 659                    "this": this,
 660                    "history": history,
 661                    "like": like,
 662                    "scope": scope,
 663                    "scope_kind": scope_kind,
 664                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
 665                    "limit": self._parse_limit(),
 666                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
 667                },
 668            )
 669
 670        def _parse_alter_table_swap(self) -> exp.SwapTable:
 671            self._match_text_seq("WITH")
 672            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 673
        def _parse_location(self) -> exp.LocationProperty:
            # LOCATION [=] <stage path>; the equals sign is optional
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 677
        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path (e.g. @stage/dir/file) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 687
    class Tokenizer(tokens.Tokenizer):
        # Snowflake accepts both backslash and doubled-quote escapes in strings
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits a dollar-quoted (raw) string
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        # Snowflake-specific keyword -> token mappings, layered on the defaults
        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session variables / positional stage references
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 728
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")

        # Expression -> SQL generators that differ from the base generator
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # Snowflake spells SELECT * EXCEPT/REPLACE as EXCLUDE/RENAME
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expressions that cannot appear inside a VALUES-as-table clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Struct,
        }
 859
 860        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 861            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 862                values_as_table = False
 863
 864            return super().values_sql(expression, values_as_table=values_as_table)
 865
        def datatype_sql(self, expression: exp.DataType) -> str:
            """Generate a data type, collapsing typed struct fields to a bare OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)
 877
 878        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 879            return self.func(
 880                "TO_NUMBER",
 881                expression.this,
 882                expression.args.get("format"),
 883                expression.args.get("precision"),
 884                expression.args.get("scale"),
 885            )
 886
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            # Snowflake's TIMESTAMP_FROM_PARTS takes nanoseconds rather than
            # milliseconds, so move any "milli" argument into the "nano" slot
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 894
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Generate TRY_CAST, degrading to a plain CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                # Imported lazily to avoid a circular dependency with the optimizer
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
 908
 909        def log_sql(self, expression: exp.Log) -> str:
 910            if not expression.expression:
 911                return self.func("LN", expression.this)
 912
 913            return super().log_sql(expression)
 914
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN produces the fixed columns SEQ, KEY, PATH, INDEX, VALUE, THIS,
            so the table alias's column list is padded to six entries, mapping any
            user-supplied offset/value aliases onto the INDEX/VALUE positions.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 938
        def show_sql(self, expression: exp.Show) -> str:
            """Render a Snowflake SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 963
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in defaults for omitted arguments.

            REGEXP_SUBSTR's arguments are positional, so if a later argument (e.g.
            group) is present, all earlier ones must be emitted too.
            """
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
 981
 982        def except_op(self, expression: exp.Except) -> str:
 983            if not expression.args.get("distinct"):
 984                self.unsupported("EXCEPT with All is not supported in Snowflake")
 985            return super().except_op(expression)
 986
 987        def intersect_op(self, expression: exp.Intersect) -> str:
 988            if not expression.args.get("distinct"):
 989                self.unsupported("INTERSECT with All is not supported in Snowflake")
 990            return super().intersect_op(expression)
 991
 992        def describe_sql(self, expression: exp.Describe) -> str:
 993            # Default to table if kind is unknown
 994            kind_value = expression.args.get("kind") or "TABLE"
 995            kind = f" {kind_value}" if kind_value else ""
 996            this = f" {self.sql(expression, 'this')}"
 997            expressions = self.expressions(expression, flat=True)
 998            expressions = f" {expressions}" if expressions else ""
 999            return f"DESCRIBE{kind}{this}{expressions}"
1000
1001        def generatedasidentitycolumnconstraint_sql(
1002            self, expression: exp.GeneratedAsIdentityColumnConstraint
1003        ) -> str:
1004            start = expression.args.get("start")
1005            start = f" START {start}" if start else ""
1006            increment = expression.args.get("increment")
1007            increment = f" INCREMENT {increment}" if increment else ""
1008            return f"AUTOINCREMENT{start}{increment}"
1009
1010        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1011            this = self.sql(expression, "this")
1012            return f"SWAP WITH {this}"
1013
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake properties are emitted space-separated on their own segment,
            # not wrapped in a WITH (...) block
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
1016
1017        def cluster_sql(self, expression: exp.Cluster) -> str:
1018            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1019
1020        def struct_sql(self, expression: exp.Struct) -> str:
1021            keys = []
1022            values = []
1023
1024            for i, e in enumerate(expression.expressions):
1025                if isinstance(e, exp.PropertyEQ):
1026                    keys.append(
1027                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1028                    )
1029                    values.append(e.expression)
1030                else:
1031                    keys.append(exp.Literal.string(f"_{i}"))
1032                    values.append(e)
1033
1034            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents a percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
324    def quote_identifier(self, expression: E, identify: bool = True) -> E:
325        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
326        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
327        if (
328            isinstance(expression, exp.Identifier)
329            and isinstance(expression.parent, exp.Table)
330            and expression.name.lower() == "dual"
331        ):
332            return expression  # type: ignore
333
334        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped onto sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain arguments, not the standard TRIM syntax
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW subcommands routed through _parse_show_snowflake
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        # Tokens allowed inside a staged file path (e.g. @stage/dir/%file.csv)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Fixed column names produced by Snowflake's FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose default scope is a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
472
        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            """Parse column operators, folding Snowflake's ':' JSON extraction
            segments into a single GET_PATH-style exp.JSONExtract node."""
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

                # Re-apply the casts that were peeled off above, innermost first
                while casts:
                    this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
502
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(<part>, <expr>), mapping EPOCH* parts to TimeToUnix."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # Multiplier relative to epoch seconds
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
535
        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a single key/value entry inside a bracket construct."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
544
        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse a LATERAL clause, padding FLATTEN aliases with the fixed
            column names Snowflake's FLATTEN table function produces."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the columns are addressable
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral
559
560        def _parse_at_before(self, table: exp.Table) -> exp.Table:
561            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
562            index = self._index
563            if self._match_texts(("AT", "BEFORE")):
564                this = self._prev.text.upper()
565                kind = (
566                    self._match(TokenType.L_PAREN)
567                    and self._match_texts(self.HISTORICAL_DATA_KIND)
568                    and self._prev.text.upper()
569                )
570                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
571
572                if expression:
573                    self._match_r_paren()
574                    when = self.expression(
575                        exp.HistoricalData, this=this, kind=kind, expression=expression
576                    )
577                    table.set("when", when)
578                else:
579                    self._retreat(index)
580
581            return table
582
583        def _parse_table_parts(
584            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
585        ) -> exp.Table:
586            # https://docs.snowflake.com/en/user-guide/querying-stage
587            if self._match(TokenType.STRING, advance=False):
588                table = self._parse_string()
589            elif self._match_text_seq("@", advance=False):
590                table = self._parse_location_path()
591            else:
592                table = None
593
594            if table:
595                file_format = None
596                pattern = None
597
598                self._match(TokenType.L_PAREN)
599                while self._curr and not self._match(TokenType.R_PAREN):
600                    if self._match_text_seq("FILE_FORMAT", "=>"):
601                        file_format = self._parse_string() or super()._parse_table_parts(
602                            is_db_reference=is_db_reference
603                        )
604                    elif self._match_text_seq("PATTERN", "=>"):
605                        pattern = self._parse_string()
606                    else:
607                        break
608
609                    self._match(TokenType.COMMA)
610
611                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
612            else:
613                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
614
615            return self._parse_at_before(table)
616
617        def _parse_id_var(
618            self,
619            any_token: bool = True,
620            tokens: t.Optional[t.Collection[TokenType]] = None,
621        ) -> t.Optional[exp.Expression]:
622            if self._match_text_seq("IDENTIFIER", "("):
623                identifier = (
624                    super()._parse_id_var(any_token=any_token, tokens=tokens)
625                    or self._parse_string()
626                )
627                self._match_r_paren()
628                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
629
630            return super()._parse_id_var(any_token=any_token, tokens=tokens)
631
632        def _parse_show_snowflake(self, this: str) -> exp.Show:
633            scope = None
634            scope_kind = None
635
636            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
637            # which is syntactically valid but has no effect on the output
638            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
639
640            history = self._match_text_seq("HISTORY")
641
642            like = self._parse_string() if self._match(TokenType.LIKE) else None
643
644            if self._match(TokenType.IN):
645                if self._match_text_seq("ACCOUNT"):
646                    scope_kind = "ACCOUNT"
647                elif self._match_set(self.DB_CREATABLES):
648                    scope_kind = self._prev.text.upper()
649                    if self._curr:
650                        scope = self._parse_table_parts()
651                elif self._curr:
652                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
653                    scope = self._parse_table_parts()
654
655            return self.expression(
656                exp.Show,
657                **{
658                    "terse": terse,
659                    "this": this,
660                    "history": history,
661                    "like": like,
662                    "scope": scope,
663                    "scope_kind": scope_kind,
664                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
665                    "limit": self._parse_limit(),
666                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
667                },
668            )
669
670        def _parse_alter_table_swap(self) -> exp.SwapTable:
671            self._match_text_seq("WITH")
672            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
673
674        def _parse_location(self) -> exp.LocationProperty:
675            self._match(TokenType.EQ)
676            return self.expression(exp.LocationProperty, this=self._parse_location_path())
677
678        def _parse_location_path(self) -> exp.Var:
679            parts = [self._advance_any(ignore_reserved=True)]
680
681            # We avoid consuming a comma token because external tables like @foo and @bar
682            # can be joined in a query with a comma separator.
683            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
684                parts.append(self._advance_any(ignore_reserved=True))
685
686            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.DECIMAL: 'DECIMAL'>, <TokenType.ASC: 'ASC'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.INDEX: 'INDEX'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.UUID: 'UUID'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.ANY: 'ANY'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.UINT128: 'UINT128'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.SEMI: 'SEMI'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.FALSE: 'FALSE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.VAR: 'VAR'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.FINAL: 'FINAL'>, <TokenType.INET: 'INET'>, <TokenType.ANTI: 'ANTI'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.INT: 'INT'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.KILL: 'KILL'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.SET: 'SET'>, <TokenType.MAP: 'MAP'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.USE: 'USE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.INT128: 'INT128'>, <TokenType.DATE: 'DATE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.BINARY: 
'BINARY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.VIEW: 'VIEW'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.IPV6: 'IPV6'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.UINT: 'UINT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.CASE: 'CASE'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.END: 'END'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MODEL: 'MODEL'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.BIT: 'BIT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.SUPER: 'SUPER'>, <TokenType.YEAR: 'YEAR'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.IS: 'IS'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.TRUNCATE: 
'TRUNCATE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.MONEY: 'MONEY'>, <TokenType.TIME: 'TIME'>, <TokenType.ROWS: 'ROWS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.TABLE: 'TABLE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.IPV4: 'IPV4'>, <TokenType.NULL: 'NULL'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.NAME: 'NAME'>, <TokenType.XML: 'XML'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.JSON: 'JSON'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INT256: 'INT256'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.DIV: 'DIV'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.RANGE: 'RANGE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.TOP: 'TOP'>, <TokenType.TEXT: 'TEXT'>, <TokenType.DESC: 'DESC'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CACHE: 'CACHE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DATE32: 'DATE32'>, <TokenType.ALL: 'ALL'>, <TokenType.SOME: 'SOME'>, <TokenType.ROW: 'ROW'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.NEXT: 'NEXT'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TSTZRANGE: 
'TSTZRANGE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_DATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateDateArray'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 
'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 
'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 
'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'MOD': <function Parser.<lambda>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 
'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _build_convert_timezone>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': 
<function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'TABLES', 'IMPORTED KEYS', 'SEQUENCES', 'VIEWS', 'UNIQUE KEYS'}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ID_VAR_TOKENS
INTERVAL_VARS
ALIAS_TOKENS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
CONSTRAINT_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Tokenizer(tokens.Tokenizer):
    """Tokenizer for the Snowflake SQL dialect.

    Extends the base tokenizer with Snowflake-specific string/comment
    syntax, extra keywords, and `$`-prefixed parameter handling.
    """

    # Both backslash and a doubled single quote escape characters inside strings.
    STRING_ESCAPES = ["\\", "'"]
    # Hex string literals are written as x'...' / X'...'.
    HEX_STRINGS = [("x'", "'"), ("X'", "'")]
    # $$ ... $$ delimits raw (dollar-quoted) strings.
    RAW_STRINGS = ["$$"]
    # Snowflake also accepts `//` for single-line comments.
    COMMENTS = ["--", "//", ("/*", "*/")]

    # NOTE: insertion order matters — the keyword trie is derived from it,
    # so entries are kept in the original order.
    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "BYTEINT": TokenType.INT,
        "CHAR VARYING": TokenType.VARCHAR,
        "CHARACTER VARYING": TokenType.VARCHAR,
        "EXCLUDE": TokenType.EXCEPT,
        "ILIKE ANY": TokenType.ILIKE_ANY,
        "LIKE ANY": TokenType.LIKE_ANY,
        "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
        "MINUS": TokenType.EXCEPT,
        "NCHAR VARYING": TokenType.VARCHAR,
        "PUT": TokenType.COMMAND,
        "REMOVE": TokenType.COMMAND,
        "RENAME": TokenType.REPLACE,
        "RM": TokenType.COMMAND,
        "SAMPLE": TokenType.TABLE_SAMPLE,
        "SQL_DOUBLE": TokenType.DOUBLE,
        "SQL_VARCHAR": TokenType.VARCHAR,
        "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
        "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
        "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
        "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
        "TIMESTAMPNTZ": TokenType.TIMESTAMP,
        "TOP": TokenType.TOP,
    }

    SINGLE_TOKENS = {
        **tokens.Tokenizer.SINGLE_TOKENS,
        # `$` introduces parameters / session variables (e.g. $1, $my_var).
        "$": TokenType.PARAMETER,
    }

    # `$` may also appear inside variable names.
    VAR_SINGLE_TOKENS = {"$"}

    # SHOW is parsed as a proper statement in Snowflake, not as an opaque command.
    COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 
'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 
'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': 
<TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 
'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 729    class Generator(generator.Generator):
 730        PARAMETER_TOKEN = "$"
 731        MATCHED_BY_SOURCE = False
 732        SINGLE_STRING_INTERVAL = True
 733        JOIN_HINTS = False
 734        TABLE_HINTS = False
 735        QUERY_HINTS = False
 736        AGGREGATE_FILTER_SUPPORTED = False
 737        SUPPORTS_TABLE_COPY = False
 738        COLLATE_IS_FUNC = True
 739        LIMIT_ONLY_LITERALS = True
 740        JSON_KEY_VALUE_PAIR_SEP = ","
 741        INSERT_OVERWRITE = " OVERWRITE INTO"
 742        STRUCT_DELIMITER = ("(", ")")
 743
 744        TRANSFORMS = {
 745            **generator.Generator.TRANSFORMS,
 746            exp.ArgMax: rename_func("MAX_BY"),
 747            exp.ArgMin: rename_func("MIN_BY"),
 748            exp.Array: inline_array_sql,
 749            exp.ArrayConcat: rename_func("ARRAY_CAT"),
 750            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 751            exp.AtTimeZone: lambda self, e: self.func(
 752                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 753            ),
 754            exp.BitwiseXor: rename_func("BITXOR"),
 755            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 756            exp.DateAdd: date_delta_sql("DATEADD"),
 757            exp.DateDiff: date_delta_sql("DATEDIFF"),
 758            exp.DateStrToDate: datestrtodate_sql,
 759            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 760            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 761            exp.DayOfYear: rename_func("DAYOFYEAR"),
 762            exp.Explode: rename_func("FLATTEN"),
 763            exp.Extract: rename_func("DATE_PART"),
 764            exp.FromTimeZone: lambda self, e: self.func(
 765                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 766            ),
 767            exp.GenerateSeries: lambda self, e: self.func(
 768                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 769            ),
 770            exp.GroupConcat: rename_func("LISTAGG"),
 771            exp.If: if_sql(name="IFF", false_value="NULL"),
 772            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 773            exp.JSONExtractScalar: lambda self, e: self.func(
 774                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 775            ),
 776            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 777            exp.JSONPathRoot: lambda *_: "",
 778            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 779            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 780            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 781            exp.Max: max_or_greatest,
 782            exp.Min: min_or_least,
 783            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 784            exp.PercentileCont: transforms.preprocess(
 785                [transforms.add_within_group_for_percentiles]
 786            ),
 787            exp.PercentileDisc: transforms.preprocess(
 788                [transforms.add_within_group_for_percentiles]
 789            ),
 790            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 791            exp.RegexpILike: _regexpilike_sql,
 792            exp.Rand: rename_func("RANDOM"),
 793            exp.Select: transforms.preprocess(
 794                [
 795                    transforms.eliminate_distinct_on,
 796                    transforms.explode_to_unnest(),
 797                    transforms.eliminate_semi_and_anti_joins,
 798                ]
 799            ),
 800            exp.SHA: rename_func("SHA1"),
 801            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 802            exp.StartsWith: rename_func("STARTSWITH"),
 803            exp.StrPosition: lambda self, e: self.func(
 804                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 805            ),
 806            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 807            exp.Stuff: rename_func("INSERT"),
 808            exp.TimeAdd: date_delta_sql("TIMEADD"),
 809            exp.TimestampDiff: lambda self, e: self.func(
 810                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 811            ),
 812            exp.TimestampTrunc: timestamptrunc_sql,
 813            exp.TimeStrToTime: timestrtotime_sql,
 814            exp.TimeToStr: lambda self, e: self.func(
 815                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
 816            ),
 817            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 818            exp.ToArray: rename_func("TO_ARRAY"),
 819            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 820            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 821            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 822            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 823            exp.TsOrDsToDate: lambda self, e: self.func(
 824                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 825            ),
 826            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 827            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 828            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 829            exp.Xor: rename_func("BOOLXOR"),
 830        }
 831
 832        SUPPORTED_JSON_PATH_PARTS = {
 833            exp.JSONPathKey,
 834            exp.JSONPathRoot,
 835            exp.JSONPathSubscript,
 836        }
 837
 838        TYPE_MAPPING = {
 839            **generator.Generator.TYPE_MAPPING,
 840            exp.DataType.Type.NESTED: "OBJECT",
 841            exp.DataType.Type.STRUCT: "OBJECT",
 842            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
 843        }
 844
 845        STAR_MAPPING = {
 846            "except": "EXCLUDE",
 847            "replace": "RENAME",
 848        }
 849
 850        PROPERTIES_LOCATION = {
 851            **generator.Generator.PROPERTIES_LOCATION,
 852            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
 853            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
 854        }
 855
 856        UNSUPPORTED_VALUES_EXPRESSIONS = {
 857            exp.Struct,
 858        }
 859
 860        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 861            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 862                values_as_table = False
 863
 864            return super().values_sql(expression, values_as_table=values_as_table)
 865
 866        def datatype_sql(self, expression: exp.DataType) -> str:
 867            expressions = expression.expressions
 868            if (
 869                expressions
 870                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 871                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 872            ):
 873                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 874                return "OBJECT"
 875
 876            return super().datatype_sql(expression)
 877
 878        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 879            return self.func(
 880                "TO_NUMBER",
 881                expression.this,
 882                expression.args.get("format"),
 883                expression.args.get("precision"),
 884                expression.args.get("scale"),
 885            )
 886
 887        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 888            milli = expression.args.get("milli")
 889            if milli is not None:
 890                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 891                expression.set("nano", milli_to_nano)
 892
 893            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 894
 895        def trycast_sql(self, expression: exp.TryCast) -> str:
 896            value = expression.this
 897
 898            if value.type is None:
 899                from sqlglot.optimizer.annotate_types import annotate_types
 900
 901                value = annotate_types(value)
 902
 903            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 904                return super().trycast_sql(expression)
 905
 906            # TRY_CAST only works for string values in Snowflake
 907            return self.cast_sql(expression)
 908
 909        def log_sql(self, expression: exp.Log) -> str:
 910            if not expression.expression:
 911                return self.func("LN", expression.this)
 912
 913            return super().log_sql(expression)
 914
 915        def unnest_sql(self, expression: exp.Unnest) -> str:
 916            unnest_alias = expression.args.get("alias")
 917            offset = expression.args.get("offset")
 918
 919            columns = [
 920                exp.to_identifier("seq"),
 921                exp.to_identifier("key"),
 922                exp.to_identifier("path"),
 923                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 924                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 925                or exp.to_identifier("value"),
 926                exp.to_identifier("this"),
 927            ]
 928
 929            if unnest_alias:
 930                unnest_alias.set("columns", columns)
 931            else:
 932                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 933
 934            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 935            alias = self.sql(unnest_alias)
 936            alias = f" AS {alias}" if alias else ""
 937            return f"{explode}{alias}"
 938
 939        def show_sql(self, expression: exp.Show) -> str:
 940            terse = "TERSE " if expression.args.get("terse") else ""
 941            history = " HISTORY" if expression.args.get("history") else ""
 942            like = self.sql(expression, "like")
 943            like = f" LIKE {like}" if like else ""
 944
 945            scope = self.sql(expression, "scope")
 946            scope = f" {scope}" if scope else ""
 947
 948            scope_kind = self.sql(expression, "scope_kind")
 949            if scope_kind:
 950                scope_kind = f" IN {scope_kind}"
 951
 952            starts_with = self.sql(expression, "starts_with")
 953            if starts_with:
 954                starts_with = f" STARTS WITH {starts_with}"
 955
 956            limit = self.sql(expression, "limit")
 957
 958            from_ = self.sql(expression, "from")
 959            if from_:
 960                from_ = f" FROM {from_}"
 961
 962            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 963
 964        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 965            # Other dialects don't support all of the following parameters, so we need to
 966            # generate default values as necessary to ensure the transpilation is correct
 967            group = expression.args.get("group")
 968            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 969            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 970            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 971
 972            return self.func(
 973                "REGEXP_SUBSTR",
 974                expression.this,
 975                expression.expression,
 976                position,
 977                occurrence,
 978                parameters,
 979                group,
 980            )
 981
 982        def except_op(self, expression: exp.Except) -> str:
 983            if not expression.args.get("distinct"):
 984                self.unsupported("EXCEPT with All is not supported in Snowflake")
 985            return super().except_op(expression)
 986
 987        def intersect_op(self, expression: exp.Intersect) -> str:
 988            if not expression.args.get("distinct"):
 989                self.unsupported("INTERSECT with All is not supported in Snowflake")
 990            return super().intersect_op(expression)
 991
 992        def describe_sql(self, expression: exp.Describe) -> str:
 993            # Default to table if kind is unknown
 994            kind_value = expression.args.get("kind") or "TABLE"
 995            kind = f" {kind_value}" if kind_value else ""
 996            this = f" {self.sql(expression, 'this')}"
 997            expressions = self.expressions(expression, flat=True)
 998            expressions = f" {expressions}" if expressions else ""
 999            return f"DESCRIBE{kind}{this}{expressions}"
1000
1001        def generatedasidentitycolumnconstraint_sql(
1002            self, expression: exp.GeneratedAsIdentityColumnConstraint
1003        ) -> str:
1004            start = expression.args.get("start")
1005            start = f" START {start}" if start else ""
1006            increment = expression.args.get("increment")
1007            increment = f" INCREMENT {increment}" if increment else ""
1008            return f"AUTOINCREMENT{start}{increment}"
1009
1010        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1011            this = self.sql(expression, "this")
1012            return f"SWAP WITH {this}"
1013
1014        def with_properties(self, properties: exp.Properties) -> str:
1015            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
1016
1017        def cluster_sql(self, expression: exp.Cluster) -> str:
1018            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1019
1020        def struct_sql(self, expression: exp.Struct) -> str:
1021            keys = []
1022            values = []
1023
1024            for i, e in enumerate(expression.expressions):
1025                if isinstance(e, exp.PropertyEQ):
1026                    keys.append(
1027                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1028                    )
1029                    values.append(e.expression)
1030                else:
1031                    keys.append(exp.Literal.string(f"_{i}"))
1032                    values.append(e)
1033
1034            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default) — never quote, except where the dialect makes quoting mandatory; True or 'always' — always quote; 'safe' — only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default) — convert names to uppercase; "lower" — convert names to lowercase; False — disable function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Timestamp'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function 
datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Struct'>}
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Render a VALUES clause, disabling VALUES-as-table syntax when needed.

    Snowflake cannot express certain nodes (e.g. Struct literals) inside the
    inline ``VALUES (...)`` table form, so fall back to the derived-table
    rendering whenever one is present anywhere in the expression tree.
    """
    has_unsupported = bool(expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS))
    return super().values_sql(
        expression, values_as_table=values_as_table and not has_unsupported
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type, collapsing typed struct definitions to plain OBJECT."""
    fields = expression.expressions
    is_typed_struct = (
        bool(fields)
        and expression.is_type(*exp.DataType.STRUCT_TYPES)
        and any(isinstance(field, exp.DataType) for field in fields)
    )
    if is_typed_struct:
        # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
        return "OBJECT"

    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(<expr>[, <format>[, <precision>[, <scale>]]])."""
    args = expression.args
    # Missing optional args are passed as None; self.func drops trailing Nones.
    optional = (args.get("format"), args.get("precision"), args.get("scale"))
    return self.func("TO_NUMBER", expression.this, *optional)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding any milliseconds into nanoseconds."""
    milliseconds = expression.args.get("milli")
    if milliseconds is not None:
        # Snowflake's TIMESTAMP_FROM_PARTS has no milliseconds slot; detach the
        # ms node and re-express it in the "nano" argument (ms * 1_000_000).
        expression.set("nano", milliseconds.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, downgrading to a plain CAST for non-string operands."""
    operand = expression.this

    if operand.type is None:
        # Infer the operand's type on demand so we can decide which form to use.
        from sqlglot.optimizer.annotate_types import annotate_types

        operand = annotate_types(operand)

    # TRY_CAST only works for string values in Snowflake
    if not operand.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument LOG is the natural logarithm, i.e. LN."""
    return (
        super().log_sql(expression)
        if expression.expression
        else self.func("LN", expression.this)
    )
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) form.

    FLATTEN produces a fixed six-column row (SEQ, KEY, PATH, INDEX, VALUE,
    THIS), so a column alias list is built mapping caller-supplied names
    onto those positional slots.
    """
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        # A WITH OFFSET alias takes the INDEX slot; .pop() detaches it from
        # the tree so it isn't rendered a second time elsewhere.
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        # The first user-supplied alias column maps onto FLATTEN's VALUE slot.
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        # No caller alias: synthesize one ("_u") so the columns can be named.
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias = self.sql(unnest_alias)
    alias = f" AS {alias}" if alias else ""
    return f"{explode}{alias}"
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command together with its optional modifiers."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    scope = f" {scope}" if scope else ""

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    # NOTE: the IN <scope_kind> clause is deliberately emitted before the scope.
    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render RegexpExtract as Snowflake's REGEXP_SUBSTR.

    REGEXP_SUBSTR's arguments are positional, so whenever a later argument
    (e.g. group) is present, every earlier optional argument must be filled
    in with its default value.
    """
    # Other dialects don't support all of the following parameters, so we need to
    # generate default values as necessary to ensure the transpilation is correct
    group = expression.args.get("group")
    # Cascade right-to-left: each default is only materialized when some
    # later positional argument forces it to be present.
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Render EXCEPT, warning that Snowflake has no EXCEPT ALL variant."""
    is_distinct = bool(expression.args.get("distinct"))
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render INTERSECT, warning that Snowflake has no INTERSECT ALL variant."""
    is_distinct = bool(expression.args.get("distinct"))
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE <kind> <name> [<expressions>].

    Snowflake requires an object kind after DESCRIBE, so TABLE is used as
    the fallback when the parsed expression does not carry one.
    """
    # Default to table if kind is unknown
    kind_value = expression.args.get("kind") or "TABLE"
    # kind_value is always truthy thanks to the fallback above, so the
    # former `if kind_value else ""` guard was dead code and is removed.
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column constraint as Snowflake's AUTOINCREMENT."""
    parts = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        parts.append(f" START {start}")
    increment = expression.args.get("increment")
    if increment:
        parts.append(f" INCREMENT {increment}")
    return "".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the SWAP WITH clause of ALTER TABLE ... SWAP WITH <target>."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties space-separated on a fresh segment, without parens."""
    prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render a CLUSTER BY clause with a parenthesized key list."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct literal as OBJECT_CONSTRUCT('k1', v1, 'k2', v2, ...).

    Named fields (PropertyEQ) keep their key; positional fields are keyed
    as '_0', '_1', ... by their position in the struct.
    """
    pairs = []

    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            # Identifier keys become string literals; any other key
            # expression is passed through unchanged.
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            pairs.extend((key, field.expression))
        else:
            pairs.extend((exp.Literal.string(f"_{i}"), field))

    return self.func("OBJECT_CONSTRUCT", *pairs)
SELECT_KINDS: Tuple[str, ...] = ()
AFTER_HAVING_MODIFIER_TRANSFORMS = {'qualify': <function Generator.<lambda>>, 'windows': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
currenttimestamp_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
arrayany_sql
generateseries_sql
partitionrange_sql
truncatetable_sql
convert_sql