Edit on GitHub

sqlglot.dialects.snowflake

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25    no_safe_divide_sql,
  26    no_timestamp_sql,
  27    timestampdiff_sql,
  28    no_make_interval_sql,
  29)
  30from sqlglot.generator import unsupported_args
  31from sqlglot.helper import flatten, is_float, is_int, seq_get
  32from sqlglot.tokens import TokenType
  33
  34if t.TYPE_CHECKING:
  35    from sqlglot._typing import E
  36
  37
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Create a parser for Snowflake's TO_DATE / TO_TIME / TO_TIMESTAMP family.

    Args:
        name: the original Snowflake function name, kept for the Anonymous fallback.
        kind: the target data type of the conversion.
        safe: True for the TRY_* variants, which produce NULL instead of erroring.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # An integer-looking first argument means an epoch value, not a formatted string
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return (
                    exp.TryCast(this=value, to=exp.DataType.build(kind))
                    if safe
                    else exp.cast(value, kind)
                )

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value and not safe:
                    # TRY_TO_TIMESTAMP('integer') is not parsed into exp.UnixToTime as
                    # it's not easily transpilable
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    expr = build_formatted_time(exp.StrToTime, "snowflake")(args)
                    expr.set("safe", safe)
                    return expr

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # Anything we can't special-case round-trips unchanged
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  75
  76
  77def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  78    expression = parser.build_var_map(args)
  79
  80    if isinstance(expression, exp.StarMap):
  81        return expression
  82
  83    return exp.Struct(
  84        expressions=[
  85            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  86        ]
  87    )
  88
  89
  90def _build_datediff(args: t.List) -> exp.DateDiff:
  91    return exp.DateDiff(
  92        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  93    )
  94
  95
  96def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  97    def _builder(args: t.List) -> E:
  98        return expr_type(
  99            this=seq_get(args, 2),
 100            expression=seq_get(args, 1),
 101            unit=map_date_part(seq_get(args, 0)),
 102        )
 103
 104    return _builder
 105
 106
 107# https://docs.snowflake.com/en/sql-reference/functions/div0
 108def _build_if_from_div0(args: t.List) -> exp.If:
 109    lhs = exp._wrap(seq_get(args, 0), exp.Binary)
 110    rhs = exp._wrap(seq_get(args, 1), exp.Binary)
 111
 112    cond = exp.EQ(this=rhs, expression=exp.Literal.number(0)).and_(
 113        exp.Is(this=lhs, expression=exp.null()).not_()
 114    )
 115    true = exp.Literal.number(0)
 116    false = exp.Div(this=lhs, expression=rhs)
 117    return exp.If(this=cond, true=true, false=false)
 118
 119
 120# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 121def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 122    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 123    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 124
 125
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    # Parse NULLIFZERO(x) as IF(x = 0, NULL, x).
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 130
 131
 132def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 133    flag = expression.text("flag")
 134
 135    if "i" not in flag:
 136        flag += "i"
 137
 138    return self.func(
 139        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 140    )
 141
 142
 143def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 144    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 145
 146    if not regexp_replace.args.get("replacement"):
 147        regexp_replace.set("replacement", exp.Literal.string(""))
 148
 149    return regexp_replace
 150
 151
 152def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
 153    def _parse(self: Snowflake.Parser) -> exp.Show:
 154        return self._parse_show_snowflake(*args, **kwargs)
 155
 156    return _parse
 157
 158
 159def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 160    trunc = date_trunc_to_time(args)
 161    trunc.set("unit", map_date_part(trunc.args["unit"]))
 162    return trunc
 163
 164
 165def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
 166    """
 167    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
 168    so we need to unqualify them.
 169
 170    Example:
 171        >>> from sqlglot import parse_one
 172        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
 173        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
 174        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
 175    """
 176    if isinstance(expression, exp.Pivot) and expression.unpivot:
 177        expression = transforms.unqualify_columns(expression)
 178
 179    return expression
 180
 181
 182def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
 183    assert isinstance(expression, exp.Create)
 184
 185    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
 186        if expression.this in exp.DataType.NESTED_TYPES:
 187            expression.set("expressions", None)
 188        return expression
 189
 190    props = expression.args.get("properties")
 191    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
 192        for schema_expression in expression.this.expressions:
 193            if isinstance(schema_expression, exp.ColumnDef):
 194                column_type = schema_expression.kind
 195                if isinstance(column_type, exp.DataType):
 196                    column_type.transform(_flatten_structured_type, copy=False)
 197
 198    return expression
 199
 200
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(...)) for Snowflake.

    Snowflake has no GENERATE_DATE_ARRAY, so the unnest is replaced by a subquery
    that generates index values with ARRAY_GENERATE_RANGE and adds each index to
    the start date.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Only single-unit interval steps (e.g. INTERVAL '1' DAY) are rewritten
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    # copy so the original alias node isn't shared with the new subquery
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 240
 241
 242def _build_regexp_extract(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
 243    def _builder(args: t.List) -> E:
 244        return expr_type(
 245            this=seq_get(args, 0),
 246            expression=seq_get(args, 1),
 247            position=seq_get(args, 2),
 248            occurrence=seq_get(args, 3),
 249            parameters=seq_get(args, 4),
 250            group=seq_get(args, 5) or exp.Literal.number(0),
 251        )
 252
 253    return _builder
 254
 255
 256def _regexpextract_sql(self, expression: exp.RegexpExtract | exp.RegexpExtractAll) -> str:
 257    # Other dialects don't support all of the following parameters, so we need to
 258    # generate default values as necessary to ensure the transpilation is correct
 259    group = expression.args.get("group")
 260
 261    # To avoid generating all these default values, we set group to None if
 262    # it's 0 (also default value) which doesn't trigger the following chain
 263    if group and group.name == "0":
 264        group = None
 265
 266    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 267    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 268    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 269
 270    return self.func(
 271        "REGEXP_SUBSTR" if isinstance(expression, exp.RegexpExtract) else "REGEXP_EXTRACT_ALL",
 272        expression.this,
 273        expression.expression,
 274        position,
 275        occurrence,
 276        parameters,
 277        group,
 278    )
 279
 280
 281def _json_extract_value_array_sql(
 282    self: Snowflake.Generator, expression: exp.JSONValueArray | exp.JSONExtractArray
 283) -> str:
 284    json_extract = exp.JSONExtract(this=expression.this, expression=expression.expression)
 285    ident = exp.to_identifier("x")
 286
 287    if isinstance(expression, exp.JSONValueArray):
 288        this: exp.Expression = exp.cast(ident, to=exp.DataType.Type.VARCHAR)
 289    else:
 290        this = exp.ParseJSON(this=f"TO_JSON({ident})")
 291
 292    transform_lambda = exp.Lambda(expressions=[ident], this=this)
 293
 294    return self.func("TRANSFORM", json_extract, transform_lambda)
 295
 296
class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE (n) means n percent, not n rows
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Maps Snowflake datetime format tokens to strftime-style directives
    # NOTE(review): lowercase "dd"/"dy" map to "%-d"/"%w" unlike their uppercase
    # counterparts ("%d"/"%a") — confirm this asymmetry is intentional
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 337
 338    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 339        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 340        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 341        if (
 342            isinstance(expression, exp.Identifier)
 343            and isinstance(expression.parent, exp.Table)
 344            and expression.name.lower() == "dual"
 345        ):
 346            return expression  # type: ignore
 347
 348        return super().quote_identifier(expression, identify=identify)
 349
    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `x:y` is Snowflake's VARIANT field access, not a slice
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function name -> builder for the corresponding AST node
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes for binary input in Snowflake
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW <kind> statements we understand; TERSE variants share the same parser
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Tokens that may appear inside a staged file path (@stage/path%foo.bar)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output columns of the FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope defaults to SCHEMA rather than TABLE
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Typed lambda args are parsed as casts; strip the cast for the arg list
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 528
 529        def _negate_range(
 530            self, this: t.Optional[exp.Expression] = None
 531        ) -> t.Optional[exp.Expression]:
 532            if not this:
 533                return this
 534
 535            query = this.args.get("query")
 536            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 537                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 538                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 539                # which can produce different results (most likely a SnowFlake bug).
 540                #
 541                # https://docs.snowflake.com/en/sql-reference/functions/in
 542                # Context: https://github.com/tobymao/sqlglot/issues/3890
 543                return self.expression(
 544                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 545                )
 546
 547            return self.expression(exp.Not, this=this)
 548
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse WITH MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # The WITH keyword is optional; if we got here on MASKING/PROJECTION/TAG
            # directly, step back so the keyword itself can be matched below
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # qualified policy names are parsed as columns; store them as dots
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 574
 575        def _parse_create(self) -> exp.Create | exp.Command:
 576            expression = super()._parse_create()
 577            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 578                # Replace the Table node with the enclosed Identifier
 579                expression.this.replace(expression.this.this)
 580
 581            return expression
 582
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(<part>, <expr>), mapping EPOCH parts to unix-time math."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            # normalize Snowflake date-part aliases (e.g. "yy" -> year)
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH_* parts become a unix timestamp scaled to the requested precision
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)
 615
 616        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 617            if is_map:
 618                # Keys are strings in Snowflake's objects, see also:
 619                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 620                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 621                return self._parse_slice(self._parse_string())
 622
 623            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 624
 625        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 626            lateral = super()._parse_lateral()
 627            if not lateral:
 628                return lateral
 629
 630            if isinstance(lateral.this, exp.Explode):
 631                table_alias = lateral.args.get("alias")
 632                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 633                if table_alias and not table_alias.args.get("columns"):
 634                    table_alias.set("columns", columns)
 635                elif not table_alias:
 636                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 637
 638            return lateral
 639
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged file locations.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @stage path is a staged file, not a regular table
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optionally consume (FILE_FORMAT => ..., PATTERN => ...) options
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # the format is either a string or a named file format object
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 673
        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(...) syntax."""
            if self._match_text_seq("IDENTIFIER", "("):
                # IDENTIFIER() accepts either a bare identifier or a string/session variable
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 688
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a SHOW statement into an exp.Show node.

            Args:
                this: the SHOW kind, e.g. "TABLES" or "PRIMARY KEYS".
            """
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # no explicit scope keyword: infer it from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 726
 727        def _parse_location_property(self) -> exp.LocationProperty:
 728            self._match(TokenType.EQ)
 729            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 730
 731        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 732            # Parse either a subquery or a staged file
 733            return (
 734                self._parse_select(table=True, parse_subquery_alias=False)
 735                if self._match(TokenType.L_PAREN, advance=False)
 736                else self._parse_table_parts()
 737            )
 738
 739        def _parse_location_path(self) -> exp.Var:
 740            parts = [self._advance_any(ignore_reserved=True)]
 741
 742            # We avoid consuming a comma token because external tables like @foo and @bar
 743            # can be joined in a query with a comma separator, as well as closing paren
 744            # in case of subqueries
 745            while self._is_connected() and not self._match_set(
 746                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 747            ):
 748                parts.append(self._advance_any(ignore_reserved=True))
 749
 750            return exp.var("".join(part.text for part in parts if part))
 751
 752        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 753            this = super()._parse_lambda_arg()
 754
 755            if not this:
 756                return this
 757
 758            typ = self._parse_types()
 759
 760            if typ:
 761                return self.expression(exp.Cast, this=this, to=typ)
 762
 763            return this
 764
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer for Snowflake SQL."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Drop the "/*+" hint-comment keyword inherited from the base tokenizer.
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # "$" introduces parameters (see Generator.PARAMETER_TOKEN).
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into an exp.Show (see Parser.SHOW_PARSERS) rather than
        # being swallowed as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 807
    class Generator(generator.Generator):
        """Renders sqlglot expression trees as Snowflake SQL text."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"

        # Per-expression SQL renderers; entries here override the base generator's.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), i.e. the reverse argument order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive while ARRAY_GENERATE_RANGE's end is exclusive,
            # hence the + 1 on the end bound.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES-as-table clause;
        # see values_sql below.
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties space-separated and unwrapped (no WITH keyword)."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, falling back to the non-table form when it contains
            expressions Snowflake does not support there."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing typed struct fields to plain OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision and scale arguments."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, converting a milliseconds argument to the
            nanoseconds argument Snowflake expects."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
            """Render a cast; geospatial casts use TO_GEOGRAPHY/TO_GEOMETRY functions."""
            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
                return self.func("TO_GEOGRAPHY", expression.this)
            if expression.is_type(exp.DataType.Type.GEOMETRY):
                return self.func("TO_GEOMETRY", expression.this)

            return super().cast_sql(expression, safe_prefix=safe_prefix)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, downgrading to CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; the single-argument form maps to the natural log LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(INPUT => ...)) with a column alias list."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN exposes exactly these six output columns, in this order; the
            # OFFSET and VALUE slots are overridden by the caller's aliases if given.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW command with its optional TERSE/HISTORY/LIKE/IN/... clauses."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE when absent."""
            # Default to table if kind is unknown
            # NOTE(review): kind_value is always truthy after the `or "TABLE"` fallback,
            # so the conditional below never yields "".
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render a clustering key as CLUSTER BY (<exprs>)."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key1, value1, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        @unsupported_args("weight", "accuracy")
        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render an approximate quantile as APPROX_PERCENTILE(this, quantile)."""
            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render an ALTER ... SET clause with optional stage file-format,
            copy-options and tag segments."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"

        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render string-to-time as TO_TIMESTAMP or TRY_TO_TIMESTAMP when safe."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )

        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render timestamp subtraction as TIMESTAMPADD with a negated amount."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )

        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSON extraction as GET_PATH, parsing string inputs first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )

        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
            """Render time formatting as TO_CHAR, casting string inputs to TIMESTAMP."""
            this = expression.this

            if this.is_string:
                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)

            return self.func("TO_CHAR", this, self.format_time(expression))
class Snowflake(sqlglot.dialects.dialect.Dialect):
 298class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False
    ARRAY_AGG_INCLUDES_NULLS = None

    # Maps Snowflake time-format tokens to strftime-style directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 338
 339    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 340        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 341        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 342        if (
 343            isinstance(expression, exp.Identifier)
 344            and isinstance(expression.parent, exp.Table)
 345            and expression.name.lower() == "dual"
 346        ):
 347            return expression  # type: ignore
 348
 349        return super().quote_identifier(expression, identify=identify)
 350
 351    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake-specific function-name to expression-builder overrides.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # ARRAY_CONTAINS takes (value, array); the expression stores (array, value).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "EDITDISTANCE": lambda args: exp.Levenshtein(
                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
            ),
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TRY_TO_TIMESTAMP": _build_datetime(
                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
            ),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # The base TRIM parser is dropped; TRIM is parsed as a regular function here.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # Dispatches SHOW <kind> statements to _parse_show_snowflake with the kind name.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Strip the Cast wrappers that _parse_lambda_arg adds for typed arguments.
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 529
 530        def _negate_range(
 531            self, this: t.Optional[exp.Expression] = None
 532        ) -> t.Optional[exp.Expression]:
 533            if not this:
 534                return this
 535
 536            query = this.args.get("query")
 537            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 538                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 539                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 540                # which can produce different results (most likely a SnowFlake bug).
 541                #
 542                # https://docs.snowflake.com/en/sql-reference/functions/in
 543                # Context: https://github.com/tobymao/sqlglot/issues/3890
 544                return self.expression(
 545                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 546                )
 547
 548            return self.expression(exp.Not, this=this)
 549
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a WITH-style column constraint: MASKING POLICY, PROJECTION POLICY or TAG.

            Returns None when none of the known constraint kinds match.
            """
            # These constraints may also appear without a preceding WITH keyword; when
            # WITH wasn't the previous token, step back so the constraint keyword that
            # triggered this handler can be matched again below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Normalize a qualified policy name (parsed as a Column) into a Dot chain
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 575
 576        def _parse_create(self) -> exp.Create | exp.Command:
 577            expression = super()._parse_create()
 578            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 579                # Replace the Table node with the enclosed Identifier
 580                expression.this.replace(expression.this.this)
 581
 582            return expression
 583
 584        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 585        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 586        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 587            this = self._parse_var() or self._parse_type()
 588
 589            if not this:
 590                return None
 591
 592            self._match(TokenType.COMMA)
 593            expression = self._parse_bitwise()
 594            this = map_date_part(this)
 595            name = this.name.upper()
 596
 597            if name.startswith("EPOCH"):
 598                if name == "EPOCH_MILLISECOND":
 599                    scale = 10**3
 600                elif name == "EPOCH_MICROSECOND":
 601                    scale = 10**6
 602                elif name == "EPOCH_NANOSECOND":
 603                    scale = 10**9
 604                else:
 605                    scale = None
 606
 607                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 608                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 609
 610                if scale:
 611                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 612
 613                return to_unix
 614
 615            return self.expression(exp.Extract, this=this, expression=expression)
 616
 617        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 618            if is_map:
 619                # Keys are strings in Snowflake's objects, see also:
 620                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 621                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 622                return self._parse_slice(self._parse_string())
 623
 624            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 625
 626        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 627            lateral = super()._parse_lateral()
 628            if not lateral:
 629                return lateral
 630
 631            if isinstance(lateral.this, exp.Explode):
 632                table_alias = lateral.args.get("alias")
 633                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 634                if table_alias and not table_alias.args.get("columns"):
 635                    table_alias.set("columns", columns)
 636                elif not table_alias:
 637                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 638
 639            return lateral
 640
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, additionally supporting staged-file references."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files accept optional named arguments, e.g.
                # (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT can be a string literal or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 674
 675        def _parse_id_var(
 676            self,
 677            any_token: bool = True,
 678            tokens: t.Optional[t.Collection[TokenType]] = None,
 679        ) -> t.Optional[exp.Expression]:
 680            if self._match_text_seq("IDENTIFIER", "("):
 681                identifier = (
 682                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 683                    or self._parse_string()
 684                )
 685                self._match_r_paren()
 686                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 687
 688            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 689
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a Snowflake SHOW command into an exp.Show node."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Unqualified scope: infer schema vs table from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 727
 728        def _parse_location_property(self) -> exp.LocationProperty:
 729            self._match(TokenType.EQ)
 730            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 731
 732        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 733            # Parse either a subquery or a staged file
 734            return (
 735                self._parse_select(table=True, parse_subquery_alias=False)
 736                if self._match(TokenType.L_PAREN, advance=False)
 737                else self._parse_table_parts()
 738            )
 739
        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path (e.g. @stage/dir/file) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 752
 753        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 754            this = super()._parse_lambda_arg()
 755
 756            if not this:
 757                return this
 758
 759            typ = self._parse_types()
 760
 761            if typ:
 762                return self.expression(exp.Cast, this=this, to=typ)
 763
 764            return this
 765
    class Tokenizer(tokens.Tokenizer):
        # A quote inside a string can be escaped with a backslash or by doubling it
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage file management statements are tokenized as opaque commands
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # `/*+` is not treated as a hint-comment marker in this dialect
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a regular statement here, not as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 808
    class Generator(generator.Generator):
        # Session parameters are referenced with a leading $
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells `* EXCEPT (...)` as `* EXCLUDE (...)`
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True
        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
        SUPPORTS_MEDIAN = True
        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 832
        # Per-node SQL rendering overrides for Snowflake, on top of the base generator
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), the reverse of the AST order
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive, hence +1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF argument order is (unit, start, end)
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }
 935
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Structured/nested types are rendered as the semi-structured OBJECT type
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Node types that cannot appear inside a table-style VALUES clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 960
 961        def with_properties(self, properties: exp.Properties) -> str:
 962            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 963
 964        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 965            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 966                values_as_table = False
 967
 968            return super().values_sql(expression, values_as_table=values_as_table)
 969
 970        def datatype_sql(self, expression: exp.DataType) -> str:
 971            expressions = expression.expressions
 972            if (
 973                expressions
 974                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 975                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 976            ):
 977                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 978                return "OBJECT"
 979
 980            return super().datatype_sql(expression)
 981
 982        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 983            return self.func(
 984                "TO_NUMBER",
 985                expression.this,
 986                expression.args.get("format"),
 987                expression.args.get("precision"),
 988                expression.args.get("scale"),
 989            )
 990
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a `milli` argument into `nano`.

            The `milli` argument is detached from the expression and re-attached
            as `nano` multiplied by 1,000,000.
            """
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 998
 999        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1000            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1001                return self.func("TO_GEOGRAPHY", expression.this)
1002            if expression.is_type(exp.DataType.Type.GEOMETRY):
1003                return self.func("TO_GEOMETRY", expression.this)
1004
1005            return super().cast_sql(expression, safe_prefix=safe_prefix)
1006
        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to a plain CAST for non-string operands."""
            value = expression.this

            # Annotate the operand's type on demand so we can decide which form to emit
            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)
1020
1021        def log_sql(self, expression: exp.Log) -> str:
1022            if not expression.expression:
1023                return self.func("LN", expression.this)
1024
1025            return super().log_sql(expression)
1026
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST via Snowflake's TABLE(FLATTEN(INPUT => ...)) construct.

            The alias column list is populated with FLATTEN's output columns
            (seq, key, path, index, value, this) so positional references resolve.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit offset alias replaces the default "index" column name
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # Reuse the first user-supplied column alias for the "value" column, if any
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
1050
1051        def show_sql(self, expression: exp.Show) -> str:
1052            terse = "TERSE " if expression.args.get("terse") else ""
1053            history = " HISTORY" if expression.args.get("history") else ""
1054            like = self.sql(expression, "like")
1055            like = f" LIKE {like}" if like else ""
1056
1057            scope = self.sql(expression, "scope")
1058            scope = f" {scope}" if scope else ""
1059
1060            scope_kind = self.sql(expression, "scope_kind")
1061            if scope_kind:
1062                scope_kind = f" IN {scope_kind}"
1063
1064            starts_with = self.sql(expression, "starts_with")
1065            if starts_with:
1066                starts_with = f" STARTS WITH {starts_with}"
1067
1068            limit = self.sql(expression, "limit")
1069
1070            from_ = self.sql(expression, "from")
1071            if from_:
1072                from_ = f" FROM {from_}"
1073
1074            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1075
1076        def describe_sql(self, expression: exp.Describe) -> str:
1077            # Default to table if kind is unknown
1078            kind_value = expression.args.get("kind") or "TABLE"
1079            kind = f" {kind_value}" if kind_value else ""
1080            this = f" {self.sql(expression, 'this')}"
1081            expressions = self.expressions(expression, flat=True)
1082            expressions = f" {expressions}" if expressions else ""
1083            return f"DESCRIBE{kind}{this}{expressions}"
1084
1085        def generatedasidentitycolumnconstraint_sql(
1086            self, expression: exp.GeneratedAsIdentityColumnConstraint
1087        ) -> str:
1088            start = expression.args.get("start")
1089            start = f" START {start}" if start else ""
1090            increment = expression.args.get("increment")
1091            increment = f" INCREMENT {increment}" if increment else ""
1092            return f"AUTOINCREMENT{start}{increment}"
1093
1094        def cluster_sql(self, expression: exp.Cluster) -> str:
1095            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1096
1097        def struct_sql(self, expression: exp.Struct) -> str:
1098            keys = []
1099            values = []
1100
1101            for i, e in enumerate(expression.expressions):
1102                if isinstance(e, exp.PropertyEQ):
1103                    keys.append(
1104                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1105                    )
1106                    values.append(e.expression)
1107                else:
1108                    keys.append(exp.Literal.string(f"_{i}"))
1109                    values.append(e)
1110
1111            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1112
1113        @unsupported_args("weight", "accuracy")
1114        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1115            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1116
1117        def alterset_sql(self, expression: exp.AlterSet) -> str:
1118            exprs = self.expressions(expression, flat=True)
1119            exprs = f" {exprs}" if exprs else ""
1120            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1121            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1122            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1123            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1124            tag = self.expressions(expression, key="tag", flat=True)
1125            tag = f" TAG {tag}" if tag else ""
1126
1127            return f"SET{exprs}{file_format}{copy_options}{tag}"
1128
        def strtotime_sql(self, expression: exp.StrToTime) -> str:
            """Render StrToTime as [TRY_]TO_TIMESTAMP(<expr>, <format>)."""
            safe_prefix = "TRY_" if expression.args.get("safe") else ""
            return self.func(
                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
            )
1134
        def timestampsub_sql(self, expression: exp.TimestampSub) -> str:
            """Render TimestampSub by negating the interval and emitting a TimestampAdd."""
            return self.sql(
                exp.TimestampAdd(
                    this=expression.this,
                    expression=expression.expression * -1,
                    unit=expression.unit,
                )
            )
1143
        def jsonextract_sql(self, expression: exp.JSONExtract) -> str:
            """Render JSONExtract as GET_PATH, parsing string inputs as JSON first."""
            this = expression.this

            # JSON strings are valid coming from other dialects such as BQ
            return self.func(
                "GET_PATH",
                exp.ParseJSON(this=this) if this.is_string else this,
                expression.expression,
            )
1153
1154        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1155            this = expression.this
1156
1157            if this.is_string:
1158                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1159
1160            return self.func("TO_CHAR", this, self.format_time(expression))
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by a comma or by whitespace.

ARRAY_AGG_INCLUDES_NULLS: Optional[bool] = None

Whether ArrayAgg needs to filter NULL values.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
339    def quote_identifier(self, expression: E, identify: bool = True) -> E:
340        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
341        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
342        if (
343            isinstance(expression, exp.Identifier)
344            and isinstance(expression.parent, exp.Table)
345            and expression.name.lower() == "dual"
346        ):
347            return expression  # type: ignore
348
349        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. `\\n`) to its unescaped version (the corresponding control character, e.g. a newline).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
351    class Parser(parser.Parser):
352        IDENTIFY_PIVOT_STRINGS = True
353        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
354        COLON_IS_VARIANT_EXTRACT = True
355
356        ID_VAR_TOKENS = {
357            *parser.Parser.ID_VAR_TOKENS,
358            TokenType.MATCH_CONDITION,
359        }
360
361        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
362        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
363
364        FUNCTIONS = {
365            **parser.Parser.FUNCTIONS,
366            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
367            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
368            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
369                this=seq_get(args, 1), expression=seq_get(args, 0)
370            ),
371            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
372                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
373                start=seq_get(args, 0),
374                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
375                step=seq_get(args, 2),
376            ),
377            "BITXOR": binary_from_function(exp.BitwiseXor),
378            "BIT_XOR": binary_from_function(exp.BitwiseXor),
379            "BOOLXOR": binary_from_function(exp.Xor),
380            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
381            "DATE_TRUNC": _date_trunc_to_time,
382            "DATEADD": _build_date_time_add(exp.DateAdd),
383            "DATEDIFF": _build_datediff,
384            "DIV0": _build_if_from_div0,
385            "EDITDISTANCE": lambda args: exp.Levenshtein(
386                this=seq_get(args, 0), expression=seq_get(args, 1), max_dist=seq_get(args, 2)
387            ),
388            "FLATTEN": exp.Explode.from_arg_list,
389            "GET_PATH": lambda args, dialect: exp.JSONExtract(
390                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
391            ),
392            "IFF": exp.If.from_arg_list,
393            "LAST_DAY": lambda args: exp.LastDay(
394                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
395            ),
396            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
397            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
398            "LISTAGG": exp.GroupConcat.from_arg_list,
399            "NULLIFZERO": _build_if_from_nullifzero,
400            "OBJECT_CONSTRUCT": _build_object_construct,
401            "REGEXP_EXTRACT_ALL": _build_regexp_extract(exp.RegexpExtractAll),
402            "REGEXP_REPLACE": _build_regexp_replace,
403            "REGEXP_SUBSTR": _build_regexp_extract(exp.RegexpExtract),
404            "REGEXP_SUBSTR_ALL": _build_regexp_extract(exp.RegexpExtractAll),
405            "RLIKE": exp.RegexpLike.from_arg_list,
406            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
407            "TIMEADD": _build_date_time_add(exp.TimeAdd),
408            "TIMEDIFF": _build_datediff,
409            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
410            "TIMESTAMPDIFF": _build_datediff,
411            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
412            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
413            "TIMESTAMPNTZFROMPARTS": build_timestamp_from_parts,
414            "TIMESTAMP_NTZ_FROM_PARTS": build_timestamp_from_parts,
415            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
416            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
417            "TRY_TO_TIMESTAMP": _build_datetime(
418                "TRY_TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP, safe=True
419            ),
420            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
421            "TO_NUMBER": lambda args: exp.ToNumber(
422                this=seq_get(args, 0),
423                format=seq_get(args, 1),
424                precision=seq_get(args, 2),
425                scale=seq_get(args, 3),
426            ),
427            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
428            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
429            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
430            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
431            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
432            "TO_VARCHAR": exp.ToChar.from_arg_list,
433            "ZEROIFNULL": _build_if_from_zeroifnull,
434        }
435
436        FUNCTION_PARSERS = {
437            **parser.Parser.FUNCTION_PARSERS,
438            "DATE_PART": lambda self: self._parse_date_part(),
439            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
440        }
441        FUNCTION_PARSERS.pop("TRIM")
442
443        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
444
445        RANGE_PARSERS = {
446            **parser.Parser.RANGE_PARSERS,
447            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
448            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
449        }
450
451        ALTER_PARSERS = {
452            **parser.Parser.ALTER_PARSERS,
453            "UNSET": lambda self: self.expression(
454                exp.Set,
455                tag=self._match_text_seq("TAG"),
456                expressions=self._parse_csv(self._parse_id_var),
457                unset=True,
458            ),
459        }
460
461        STATEMENT_PARSERS = {
462            **parser.Parser.STATEMENT_PARSERS,
463            TokenType.SHOW: lambda self: self._parse_show(),
464        }
465
466        PROPERTY_PARSERS = {
467            **parser.Parser.PROPERTY_PARSERS,
468            "LOCATION": lambda self: self._parse_location_property(),
469        }
470
471        TYPE_CONVERTERS = {
472            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
473            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
474        }
475
476        SHOW_PARSERS = {
477            "SCHEMAS": _show_parser("SCHEMAS"),
478            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
479            "OBJECTS": _show_parser("OBJECTS"),
480            "TERSE OBJECTS": _show_parser("OBJECTS"),
481            "TABLES": _show_parser("TABLES"),
482            "TERSE TABLES": _show_parser("TABLES"),
483            "VIEWS": _show_parser("VIEWS"),
484            "TERSE VIEWS": _show_parser("VIEWS"),
485            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
486            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
487            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
488            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
489            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
490            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
491            "SEQUENCES": _show_parser("SEQUENCES"),
492            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
493            "COLUMNS": _show_parser("COLUMNS"),
494            "USERS": _show_parser("USERS"),
495            "TERSE USERS": _show_parser("USERS"),
496        }
497
498        CONSTRAINT_PARSERS = {
499            **parser.Parser.CONSTRAINT_PARSERS,
500            "WITH": lambda self: self._parse_with_constraint(),
501            "MASKING": lambda self: self._parse_with_constraint(),
502            "PROJECTION": lambda self: self._parse_with_constraint(),
503            "TAG": lambda self: self._parse_with_constraint(),
504        }
505
506        STAGED_FILE_SINGLE_TOKENS = {
507            TokenType.DOT,
508            TokenType.MOD,
509            TokenType.SLASH,
510        }
511
512        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
513
514        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}
515
516        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}
517
518        LAMBDAS = {
519            **parser.Parser.LAMBDAS,
520            TokenType.ARROW: lambda self, expressions: self.expression(
521                exp.Lambda,
522                this=self._replace_lambda(
523                    self._parse_assignment(),
524                    expressions,
525                ),
526                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
527            ),
528        }
529
530        def _negate_range(
531            self, this: t.Optional[exp.Expression] = None
532        ) -> t.Optional[exp.Expression]:
533            if not this:
534                return this
535
536            query = this.args.get("query")
537            if isinstance(this, exp.In) and isinstance(query, exp.Query):
538                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
539                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
540                # which can produce different results (most likely a Snowflake bug).
541                #
542                # https://docs.snowflake.com/en/sql-reference/functions/in
543                # Context: https://github.com/tobymao/sqlglot/issues/3890
544                return self.expression(
545                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
546                )
547
548            return self.expression(exp.Not, this=this)
549
550        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
551            if self._prev.token_type != TokenType.WITH:
552                self._retreat(self._index - 1)
553
554            if self._match_text_seq("MASKING", "POLICY"):
555                policy = self._parse_column()
556                return self.expression(
557                    exp.MaskingPolicyColumnConstraint,
558                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
559                    expressions=self._match(TokenType.USING)
560                    and self._parse_wrapped_csv(self._parse_id_var),
561                )
562            if self._match_text_seq("PROJECTION", "POLICY"):
563                policy = self._parse_column()
564                return self.expression(
565                    exp.ProjectionPolicyColumnConstraint,
566                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
567                )
568            if self._match(TokenType.TAG):
569                return self.expression(
570                    exp.TagColumnConstraint,
571                    expressions=self._parse_wrapped_csv(self._parse_property),
572                )
573
574            return None
575
576        def _parse_create(self) -> exp.Create | exp.Command:
577            expression = super()._parse_create()
578            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
579                # Replace the Table node with the enclosed Identifier
580                expression.this.replace(expression.this.this)
581
582            return expression
583
584        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
585        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
586        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
587            this = self._parse_var() or self._parse_type()
588
589            if not this:
590                return None
591
592            self._match(TokenType.COMMA)
593            expression = self._parse_bitwise()
594            this = map_date_part(this)
595            name = this.name.upper()
596
597            if name.startswith("EPOCH"):
598                if name == "EPOCH_MILLISECOND":
599                    scale = 10**3
600                elif name == "EPOCH_MICROSECOND":
601                    scale = 10**6
602                elif name == "EPOCH_NANOSECOND":
603                    scale = 10**9
604                else:
605                    scale = None
606
607                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
608                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
609
610                if scale:
611                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
612
613                return to_unix
614
615            return self.expression(exp.Extract, this=this, expression=expression)
616
617        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
618            if is_map:
619                # Keys are strings in Snowflake's objects, see also:
620                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
621                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
622                return self._parse_slice(self._parse_string())
623
624            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
625
626        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
627            lateral = super()._parse_lateral()
628            if not lateral:
629                return lateral
630
631            if isinstance(lateral.this, exp.Explode):
632                table_alias = lateral.args.get("alias")
633                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
634                if table_alias and not table_alias.args.get("columns"):
635                    table_alias.set("columns", columns)
636                elif not table_alias:
637                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
638
639            return lateral
640
641        def _parse_table_parts(
642            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
643        ) -> exp.Table:
644            # https://docs.snowflake.com/en/user-guide/querying-stage
645            if self._match(TokenType.STRING, advance=False):
646                table = self._parse_string()
647            elif self._match_text_seq("@", advance=False):
648                table = self._parse_location_path()
649            else:
650                table = None
651
652            if table:
653                file_format = None
654                pattern = None
655
656                wrapped = self._match(TokenType.L_PAREN)
657                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
658                    if self._match_text_seq("FILE_FORMAT", "=>"):
659                        file_format = self._parse_string() or super()._parse_table_parts(
660                            is_db_reference=is_db_reference
661                        )
662                    elif self._match_text_seq("PATTERN", "=>"):
663                        pattern = self._parse_string()
664                    else:
665                        break
666
667                    self._match(TokenType.COMMA)
668
669                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
670            else:
671                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)
672
673            return table
674
675        def _parse_id_var(
676            self,
677            any_token: bool = True,
678            tokens: t.Optional[t.Collection[TokenType]] = None,
679        ) -> t.Optional[exp.Expression]:
680            if self._match_text_seq("IDENTIFIER", "("):
681                identifier = (
682                    super()._parse_id_var(any_token=any_token, tokens=tokens)
683                    or self._parse_string()
684                )
685                self._match_r_paren()
686                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
687
688            return super()._parse_id_var(any_token=any_token, tokens=tokens)
689
690        def _parse_show_snowflake(self, this: str) -> exp.Show:
691            scope = None
692            scope_kind = None
693
694            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
695            # which is syntactically valid but has no effect on the output
696            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
697
698            history = self._match_text_seq("HISTORY")
699
700            like = self._parse_string() if self._match(TokenType.LIKE) else None
701
702            if self._match(TokenType.IN):
703                if self._match_text_seq("ACCOUNT"):
704                    scope_kind = "ACCOUNT"
705                elif self._match_set(self.DB_CREATABLES):
706                    scope_kind = self._prev.text.upper()
707                    if self._curr:
708                        scope = self._parse_table_parts()
709                elif self._curr:
710                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
711                    scope = self._parse_table_parts()
712
713            return self.expression(
714                exp.Show,
715                **{
716                    "terse": terse,
717                    "this": this,
718                    "history": history,
719                    "like": like,
720                    "scope": scope,
721                    "scope_kind": scope_kind,
722                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
723                    "limit": self._parse_limit(),
724                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
725                },
726            )
727
728        def _parse_location_property(self) -> exp.LocationProperty:
729            self._match(TokenType.EQ)
730            return self.expression(exp.LocationProperty, this=self._parse_location_path())
731
732        def _parse_file_location(self) -> t.Optional[exp.Expression]:
733            # Parse either a subquery or a staged file
734            return (
735                self._parse_select(table=True, parse_subquery_alias=False)
736                if self._match(TokenType.L_PAREN, advance=False)
737                else self._parse_table_parts()
738            )
739
740        def _parse_location_path(self) -> exp.Var:
741            parts = [self._advance_any(ignore_reserved=True)]
742
743            # We avoid consuming a comma token because external tables like @foo and @bar
744            # can be joined in a query with a comma separator, as well as closing paren
745            # in case of subqueries
746            while self._is_connected() and not self._match_set(
747                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
748            ):
749                parts.append(self._advance_any(ignore_reserved=True))
750
751            return exp.var("".join(part.text for part in parts if part))
752
753        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
754            this = super()._parse_lambda_arg()
755
756            if not this:
757                return this
758
759            typ = self._parse_types()
760
761            if typ:
762                return self.expression(exp.Cast, this=this, to=typ)
763
764            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.RANGE: 'RANGE'>, <TokenType.RING: 'RING'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.IS: 'IS'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.ANY: 'ANY'>, <TokenType.MAP: 'MAP'>, <TokenType.MERGE: 'MERGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.ASOF: 'ASOF'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT256: 'UINT256'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.APPLY: 'APPLY'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.END: 'END'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TAG: 'TAG'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COPY: 'COPY'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.FULL: 'FULL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROW: 'ROW'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
<TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.NULL: 'NULL'>, <TokenType.SET: 'SET'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INET: 'INET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CACHE: 'CACHE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INT256: 'INT256'>, <TokenType.SEMI: 'SEMI'>, <TokenType.VIEW: 'VIEW'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.KILL: 'KILL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VAR: 'VAR'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DATE: 'DATE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.INDEX: 'INDEX'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TOP: 'TOP'>, <TokenType.ASC: 'ASC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TIME: 'TIME'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DIV: 'DIV'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.POINT: 'POINT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 
'SIMPLEAGGREGATEFUNCTION'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.LIST: 'LIST'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ROWS: 'ROWS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.BIT: 'BIT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.XML: 'XML'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.JSON: 'JSON'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TABLE: 'TABLE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.CASE: 'CASE'>, <TokenType.INT128: 'INT128'>, <TokenType.INT: 'INT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NUMMULTIRANGE: 
'NUMMULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SOME: 'SOME'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.DECIMAL128: 'DECIMAL128'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DESC: 'DESC'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATE32: 'DATE32'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.ALL: 'ALL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.SUPER: 'SUPER'>}
TABLE_ALIAS_TOKENS = {<TokenType.UINT: 'UINT'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.RANGE: 'RANGE'>, <TokenType.RING: 'RING'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.IS: 'IS'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.MAP: 'MAP'>, <TokenType.ANY: 'ANY'>, <TokenType.MERGE: 'MERGE'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.IPV4: 'IPV4'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.NAME: 'NAME'>, <TokenType.UINT256: 'UINT256'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ENUM: 'ENUM'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SUPER: 'SUPER'>, <TokenType.END: 'END'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.TAG: 'TAG'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.MULTILINESTRING: 'MULTILINESTRING'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.LOAD: 'LOAD'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.COPY: 'COPY'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.KEEP: 'KEEP'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ROW: 'ROW'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.NULL: 'NULL'>, <TokenType.SET: 'SET'>, 
<TokenType.SMALLINT: 'SMALLINT'>, <TokenType.DELETE: 'DELETE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.CUBE: 'CUBE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.INET: 'INET'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.CACHE: 'CACHE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INT256: 'INT256'>, <TokenType.SEMI: 'SEMI'>, <TokenType.VIEW: 'VIEW'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.KILL: 'KILL'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.FIRST: 'FIRST'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.VAR: 'VAR'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.DATE: 'DATE'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.INDEX: 'INDEX'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.TOP: 'TOP'>, <TokenType.ASC: 'ASC'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.TIME: 'TIME'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.DECIMAL64: 'DECIMAL64'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DIV: 'DIV'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.POINT: 'POINT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ANTI: 'ANTI'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 
<TokenType.DATETIME: 'DATETIME'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.LIST: 'LIST'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.ROWS: 'ROWS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.POLYGON: 'POLYGON'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.BIT: 'BIT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.XML: 'XML'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.JSONB: 'JSONB'>, <TokenType.JSON: 'JSON'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TABLE: 'TABLE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.MODEL: 'MODEL'>, <TokenType.DECIMAL32: 'DECIMAL32'>, <TokenType.TEXT: 'TEXT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.DECIMAL256: 'DECIMAL256'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.CASE: 'CASE'>, <TokenType.INT128: 'INT128'>, <TokenType.INT: 'INT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.USE: 'USE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.SOME: 'SOME'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.DECIMAL128: 'DECIMAL128'>, 
<TokenType.ESCAPE: 'ESCAPE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.CHAR: 'CHAR'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.LINESTRING: 'LINESTRING'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SHOW: 'SHOW'>, <TokenType.DESC: 'DESC'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.MULTIPOLYGON: 'MULTIPOLYGON'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.DATE32: 'DATE32'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.ALL: 'ALL'>, <TokenType.RENAME: 'RENAME'>, <TokenType.UNIQUE: 'UNIQUE'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPLY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Apply'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <function Parser.<lambda>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function 
Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <function 
Parser.<lambda>>, 'CHAR': <function Parser.<lambda>>, 'COALESCE': <function build_coalesce>, 'IFNULL': <function build_coalesce>, 'NVL': <function build_coalesce>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COLUMNS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Columns'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Contains'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <function Parser.<lambda>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function 
_build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK_ISO': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'ISODOW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeekIso'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exists'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'INLINE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Inline'>>, 'INT64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Int64'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExists'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'J_S_O_N_EXISTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExists'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'J_S_O_N_EXTRACT_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.JSONExtractArray'>>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'J_S_O_N_VALUE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONValueArray'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAKE_INTERVAL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MakeInterval'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MEDIAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Median'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NORMALIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Normalize'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'OVERLAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Overlay'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_EXTRACT_ALL': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SPLIT_PART': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SplitPart'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.String'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_DOUBLE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ToDouble'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDatetime'>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_SECONDS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixSeconds'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Upper'>>, 'UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GEN_RANDOM_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'GENERATE_UUID': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'UUID_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Uuid'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'ARRAYAGG': <function Parser.<lambda>>, 'GLOB': <function Parser.<lambda>>, 'INSTR': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 
'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'EDITDISTANCE': <function Snowflake.Parser.<lambda>>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <function _build_regexp_extract.<locals>._builder>, 'REGEXP_SUBSTR_ALL': <function _build_regexp_extract.<locals>._builder>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TIMESTAMPNTZFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_NTZ_FROM_PARTS': <function build_timestamp_from_parts>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TRY_TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function 
_build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'NORMALIZE': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'OVERLAY': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.AT_GT: 'AT_GT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.LT_AT: 'LT_AT'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SWAP': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.GRANT: 'GRANT'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTRIBUTED': <function Parser.<lambda>>, 'DUPLICATE': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function 
Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SECURITY': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'OBJECTS', 'UNIQUE KEYS', 'VIEWS', 'TABLES', 'SEQUENCES', 'IMPORTED KEYS'}
NON_TABLE_CREATABLES = {'STREAMLIT', 'TAG', 'STORAGE INTEGRATION', 'WAREHOUSE'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
PROCEDURE_OPTIONS
EXECUTE_AS_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
ODBC_DATETIME_LITERALS
ON_CONDITION_TOKENS
PRIVILEGE_FOLLOW_TOKENS
DESCRIBE_STYLES
OPERATION_MODIFIERS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
766    class Tokenizer(tokens.Tokenizer):
767        STRING_ESCAPES = ["\\", "'"]
768        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
769        RAW_STRINGS = ["$$"]
770        COMMENTS = ["--", "//", ("/*", "*/")]
771        NESTED_COMMENTS = False
772
773        KEYWORDS = {
774            **tokens.Tokenizer.KEYWORDS,
775            "BYTEINT": TokenType.INT,
776            "CHAR VARYING": TokenType.VARCHAR,
777            "CHARACTER VARYING": TokenType.VARCHAR,
778            "EXCLUDE": TokenType.EXCEPT,
779            "ILIKE ANY": TokenType.ILIKE_ANY,
780            "LIKE ANY": TokenType.LIKE_ANY,
781            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
782            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
783            "MINUS": TokenType.EXCEPT,
784            "NCHAR VARYING": TokenType.VARCHAR,
785            "PUT": TokenType.COMMAND,
786            "REMOVE": TokenType.COMMAND,
787            "RM": TokenType.COMMAND,
788            "SAMPLE": TokenType.TABLE_SAMPLE,
789            "SQL_DOUBLE": TokenType.DOUBLE,
790            "SQL_VARCHAR": TokenType.VARCHAR,
791            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
792            "TAG": TokenType.TAG,
793            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
794            "TOP": TokenType.TOP,
795            "WAREHOUSE": TokenType.WAREHOUSE,
796            "STREAMLIT": TokenType.STREAMLIT,
797        }
798        KEYWORDS.pop("/*+")
799
800        SINGLE_TOKENS = {
801            **tokens.Tokenizer.SINGLE_TOKENS,
802            "$": TokenType.PARAMETER,
803        }
804
805        VAR_SINGLE_TOKENS = {"$"}
806
807        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, '~~~': <TokenType.GLOB: 'GLOB'>, '~~': <TokenType.LIKE: 'LIKE'>, '~~*': <TokenType.ILIKE: 'ILIKE'>, '~*': <TokenType.IRLIKE: 'IRLIKE'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 
'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': 
<TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': 
<TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 
'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'UHUGEINT': <TokenType.UINT128: 'UINT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL32': <TokenType.DECIMAL32: 'DECIMAL32'>, 'DECIMAL64': <TokenType.DECIMAL64: 'DECIMAL64'>, 'DECIMAL128': <TokenType.DECIMAL128: 'DECIMAL128'>, 'DECIMAL256': <TokenType.DECIMAL256: 'DECIMAL256'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 
'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': 
<TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.GRANT: 'GRANT'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 809    class Generator(generator.Generator):
 810        PARAMETER_TOKEN = "$"
 811        MATCHED_BY_SOURCE = False
 812        SINGLE_STRING_INTERVAL = True
 813        JOIN_HINTS = False
 814        TABLE_HINTS = False
 815        QUERY_HINTS = False
 816        AGGREGATE_FILTER_SUPPORTED = False
 817        SUPPORTS_TABLE_COPY = False
 818        COLLATE_IS_FUNC = True
 819        LIMIT_ONLY_LITERALS = True
 820        JSON_KEY_VALUE_PAIR_SEP = ","
 821        INSERT_OVERWRITE = " OVERWRITE INTO"
 822        STRUCT_DELIMITER = ("(", ")")
 823        COPY_PARAMS_ARE_WRAPPED = False
 824        COPY_PARAMS_EQ_REQUIRED = True
 825        STAR_EXCEPT = "EXCLUDE"
 826        SUPPORTS_EXPLODING_PROJECTIONS = False
 827        ARRAY_CONCAT_IS_VAR_LEN = False
 828        SUPPORTS_CONVERT_TIMEZONE = True
 829        EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
 830        SUPPORTS_MEDIAN = True
 831        ARRAY_SIZE_NAME = "ARRAY_SIZE"
 832
        # Per-expression SQL rendering overrides for Snowflake, layered on top
        # of the base generator's transforms.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Argument order is swapped: Snowflake's ARRAY_CONTAINS is (value, array)
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DatetimeAdd: date_delta_sql("TIMESTAMPADD"),
            exp.DatetimeDiff: timestampdiff_sql,
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's upper bound is exclusive, hence end + 1
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtractArray: _json_extract_value_array_sql,
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.JSONValueArray: _json_extract_value_array_sql,
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.MakeInterval: no_make_interval_sql,
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpExtract: _regexpextract_sql,
            exp.RegexpExtractAll: _regexpextract_sql,
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SafeDivide: lambda self, e: no_safe_divide_sql(self, e, "IFF"),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToDate: lambda self, e: self.func("DATE", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.Timestamp: no_timestamp_sql,
            exp.TimestampAdd: date_delta_sql("TIMESTAMPADD"),
            # TIMESTAMPDIFF takes (unit, start, end), so operands are reversed
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.ToDouble: rename_func("TO_DOUBLE"),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.Uuid: rename_func("UUID_STRING"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
            exp.Levenshtein: unsupported_args("ins_cost", "del_cost", "sub_cost")(
                rename_func("EDITDISTANCE")
            ),
        }

        # JSON path components Snowflake can express natively.
        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Nested/struct data is modeled as semi-structured OBJECTs in Snowflake
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that prevent a VALUES clause from being rendered as a
        # table literal (see values_sql below).
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

 961        def with_properties(self, properties: exp.Properties) -> str:
 962            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 963
 964        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 965            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 966                values_as_table = False
 967
 968            return super().values_sql(expression, values_as_table=values_as_table)
 969
 970        def datatype_sql(self, expression: exp.DataType) -> str:
 971            expressions = expression.expressions
 972            if (
 973                expressions
 974                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 975                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 976            ):
 977                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 978                return "OBJECT"
 979
 980            return super().datatype_sql(expression)
 981
 982        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 983            return self.func(
 984                "TO_NUMBER",
 985                expression.this,
 986                expression.args.get("format"),
 987                expression.args.get("precision"),
 988                expression.args.get("scale"),
 989            )
 990
 991        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 992            milli = expression.args.get("milli")
 993            if milli is not None:
 994                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 995                expression.set("nano", milli_to_nano)
 996
 997            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 998
 999        def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
1000            if expression.is_type(exp.DataType.Type.GEOGRAPHY):
1001                return self.func("TO_GEOGRAPHY", expression.this)
1002            if expression.is_type(exp.DataType.Type.GEOMETRY):
1003                return self.func("TO_GEOMETRY", expression.this)
1004
1005            return super().cast_sql(expression, safe_prefix=safe_prefix)
1006
1007        def trycast_sql(self, expression: exp.TryCast) -> str:
1008            value = expression.this
1009
1010            if value.type is None:
1011                from sqlglot.optimizer.annotate_types import annotate_types
1012
1013                value = annotate_types(value)
1014
1015            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
1016                return super().trycast_sql(expression)
1017
1018            # TRY_CAST only works for string values in Snowflake
1019            return self.cast_sql(expression)
1020
1021        def log_sql(self, expression: exp.Log) -> str:
1022            if not expression.expression:
1023                return self.func("LN", expression.this)
1024
1025            return super().log_sql(expression)
1026
        def unnest_sql(self, expression: exp.Unnest) -> str:
            # Snowflake has no UNNEST; emulate it with TABLE(FLATTEN(INPUT => ...)).
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN yields the fixed columns SEQ, KEY, PATH, INDEX, VALUE, THIS,
            # so the table alias must name all six positions in that order. The
            # caller's offset alias (if any) is popped into the INDEX slot, and the
            # first user-provided column (if any) into the VALUE slot.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # No alias was supplied; synthesize one so the columns are addressable.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

1051        def show_sql(self, expression: exp.Show) -> str:
1052            terse = "TERSE " if expression.args.get("terse") else ""
1053            history = " HISTORY" if expression.args.get("history") else ""
1054            like = self.sql(expression, "like")
1055            like = f" LIKE {like}" if like else ""
1056
1057            scope = self.sql(expression, "scope")
1058            scope = f" {scope}" if scope else ""
1059
1060            scope_kind = self.sql(expression, "scope_kind")
1061            if scope_kind:
1062                scope_kind = f" IN {scope_kind}"
1063
1064            starts_with = self.sql(expression, "starts_with")
1065            if starts_with:
1066                starts_with = f" STARTS WITH {starts_with}"
1067
1068            limit = self.sql(expression, "limit")
1069
1070            from_ = self.sql(expression, "from")
1071            if from_:
1072                from_ = f" FROM {from_}"
1073
1074            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
1075
1076        def describe_sql(self, expression: exp.Describe) -> str:
1077            # Default to table if kind is unknown
1078            kind_value = expression.args.get("kind") or "TABLE"
1079            kind = f" {kind_value}" if kind_value else ""
1080            this = f" {self.sql(expression, 'this')}"
1081            expressions = self.expressions(expression, flat=True)
1082            expressions = f" {expressions}" if expressions else ""
1083            return f"DESCRIBE{kind}{this}{expressions}"
1084
1085        def generatedasidentitycolumnconstraint_sql(
1086            self, expression: exp.GeneratedAsIdentityColumnConstraint
1087        ) -> str:
1088            start = expression.args.get("start")
1089            start = f" START {start}" if start else ""
1090            increment = expression.args.get("increment")
1091            increment = f" INCREMENT {increment}" if increment else ""
1092            return f"AUTOINCREMENT{start}{increment}"
1093
1094        def cluster_sql(self, expression: exp.Cluster) -> str:
1095            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1096
1097        def struct_sql(self, expression: exp.Struct) -> str:
1098            keys = []
1099            values = []
1100
1101            for i, e in enumerate(expression.expressions):
1102                if isinstance(e, exp.PropertyEQ):
1103                    keys.append(
1104                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1105                    )
1106                    values.append(e.expression)
1107                else:
1108                    keys.append(exp.Literal.string(f"_{i}"))
1109                    values.append(e)
1110
1111            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1112
1113        @unsupported_args("weight", "accuracy")
1114        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1115            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1116
1117        def alterset_sql(self, expression: exp.AlterSet) -> str:
1118            exprs = self.expressions(expression, flat=True)
1119            exprs = f" {exprs}" if exprs else ""
1120            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1121            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1122            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1123            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1124            tag = self.expressions(expression, key="tag", flat=True)
1125            tag = f" TAG {tag}" if tag else ""
1126
1127            return f"SET{exprs}{file_format}{copy_options}{tag}"
1128
1129        def strtotime_sql(self, expression: exp.StrToTime):
1130            safe_prefix = "TRY_" if expression.args.get("safe") else ""
1131            return self.func(
1132                f"{safe_prefix}TO_TIMESTAMP", expression.this, self.format_time(expression)
1133            )
1134
1135        def timestampsub_sql(self, expression: exp.TimestampSub):
1136            return self.sql(
1137                exp.TimestampAdd(
1138                    this=expression.this,
1139                    expression=expression.expression * -1,
1140                    unit=expression.unit,
1141                )
1142            )
1143
1144        def jsonextract_sql(self, expression: exp.JSONExtract):
1145            this = expression.this
1146
1147            # JSON strings are valid coming from other dialects such as BQ
1148            return self.func(
1149                "GET_PATH",
1150                exp.ParseJSON(this=this) if this.is_string else this,
1151                expression.expression,
1152            )
1153
1154        def timetostr_sql(self, expression: exp.TimeToStr) -> str:
1155            this = expression.this
1156
1157            if this.is_string:
1158                this = exp.cast(this, exp.DataType.Type.TIMESTAMP)
1159
1160            return self.func("TO_CHAR", this, self.format_time(expression))

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_MEDIAN = True
ARRAY_SIZE_NAME = 'ARRAY_SIZE'
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContainsAll'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArrayOverlaps'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.Except'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Intersect'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Int64'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SwapTable'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Union'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Uuid'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithProcedureOptions'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DatetimeDiff'>: <function timestampdiff_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: 
<function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtractArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONValueArray'>: <function _json_extract_value_array_sql>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.MakeInterval'>: <function no_make_interval_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpExtract'>: <function _regexpextract_sql>, <class 
'sqlglot.expressions.RegexpExtractAll'>: <function _regexpextract_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SafeDivide'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.Timestamp'>: <function no_timestamp_sql>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToDouble'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 
'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Levenshtein'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistributedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DuplicateKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SecurityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: 
<Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithProcedureOptions'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.StarMap'>, <class 'sqlglot.expressions.Map'>}
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties unwrapped (no surrounding parens), space-separated."""
    prefix = self.sep("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
    """Generate VALUES, falling back to the non-table form when the VALUES
    clause contains expressions Snowflake cannot represent there
    (see UNSUPPORTED_VALUES_EXPRESSIONS: maps/structs)."""
    unsupported = expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS)
    return super().values_sql(
        expression,
        values_as_table=False if unsupported else values_as_table,
    )
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type; struct types with typed fields collapse to OBJECT.

    Snowflake's typed-object syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ],
    which differs from the generic struct rendering, so emit a bare OBJECT instead.
    """
    fields = expression.expressions
    has_typed_fields = any(isinstance(f, exp.DataType) for f in fields)
    if fields and has_typed_fields and expression.is_type(*exp.DataType.STRUCT_TYPES):
        return "OBJECT"
    return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Render TO_NUMBER(value[, format][, precision][, scale]);
    missing optional args are dropped by self.func."""
    args = expression.args
    optional = (args.get("format"), args.get("precision"), args.get("scale"))
    return self.func("TO_NUMBER", expression.this, *optional)
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS; Snowflake has no milliseconds argument,
    so a `milli` arg is converted to nanoseconds (x 1,000,000) and moved to `nano`."""
    milli = expression.args.get("milli")
    if milli is not None:
        nano = milli.pop() * exp.Literal.number(1000000)
        expression.set("nano", nano)
    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a CAST; spatial types use Snowflake's TO_GEOGRAPHY / TO_GEOMETRY
    conversion functions instead of CAST syntax."""
    for spatial_type, func_name in (
        (exp.DataType.Type.GEOGRAPHY, "TO_GEOGRAPHY"),
        (exp.DataType.Type.GEOMETRY, "TO_GEOMETRY"),
    ):
        if expression.is_type(spatial_type):
            return self.func(func_name, expression.this)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, which Snowflake only supports for string inputs;
    non-string values fall back to a plain CAST."""
    value = expression.this

    # Annotate lazily so we can inspect the operand's type.
    if value.type is None:
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    is_stringy = value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)
    # TRY_CAST only works for string values in Snowflake
    return super().trycast_sql(expression) if is_stringy else self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Single-argument LOG is the natural logarithm -> LN in Snowflake."""
    if expression.expression:
        return super().log_sql(expression)
    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)).

    FLATTEN exposes six columns (seq, key, path, index, value, this); the
    unnest alias columns / offset are mapped onto that fixed layout.
    """
    alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    # An explicit offset expression replaces FLATTEN's "index" column name.
    if isinstance(offset, exp.Expression):
        index_col = offset.pop()
    else:
        index_col = exp.to_identifier("index")

    # The first aliased column (if any) names the "value" column.
    value_col = seq_get(alias.columns if alias else [], 0) or exp.to_identifier("value")

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_col,
        value_col,
        exp.to_identifier("this"),
    ]

    if alias:
        alias.set("columns", columns)
    else:
        alias = exp.TableAlias(this="_u", columns=columns)

    flatten_sql = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(alias)
    return f"{flatten_sql} AS {alias_sql}" if alias_sql else flatten_sql
def show_sql(self, expression: exp.Show) -> str:
    """Render Snowflake's SHOW command with its optional modifiers.

    Supports TERSE, HISTORY, LIKE, IN <scope_kind> <scope>, STARTS WITH,
    LIMIT and FROM, concatenated in the order Snowflake expects.
    """
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""
    like = self.sql(expression, "like")
    like = f" LIKE {like}" if like else ""

    scope = self.sql(expression, "scope")
    scope = f" {scope}" if scope else ""

    # scope_kind renders as "IN DATABASE", "IN SCHEMA", etc.
    scope_kind = self.sql(expression, "scope_kind")
    if scope_kind:
        scope_kind = f" IN {scope_kind}"

    starts_with = self.sql(expression, "starts_with")
    if starts_with:
        starts_with = f" STARTS WITH {starts_with}"

    # limit already includes its own leading keyword/space when present.
    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    if from_:
        from_ = f" FROM {from_}"

    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE <kind> <object> [columns], defaulting the kind to TABLE."""
    # Default to table if kind is unknown
    kind_value = expression.args.get("kind") or "TABLE"
    parts = [f" {kind_value}", f" {self.sql(expression, 'this')}"]
    exprs = self.expressions(expression, flat=True)
    if exprs:
        parts.append(f" {exprs}")
    return "DESCRIBE" + "".join(parts)
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Identity columns map to Snowflake's AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        parts.append(f" START {start}")
    increment = expression.args.get("increment")
    if increment:
        parts.append(f" INCREMENT {increment}")
    return "".join(parts)
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Snowflake clustering keys require parentheses: CLUSTER BY (...)."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Render a struct as OBJECT_CONSTRUCT('k1', v1, 'k2', v2, ...).

    Named fields (PropertyEQ) keep their keys (identifiers become string
    literals); positional fields get synthetic "_<index>" keys.
    """
    pairs = []
    for i, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            pairs.append((key, field.expression))
        else:
            pairs.append((exp.Literal.string(f"_{i}"), field))

    interleaved = [item for pair in pairs for item in pair]
    return self.func("OBJECT_CONSTRUCT", *interleaved)
@unsupported_args("weight", "accuracy")
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """APPROX_QUANTILE -> APPROX_PERCENTILE(value, quantile); weight/accuracy
    have no Snowflake equivalent and are flagged as unsupported."""
    quantile = expression.args.get("quantile")
    return self.func("APPROX_PERCENTILE", expression.this, quantile)
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render ALTER ... SET with optional stage file format, stage copy
    options and tag clauses, in that order."""
    parts = ["SET"]

    exprs = self.expressions(expression, flat=True)
    if exprs:
        parts.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        parts.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        parts.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        parts.append(f" TAG {tag}")

    return "".join(parts)
def strtotime_sql(self, expression: exp.StrToTime):
    """STR_TO_TIME -> [TRY_]TO_TIMESTAMP(value, format); the safe variant
    uses Snowflake's TRY_ prefix."""
    name = "TRY_TO_TIMESTAMP" if expression.args.get("safe") else "TO_TIMESTAMP"
    return self.func(name, expression.this, self.format_time(expression))
def timestampsub_sql(self, expression: exp.TimestampSub):
    """Rewrite TIMESTAMP_SUB as TIMESTAMP_ADD with a negated amount."""
    negated = expression.expression * -1
    added = exp.TimestampAdd(
        this=expression.this,
        expression=negated,
        unit=expression.unit,
    )
    return self.sql(added)
def jsonextract_sql(self, expression: exp.JSONExtract):
    """JSON_EXTRACT -> GET_PATH, wrapping string operands in PARSE_JSON first."""
    this = expression.this

    # JSON strings are valid coming from other dialects such as BQ
    if this.is_string:
        this = exp.ParseJSON(this=this)

    return self.func("GET_PATH", this, expression.expression)
def timetostr_sql(self, expression: exp.TimeToStr) -> str:
    """TIME_TO_STR -> TO_CHAR, casting string inputs to TIMESTAMP first so
    the format is applied to a temporal value."""
    value = expression.this
    if value.is_string:
        value = exp.cast(value, exp.DataType.Type.TIMESTAMP)
    return self.func("TO_CHAR", value, self.format_time(expression))
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
SUPPORTS_UNIX_SECONDS
PARSE_JSON_NAME
ARRAY_SIZE_DIM_REQUIRED
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
set_operation
set_operations
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
alterrename_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
duplicatekeyproperty_sql
distributedbyproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodatetime_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql
conditionalinsert_sql
multitableinserts_sql
oncondition_sql
jsonexists_sql
arrayagg_sql
apply_sql
grant_sql
grantprivilege_sql
grantprincipal_sql
columns_sql
overlay_sql
todouble_sql
string_sql
median_sql
overflowtruncatebehavior_sql
unixseconds_sql
arraysize_sql