Edit on GitHub

sqlglot.dialects.snowflake

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot.dialects.dialect import (
  7    Dialect,
  8    NormalizationStrategy,
  9    binary_from_function,
 10    date_delta_sql,
 11    date_trunc_to_time,
 12    datestrtodate_sql,
 13    format_time_lambda,
 14    if_sql,
 15    inline_array_sql,
 16    max_or_greatest,
 17    min_or_least,
 18    rename_func,
 19    timestamptrunc_sql,
 20    timestrtotime_sql,
 21    var_map_sql,
 22)
 23from sqlglot.expressions import Literal
 24from sqlglot.helper import seq_get
 25from sqlglot.tokens import TokenType
 26
 27if t.TYPE_CHECKING:
 28    from sqlglot._typing import E
 29
 30
 31def _check_int(s: str) -> bool:
 32    if s[0] in ("-", "+"):
 33        return s[1:].isdigit()
 34    return s.isdigit()
 35
 36
 37# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
 38def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
 39    if len(args) == 2:
 40        first_arg, second_arg = args
 41        if second_arg.is_string:
 42            # case: <string_expr> [ , <format> ]
 43            return format_time_lambda(exp.StrToTime, "snowflake")(args)
 44        return exp.UnixToTime(this=first_arg, scale=second_arg)
 45
 46    from sqlglot.optimizer.simplify import simplify_literals
 47
 48    # The first argument might be an expression like 40 * 365 * 86400, so we try to
 49    # reduce it using `simplify_literals` first and then check if it's a Literal.
 50    first_arg = seq_get(args, 0)
 51    if not isinstance(simplify_literals(first_arg, root=True), Literal):
 52        # case: <variant_expr> or other expressions such as columns
 53        return exp.TimeStrToTime.from_arg_list(args)
 54
 55    if first_arg.is_string:
 56        if _check_int(first_arg.this):
 57            # case: <integer>
 58            return exp.UnixToTime.from_arg_list(args)
 59
 60        # case: <date_expr>
 61        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
 62
 63    # case: <numeric_expr>
 64    return exp.UnixToTime.from_arg_list(args)
 65
 66
 67def _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
 68    expression = parser.parse_var_map(args)
 69
 70    if isinstance(expression, exp.StarMap):
 71        return expression
 72
 73    return exp.Struct(
 74        expressions=[
 75            t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)
 76        ]
 77    )
 78
 79
 80def _parse_datediff(args: t.List) -> exp.DateDiff:
 81    return exp.DateDiff(
 82        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
 83    )
 84
 85
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse the arguments of DATE_PART(<part>, <expr>).

    EPOCH* parts are rewritten into a TimeToUnix over a TIMESTAMP cast (scaled for
    milli/micro/nano precision); every other part becomes a plain Extract.
    Returns None when no date part could be parsed.
    """
    this = self._parse_var() or self._parse_type()

    if not this:
        return None

    self._match(TokenType.COMMA)
    expression = self._parse_bitwise()
    # Canonicalize abbreviations (e.g. "YY" -> "YEAR") before inspecting the name.
    this = _map_date_part(this)
    name = this.name.upper()

    if name.startswith("EPOCH"):
        # Scale factor relative to epoch seconds; None means plain seconds.
        if name == "EPOCH_MILLISECOND":
            scale = 10**3
        elif name == "EPOCH_MICROSECOND":
            scale = 10**6
        elif name == "EPOCH_NANOSECOND":
            scale = 10**9
        else:
            scale = None

        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

        return to_unix

    return self.expression(exp.Extract, this=this, expression=expression)
118
119
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _div0_to_if(args: t.List) -> exp.If:
    """Rewrite DIV0(a, b) as IF(b = 0, 0, a / b)."""
    numerator = seq_get(args, 0)
    denominator = seq_get(args, 1)
    is_zero = exp.EQ(this=denominator, expression=exp.Literal.number(0))
    quotient = exp.Div(this=numerator, expression=denominator)
    return exp.If(this=is_zero, true=exp.Literal.number(0), false=quotient)
126
127
# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _zeroifnull_to_if(args: t.List) -> exp.If:
    """Rewrite ZEROIFNULL(x) as IF(x IS NULL, 0, x)."""
    arg = seq_get(args, 0)
    is_null = exp.Is(this=arg, expression=exp.Null())
    return exp.If(this=is_null, true=exp.Literal.number(0), false=arg)
132
133
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _nullifzero_to_if(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    arg = seq_get(args, 0)
    is_zero = exp.EQ(this=arg, expression=exp.Literal.number(0))
    return exp.If(this=is_zero, true=exp.Null(), false=arg)
138
139
def _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:
    """Render ARRAY/MAP as Snowflake's semi-structured types; defer otherwise."""
    if expression.is_type("array"):
        return "ARRAY"
    if expression.is_type("map"):
        return "OBJECT"
    return self.datatype_sql(expression)
146
147
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
    """Generate REGEXP_LIKE, ensuring the case-insensitive "i" flag is present."""
    flag = expression.text("flag")
    if "i" not in flag:
        flag = f"{flag}i"

    pattern_flag = exp.Literal.string(flag)
    return self.func("REGEXP_LIKE", expression.this, expression.expression, pattern_flag)
157
158
def _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
    """Parse CONVERT_TIMEZONE: the 3-arg form has no generic AST node, so it stays
    Anonymous; the 2-arg form maps to AT TIME ZONE (note the reversed arg order)."""
    if len(args) == 3:
        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)

    target_zone = seq_get(args, 0)
    value = seq_get(args, 1)
    return exp.AtTimeZone(this=value, zone=target_zone)
163
164
def _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:
    """Parse REGEXP_REPLACE, defaulting a missing replacement to the empty string
    to mirror Snowflake's behavior."""
    node = exp.RegexpReplace.from_arg_list(args)

    if not node.args.get("replacement"):
        node.set("replacement", exp.Literal.string(""))

    return node
172
173
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Build a SHOW-statement parser callback with the given arguments pre-bound."""
    return lambda self: self._parse_show_snowflake(*args, **kwargs)
179
180
# Maps Snowflake date-part aliases and abbreviations to their canonical names.
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
# Fix: the literal previously listed the key "NSECONDS" twice (the second
# occurrence silently overwrote the first); the duplicate is removed.
DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
253
254
@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    """Canonicalize a date-part expression (e.g. "YY" -> YEAR); unknown parts and
    falsy inputs pass through unchanged."""
    if not part:
        return part

    canonical = DATE_PART_MAPPING.get(part.name.upper())
    return exp.var(canonical) if canonical else part
268
269
def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    """Parse DATE_TRUNC and canonicalize its unit via _map_date_part."""
    expression = date_trunc_to_time(args)
    unit = _map_date_part(expression.args["unit"])
    expression.set("unit", unit)
    return expression
274
275
def _parse_colon_get_path(
    self: parser.Parser, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
    """Parse Snowflake's `:` extraction operator into nested GetPath nodes.

    Loops while `:` tokens follow, since the operator is left-associative, and
    finally hands off to `_parse_range` so comparisons after the path still parse.
    """
    while True:
        path = self._parse_bitwise()

        # The cast :: operator has a lower precedence than the extraction operator :, so
        # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
        if isinstance(path, exp.Cast):
            target_type = path.to
            path = path.this
        else:
            target_type = None

        # Serialize the path expression back to a string literal, which is what
        # GET_PATH expects as its second argument.
        if isinstance(path, exp.Expression):
            path = exp.Literal.string(path.sql(dialect="snowflake"))

        # The extraction operator : is left-associative
        this = self.expression(exp.GetPath, this=this, expression=path)

        # Re-apply the cast that was peeled off above, now around the whole GET_PATH.
        if target_type:
            this = exp.cast(this, target_type)

        if not self._match(TokenType.COLON):
            break

    return self._parse_range(this)
303
304
def _parse_timestamp_from_parts(args: t.List) -> exp.Func:
    """Parse TIMESTAMP_FROM_PARTS; the 2-arg (date, time) overload stays Anonymous."""
    if len(args) != 2:
        return exp.TimestampFromParts.from_arg_list(args)

    # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
    # so we parse this into Anonymous for now instead of introducing complexity
    return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
312
313
def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Strip table qualifiers from columns referenced in UNPIVOT, since Snowflake
    rejects qualified column names there.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if isinstance(expression, exp.Pivot) and expression.unpivot:
        return transforms.unqualify_columns(expression)

    return expression
329
330
331class Snowflake(Dialect):
332    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
333    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
334    NULL_ORDERING = "nulls_are_large"
335    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
336    SUPPORTS_USER_DEFINED_TYPES = False
337    SUPPORTS_SEMI_ANTI_JOIN = False
338    PREFER_CTE_ALIAS_COLUMN = True
339    TABLESAMPLE_SIZE_IS_PERCENT = True
340
341    TIME_MAPPING = {
342        "YYYY": "%Y",
343        "yyyy": "%Y",
344        "YY": "%y",
345        "yy": "%y",
346        "MMMM": "%B",
347        "mmmm": "%B",
348        "MON": "%b",
349        "mon": "%b",
350        "MM": "%m",
351        "mm": "%m",
352        "DD": "%d",
353        "dd": "%-d",
354        "DY": "%a",
355        "dy": "%w",
356        "HH24": "%H",
357        "hh24": "%H",
358        "HH12": "%I",
359        "hh12": "%I",
360        "MI": "%M",
361        "mi": "%M",
362        "SS": "%S",
363        "ss": "%S",
364        "FF": "%f",
365        "ff": "%f",
366        "FF6": "%f",
367        "ff6": "%f",
368    }
369
370    def quote_identifier(self, expression: E, identify: bool = True) -> E:
371        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
372        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
373        if (
374            isinstance(expression, exp.Identifier)
375            and isinstance(expression.parent, exp.Table)
376            and expression.name.lower() == "dual"
377        ):
378            return expression  # type: ignore
379
380        return super().quote_identifier(expression, identify=identify)
381
382    class Parser(parser.Parser):
383        IDENTIFY_PIVOT_STRINGS = True
384
385        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
386
387        FUNCTIONS = {
388            **parser.Parser.FUNCTIONS,
389            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
390            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
391            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
392                this=seq_get(args, 1), expression=seq_get(args, 0)
393            ),
394            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
395                # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive
396                start=seq_get(args, 0),
397                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
398                step=seq_get(args, 2),
399            ),
400            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
401            "BITXOR": binary_from_function(exp.BitwiseXor),
402            "BIT_XOR": binary_from_function(exp.BitwiseXor),
403            "BOOLXOR": binary_from_function(exp.Xor),
404            "CONVERT_TIMEZONE": _parse_convert_timezone,
405            "DATE_TRUNC": _date_trunc_to_time,
406            "DATEADD": lambda args: exp.DateAdd(
407                this=seq_get(args, 2),
408                expression=seq_get(args, 1),
409                unit=_map_date_part(seq_get(args, 0)),
410            ),
411            "DATEDIFF": _parse_datediff,
412            "DIV0": _div0_to_if,
413            "FLATTEN": exp.Explode.from_arg_list,
414            "IFF": exp.If.from_arg_list,
415            "LAST_DAY": lambda args: exp.LastDay(
416                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
417            ),
418            "LISTAGG": exp.GroupConcat.from_arg_list,
419            "NULLIFZERO": _nullifzero_to_if,
420            "OBJECT_CONSTRUCT": _parse_object_construct,
421            "REGEXP_REPLACE": _parse_regexp_replace,
422            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
423            "RLIKE": exp.RegexpLike.from_arg_list,
424            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
425            "TIMEDIFF": _parse_datediff,
426            "TIMESTAMPDIFF": _parse_datediff,
427            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
428            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
429            "TO_TIMESTAMP": _parse_to_timestamp,
430            "TO_VARCHAR": exp.ToChar.from_arg_list,
431            "ZEROIFNULL": _zeroifnull_to_if,
432        }
433
434        FUNCTION_PARSERS = {
435            **parser.Parser.FUNCTION_PARSERS,
436            "DATE_PART": _parse_date_part,
437            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
438        }
439        FUNCTION_PARSERS.pop("TRIM")
440
441        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
442
443        RANGE_PARSERS = {
444            **parser.Parser.RANGE_PARSERS,
445            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
446            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
447            TokenType.COLON: _parse_colon_get_path,
448        }
449
450        ALTER_PARSERS = {
451            **parser.Parser.ALTER_PARSERS,
452            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
453            "UNSET": lambda self: self.expression(
454                exp.Set,
455                tag=self._match_text_seq("TAG"),
456                expressions=self._parse_csv(self._parse_id_var),
457                unset=True,
458            ),
459            "SWAP": lambda self: self._parse_alter_table_swap(),
460        }
461
462        STATEMENT_PARSERS = {
463            **parser.Parser.STATEMENT_PARSERS,
464            TokenType.SHOW: lambda self: self._parse_show(),
465        }
466
467        PROPERTY_PARSERS = {
468            **parser.Parser.PROPERTY_PARSERS,
469            "LOCATION": lambda self: self._parse_location(),
470        }
471
472        SHOW_PARSERS = {
473            "SCHEMAS": _show_parser("SCHEMAS"),
474            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
475            "OBJECTS": _show_parser("OBJECTS"),
476            "TERSE OBJECTS": _show_parser("OBJECTS"),
477            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
478            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
479            "COLUMNS": _show_parser("COLUMNS"),
480        }
481
482        STAGED_FILE_SINGLE_TOKENS = {
483            TokenType.DOT,
484            TokenType.MOD,
485            TokenType.SLASH,
486        }
487
488        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
489
490        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
491            if is_map:
492                # Keys are strings in Snowflake's objects, see also:
493                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
494                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
495                return self._parse_slice(self._parse_string())
496
497            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
498
499        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
500            lateral = super()._parse_lateral()
501            if not lateral:
502                return lateral
503
504            if isinstance(lateral.this, exp.Explode):
505                table_alias = lateral.args.get("alias")
506                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
507                if table_alias and not table_alias.args.get("columns"):
508                    table_alias.set("columns", columns)
509                elif not table_alias:
510                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
511
512            return lateral
513
514        def _parse_at_before(self, table: exp.Table) -> exp.Table:
515            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
516            index = self._index
517            if self._match_texts(("AT", "BEFORE")):
518                this = self._prev.text.upper()
519                kind = (
520                    self._match(TokenType.L_PAREN)
521                    and self._match_texts(self.HISTORICAL_DATA_KIND)
522                    and self._prev.text.upper()
523                )
524                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
525
526                if expression:
527                    self._match_r_paren()
528                    when = self.expression(
529                        exp.HistoricalData, this=this, kind=kind, expression=expression
530                    )
531                    table.set("when", when)
532                else:
533                    self._retreat(index)
534
535            return table
536
537        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
538            # https://docs.snowflake.com/en/user-guide/querying-stage
539            if self._match(TokenType.STRING, advance=False):
540                table = self._parse_string()
541            elif self._match_text_seq("@", advance=False):
542                table = self._parse_location_path()
543            else:
544                table = None
545
546            if table:
547                file_format = None
548                pattern = None
549
550                self._match(TokenType.L_PAREN)
551                while self._curr and not self._match(TokenType.R_PAREN):
552                    if self._match_text_seq("FILE_FORMAT", "=>"):
553                        file_format = self._parse_string() or super()._parse_table_parts()
554                    elif self._match_text_seq("PATTERN", "=>"):
555                        pattern = self._parse_string()
556                    else:
557                        break
558
559                    self._match(TokenType.COMMA)
560
561                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
562            else:
563                table = super()._parse_table_parts(schema=schema)
564
565            return self._parse_at_before(table)
566
567        def _parse_id_var(
568            self,
569            any_token: bool = True,
570            tokens: t.Optional[t.Collection[TokenType]] = None,
571        ) -> t.Optional[exp.Expression]:
572            if self._match_text_seq("IDENTIFIER", "("):
573                identifier = (
574                    super()._parse_id_var(any_token=any_token, tokens=tokens)
575                    or self._parse_string()
576                )
577                self._match_r_paren()
578                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
579
580            return super()._parse_id_var(any_token=any_token, tokens=tokens)
581
582        def _parse_show_snowflake(self, this: str) -> exp.Show:
583            scope = None
584            scope_kind = None
585
586            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
587            # which is syntactically valid but has no effect on the output
588            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
589
590            like = self._parse_string() if self._match(TokenType.LIKE) else None
591
592            if self._match(TokenType.IN):
593                if self._match_text_seq("ACCOUNT"):
594                    scope_kind = "ACCOUNT"
595                elif self._match_set(self.DB_CREATABLES):
596                    scope_kind = self._prev.text.upper()
597                    if self._curr:
598                        scope = self._parse_table_parts()
599                elif self._curr:
600                    scope_kind = "SCHEMA" if this == "OBJECTS" else "TABLE"
601                    scope = self._parse_table_parts()
602
603            return self.expression(
604                exp.Show,
605                **{
606                    "terse": terse,
607                    "this": this,
608                    "like": like,
609                    "scope": scope,
610                    "scope_kind": scope_kind,
611                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
612                    "limit": self._parse_limit(),
613                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
614                },
615            )
616
617        def _parse_alter_table_swap(self) -> exp.SwapTable:
618            self._match_text_seq("WITH")
619            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
620
621        def _parse_location(self) -> exp.LocationProperty:
622            self._match(TokenType.EQ)
623            return self.expression(exp.LocationProperty, this=self._parse_location_path())
624
625        def _parse_location_path(self) -> exp.Var:
626            parts = [self._advance_any(ignore_reserved=True)]
627
628            # We avoid consuming a comma token because external tables like @foo and @bar
629            # can be joined in a query with a comma separator.
630            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
631                parts.append(self._advance_any(ignore_reserved=True))
632
633            return exp.var("".join(part.text for part in parts if part))
634
635    class Tokenizer(tokens.Tokenizer):
636        STRING_ESCAPES = ["\\", "'"]
637        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
638        RAW_STRINGS = ["$$"]
639        COMMENTS = ["--", "//", ("/*", "*/")]
640
641        KEYWORDS = {
642            **tokens.Tokenizer.KEYWORDS,
643            "BYTEINT": TokenType.INT,
644            "CHAR VARYING": TokenType.VARCHAR,
645            "CHARACTER VARYING": TokenType.VARCHAR,
646            "EXCLUDE": TokenType.EXCEPT,
647            "ILIKE ANY": TokenType.ILIKE_ANY,
648            "LIKE ANY": TokenType.LIKE_ANY,
649            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
650            "MINUS": TokenType.EXCEPT,
651            "NCHAR VARYING": TokenType.VARCHAR,
652            "PUT": TokenType.COMMAND,
653            "REMOVE": TokenType.COMMAND,
654            "RENAME": TokenType.REPLACE,
655            "RM": TokenType.COMMAND,
656            "SAMPLE": TokenType.TABLE_SAMPLE,
657            "SQL_DOUBLE": TokenType.DOUBLE,
658            "SQL_VARCHAR": TokenType.VARCHAR,
659            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
660            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
661            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
662            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
663            "TOP": TokenType.TOP,
664        }
665
666        SINGLE_TOKENS = {
667            **tokens.Tokenizer.SINGLE_TOKENS,
668            "$": TokenType.PARAMETER,
669        }
670
671        VAR_SINGLE_TOKENS = {"$"}
672
673        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
674
675    class Generator(generator.Generator):
676        PARAMETER_TOKEN = "$"
677        MATCHED_BY_SOURCE = False
678        SINGLE_STRING_INTERVAL = True
679        JOIN_HINTS = False
680        TABLE_HINTS = False
681        QUERY_HINTS = False
682        AGGREGATE_FILTER_SUPPORTED = False
683        SUPPORTS_TABLE_COPY = False
684        COLLATE_IS_FUNC = True
685        LIMIT_ONLY_LITERALS = True
686        JSON_KEY_VALUE_PAIR_SEP = ","
687        INSERT_OVERWRITE = " OVERWRITE INTO"
688
689        TRANSFORMS = {
690            **generator.Generator.TRANSFORMS,
691            exp.ArgMax: rename_func("MAX_BY"),
692            exp.ArgMin: rename_func("MIN_BY"),
693            exp.Array: inline_array_sql,
694            exp.ArrayConcat: rename_func("ARRAY_CAT"),
695            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
696            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
697            exp.AtTimeZone: lambda self, e: self.func(
698                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
699            ),
700            exp.BitwiseXor: rename_func("BITXOR"),
701            exp.DateAdd: date_delta_sql("DATEADD"),
702            exp.DateDiff: date_delta_sql("DATEDIFF"),
703            exp.DateStrToDate: datestrtodate_sql,
704            exp.DataType: _datatype_sql,
705            exp.DayOfMonth: rename_func("DAYOFMONTH"),
706            exp.DayOfWeek: rename_func("DAYOFWEEK"),
707            exp.DayOfYear: rename_func("DAYOFYEAR"),
708            exp.Explode: rename_func("FLATTEN"),
709            exp.Extract: rename_func("DATE_PART"),
710            exp.FromTimeZone: lambda self, e: self.func(
711                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
712            ),
713            exp.GenerateSeries: lambda self, e: self.func(
714                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
715            ),
716            exp.GroupConcat: rename_func("LISTAGG"),
717            exp.If: if_sql(name="IFF", false_value="NULL"),
718            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
719            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
720            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
721            exp.LogicalOr: rename_func("BOOLOR_AGG"),
722            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
723            exp.Max: max_or_greatest,
724            exp.Min: min_or_least,
725            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
726            exp.PercentileCont: transforms.preprocess(
727                [transforms.add_within_group_for_percentiles]
728            ),
729            exp.PercentileDisc: transforms.preprocess(
730                [transforms.add_within_group_for_percentiles]
731            ),
732            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
733            exp.RegexpILike: _regexpilike_sql,
734            exp.Rand: rename_func("RANDOM"),
735            exp.Select: transforms.preprocess(
736                [
737                    transforms.eliminate_distinct_on,
738                    transforms.explode_to_unnest(),
739                    transforms.eliminate_semi_and_anti_joins,
740                ]
741            ),
742            exp.SHA: rename_func("SHA1"),
743            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
744            exp.StartsWith: rename_func("STARTSWITH"),
745            exp.StrPosition: lambda self, e: self.func(
746                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
747            ),
748            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
749            exp.Struct: lambda self, e: self.func(
750                "OBJECT_CONSTRUCT",
751                *(arg for expression in e.expressions for arg in expression.flatten()),
752            ),
753            exp.Stuff: rename_func("INSERT"),
754            exp.TimestampDiff: lambda self, e: self.func(
755                "TIMESTAMPDIFF", e.unit, e.expression, e.this
756            ),
757            exp.TimestampTrunc: timestamptrunc_sql,
758            exp.TimeStrToTime: timestrtotime_sql,
759            exp.TimeToStr: lambda self, e: self.func(
760                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
761            ),
762            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
763            exp.ToArray: rename_func("TO_ARRAY"),
764            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
765            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
766            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
767            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
768            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
769            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
770            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
771            exp.Xor: rename_func("BOOLXOR"),
772        }
773
774        TYPE_MAPPING = {
775            **generator.Generator.TYPE_MAPPING,
776            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
777        }
778
779        STAR_MAPPING = {
780            "except": "EXCLUDE",
781            "replace": "RENAME",
782        }
783
784        PROPERTIES_LOCATION = {
785            **generator.Generator.PROPERTIES_LOCATION,
786            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
787            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
788        }
789
790        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
791            milli = expression.args.get("milli")
792            if milli is not None:
793                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
794                expression.set("nano", milli_to_nano)
795
796            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
797
798        def trycast_sql(self, expression: exp.TryCast) -> str:
799            value = expression.this
800
801            if value.type is None:
802                from sqlglot.optimizer.annotate_types import annotate_types
803
804                value = annotate_types(value)
805
806            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
807                return super().trycast_sql(expression)
808
809            # TRY_CAST only works for string values in Snowflake
810            return self.cast_sql(expression)
811
812        def log_sql(self, expression: exp.Log) -> str:
813            if not expression.expression:
814                return self.func("LN", expression.this)
815
816            return super().log_sql(expression)
817
818        def unnest_sql(self, expression: exp.Unnest) -> str:
819            unnest_alias = expression.args.get("alias")
820            offset = expression.args.get("offset")
821
822            columns = [
823                exp.to_identifier("seq"),
824                exp.to_identifier("key"),
825                exp.to_identifier("path"),
826                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
827                seq_get(unnest_alias.columns if unnest_alias else [], 0)
828                or exp.to_identifier("value"),
829                exp.to_identifier("this"),
830            ]
831
832            if unnest_alias:
833                unnest_alias.set("columns", columns)
834            else:
835                unnest_alias = exp.TableAlias(this="_u", columns=columns)
836
837            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
838            alias = self.sql(unnest_alias)
839            alias = f" AS {alias}" if alias else ""
840            return f"{explode}{alias}"
841
842        def show_sql(self, expression: exp.Show) -> str:
843            terse = "TERSE " if expression.args.get("terse") else ""
844            like = self.sql(expression, "like")
845            like = f" LIKE {like}" if like else ""
846
847            scope = self.sql(expression, "scope")
848            scope = f" {scope}" if scope else ""
849
850            scope_kind = self.sql(expression, "scope_kind")
851            if scope_kind:
852                scope_kind = f" IN {scope_kind}"
853
854            starts_with = self.sql(expression, "starts_with")
855            if starts_with:
856                starts_with = f" STARTS WITH {starts_with}"
857
858            limit = self.sql(expression, "limit")
859
860            from_ = self.sql(expression, "from")
861            if from_:
862                from_ = f" FROM {from_}"
863
864            return (
865                f"SHOW {terse}{expression.name}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
866            )
867
868        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
869            # Other dialects don't support all of the following parameters, so we need to
870            # generate default values as necessary to ensure the transpilation is correct
871            group = expression.args.get("group")
872            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
873            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
874            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
875
876            return self.func(
877                "REGEXP_SUBSTR",
878                expression.this,
879                expression.expression,
880                position,
881                occurrence,
882                parameters,
883                group,
884            )
885
886        def except_op(self, expression: exp.Except) -> str:
887            if not expression.args.get("distinct", False):
888                self.unsupported("EXCEPT with All is not supported in Snowflake")
889            return super().except_op(expression)
890
891        def intersect_op(self, expression: exp.Intersect) -> str:
892            if not expression.args.get("distinct", False):
893                self.unsupported("INTERSECT with All is not supported in Snowflake")
894            return super().intersect_op(expression)
895
896        def describe_sql(self, expression: exp.Describe) -> str:
897            # Default to table if kind is unknown
898            kind_value = expression.args.get("kind") or "TABLE"
899            kind = f" {kind_value}" if kind_value else ""
900            this = f" {self.sql(expression, 'this')}"
901            expressions = self.expressions(expression, flat=True)
902            expressions = f" {expressions}" if expressions else ""
903            return f"DESCRIBE{kind}{this}{expressions}"
904
905        def generatedasidentitycolumnconstraint_sql(
906            self, expression: exp.GeneratedAsIdentityColumnConstraint
907        ) -> str:
908            start = expression.args.get("start")
909            start = f" START {start}" if start else ""
910            increment = expression.args.get("increment")
911            increment = f" INCREMENT {increment}" if increment else ""
912            return f"AUTOINCREMENT{start}{increment}"
913
914        def swaptable_sql(self, expression: exp.SwapTable) -> str:
915            this = self.sql(expression, "this")
916            return f"SWAP WITH {this}"
917
918        def with_properties(self, properties: exp.Properties) -> str:
919            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
920
921        def cluster_sql(self, expression: exp.Cluster) -> str:
922            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'YEARDAY': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH': 'EPOCH_SECOND', 'EPOCH_SECONDS': 'EPOCH_SECOND', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE'}
class Snowflake(sqlglot.dialects.dialect.Dialect):
332class Snowflake(Dialect):
333    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
334    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
335    NULL_ORDERING = "nulls_are_large"
336    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
337    SUPPORTS_USER_DEFINED_TYPES = False
338    SUPPORTS_SEMI_ANTI_JOIN = False
339    PREFER_CTE_ALIAS_COLUMN = True
340    TABLESAMPLE_SIZE_IS_PERCENT = True
341
342    TIME_MAPPING = {
343        "YYYY": "%Y",
344        "yyyy": "%Y",
345        "YY": "%y",
346        "yy": "%y",
347        "MMMM": "%B",
348        "mmmm": "%B",
349        "MON": "%b",
350        "mon": "%b",
351        "MM": "%m",
352        "mm": "%m",
353        "DD": "%d",
354        "dd": "%-d",
355        "DY": "%a",
356        "dy": "%w",
357        "HH24": "%H",
358        "hh24": "%H",
359        "HH12": "%I",
360        "hh12": "%I",
361        "MI": "%M",
362        "mi": "%M",
363        "SS": "%S",
364        "ss": "%S",
365        "FF": "%f",
366        "ff": "%f",
367        "FF6": "%f",
368        "ff6": "%f",
369    }
370
371    def quote_identifier(self, expression: E, identify: bool = True) -> E:
372        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
373        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
374        if (
375            isinstance(expression, exp.Identifier)
376            and isinstance(expression.parent, exp.Table)
377            and expression.name.lower() == "dual"
378        ):
379            return expression  # type: ignore
380
381        return super().quote_identifier(expression, identify=identify)
382
383    class Parser(parser.Parser):
384        IDENTIFY_PIVOT_STRINGS = True
385
386        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
387
388        FUNCTIONS = {
389            **parser.Parser.FUNCTIONS,
390            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
391            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
392            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
393                this=seq_get(args, 1), expression=seq_get(args, 0)
394            ),
395            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
396                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
397                start=seq_get(args, 0),
398                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
399                step=seq_get(args, 2),
400            ),
401            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
402            "BITXOR": binary_from_function(exp.BitwiseXor),
403            "BIT_XOR": binary_from_function(exp.BitwiseXor),
404            "BOOLXOR": binary_from_function(exp.Xor),
405            "CONVERT_TIMEZONE": _parse_convert_timezone,
406            "DATE_TRUNC": _date_trunc_to_time,
407            "DATEADD": lambda args: exp.DateAdd(
408                this=seq_get(args, 2),
409                expression=seq_get(args, 1),
410                unit=_map_date_part(seq_get(args, 0)),
411            ),
412            "DATEDIFF": _parse_datediff,
413            "DIV0": _div0_to_if,
414            "FLATTEN": exp.Explode.from_arg_list,
415            "IFF": exp.If.from_arg_list,
416            "LAST_DAY": lambda args: exp.LastDay(
417                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
418            ),
419            "LISTAGG": exp.GroupConcat.from_arg_list,
420            "NULLIFZERO": _nullifzero_to_if,
421            "OBJECT_CONSTRUCT": _parse_object_construct,
422            "REGEXP_REPLACE": _parse_regexp_replace,
423            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
424            "RLIKE": exp.RegexpLike.from_arg_list,
425            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
426            "TIMEDIFF": _parse_datediff,
427            "TIMESTAMPDIFF": _parse_datediff,
428            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
429            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
430            "TO_TIMESTAMP": _parse_to_timestamp,
431            "TO_VARCHAR": exp.ToChar.from_arg_list,
432            "ZEROIFNULL": _zeroifnull_to_if,
433        }
434
435        FUNCTION_PARSERS = {
436            **parser.Parser.FUNCTION_PARSERS,
437            "DATE_PART": _parse_date_part,
438            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
439        }
440        FUNCTION_PARSERS.pop("TRIM")
441
442        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
443
444        RANGE_PARSERS = {
445            **parser.Parser.RANGE_PARSERS,
446            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
447            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
448            TokenType.COLON: _parse_colon_get_path,
449        }
450
451        ALTER_PARSERS = {
452            **parser.Parser.ALTER_PARSERS,
453            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
454            "UNSET": lambda self: self.expression(
455                exp.Set,
456                tag=self._match_text_seq("TAG"),
457                expressions=self._parse_csv(self._parse_id_var),
458                unset=True,
459            ),
460            "SWAP": lambda self: self._parse_alter_table_swap(),
461        }
462
463        STATEMENT_PARSERS = {
464            **parser.Parser.STATEMENT_PARSERS,
465            TokenType.SHOW: lambda self: self._parse_show(),
466        }
467
468        PROPERTY_PARSERS = {
469            **parser.Parser.PROPERTY_PARSERS,
470            "LOCATION": lambda self: self._parse_location(),
471        }
472
473        SHOW_PARSERS = {
474            "SCHEMAS": _show_parser("SCHEMAS"),
475            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
476            "OBJECTS": _show_parser("OBJECTS"),
477            "TERSE OBJECTS": _show_parser("OBJECTS"),
478            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
479            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
480            "COLUMNS": _show_parser("COLUMNS"),
481        }
482
483        STAGED_FILE_SINGLE_TOKENS = {
484            TokenType.DOT,
485            TokenType.MOD,
486            TokenType.SLASH,
487        }
488
489        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
490
491        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
492            if is_map:
493                # Keys are strings in Snowflake's objects, see also:
494                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
495                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
496                return self._parse_slice(self._parse_string())
497
498            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
499
500        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
501            lateral = super()._parse_lateral()
502            if not lateral:
503                return lateral
504
505            if isinstance(lateral.this, exp.Explode):
506                table_alias = lateral.args.get("alias")
507                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
508                if table_alias and not table_alias.args.get("columns"):
509                    table_alias.set("columns", columns)
510                elif not table_alias:
511                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
512
513            return lateral
514
515        def _parse_at_before(self, table: exp.Table) -> exp.Table:
516            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
517            index = self._index
518            if self._match_texts(("AT", "BEFORE")):
519                this = self._prev.text.upper()
520                kind = (
521                    self._match(TokenType.L_PAREN)
522                    and self._match_texts(self.HISTORICAL_DATA_KIND)
523                    and self._prev.text.upper()
524                )
525                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
526
527                if expression:
528                    self._match_r_paren()
529                    when = self.expression(
530                        exp.HistoricalData, this=this, kind=kind, expression=expression
531                    )
532                    table.set("when", when)
533                else:
534                    self._retreat(index)
535
536            return table
537
538        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
539            # https://docs.snowflake.com/en/user-guide/querying-stage
540            if self._match(TokenType.STRING, advance=False):
541                table = self._parse_string()
542            elif self._match_text_seq("@", advance=False):
543                table = self._parse_location_path()
544            else:
545                table = None
546
547            if table:
548                file_format = None
549                pattern = None
550
551                self._match(TokenType.L_PAREN)
552                while self._curr and not self._match(TokenType.R_PAREN):
553                    if self._match_text_seq("FILE_FORMAT", "=>"):
554                        file_format = self._parse_string() or super()._parse_table_parts()
555                    elif self._match_text_seq("PATTERN", "=>"):
556                        pattern = self._parse_string()
557                    else:
558                        break
559
560                    self._match(TokenType.COMMA)
561
562                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
563            else:
564                table = super()._parse_table_parts(schema=schema)
565
566            return self._parse_at_before(table)
567
568        def _parse_id_var(
569            self,
570            any_token: bool = True,
571            tokens: t.Optional[t.Collection[TokenType]] = None,
572        ) -> t.Optional[exp.Expression]:
573            if self._match_text_seq("IDENTIFIER", "("):
574                identifier = (
575                    super()._parse_id_var(any_token=any_token, tokens=tokens)
576                    or self._parse_string()
577                )
578                self._match_r_paren()
579                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
580
581            return super()._parse_id_var(any_token=any_token, tokens=tokens)
582
583        def _parse_show_snowflake(self, this: str) -> exp.Show:
584            scope = None
585            scope_kind = None
586
587            # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
588            # which is syntactically valid but has no effect on the output
589            terse = self._tokens[self._index - 2].text.upper() == "TERSE"
590
591            like = self._parse_string() if self._match(TokenType.LIKE) else None
592
593            if self._match(TokenType.IN):
594                if self._match_text_seq("ACCOUNT"):
595                    scope_kind = "ACCOUNT"
596                elif self._match_set(self.DB_CREATABLES):
597                    scope_kind = self._prev.text.upper()
598                    if self._curr:
599                        scope = self._parse_table_parts()
600                elif self._curr:
601                    scope_kind = "SCHEMA" if this == "OBJECTS" else "TABLE"
602                    scope = self._parse_table_parts()
603
604            return self.expression(
605                exp.Show,
606                **{
607                    "terse": terse,
608                    "this": this,
609                    "like": like,
610                    "scope": scope,
611                    "scope_kind": scope_kind,
612                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
613                    "limit": self._parse_limit(),
614                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
615                },
616            )
617
618        def _parse_alter_table_swap(self) -> exp.SwapTable:
619            self._match_text_seq("WITH")
620            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
621
622        def _parse_location(self) -> exp.LocationProperty:
623            self._match(TokenType.EQ)
624            return self.expression(exp.LocationProperty, this=self._parse_location_path())
625
626        def _parse_location_path(self) -> exp.Var:
627            parts = [self._advance_any(ignore_reserved=True)]
628
629            # We avoid consuming a comma token because external tables like @foo and @bar
630            # can be joined in a query with a comma separator.
631            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
632                parts.append(self._advance_any(ignore_reserved=True))
633
634            return exp.var("".join(part.text for part in parts if part))
635
636    class Tokenizer(tokens.Tokenizer):
637        STRING_ESCAPES = ["\\", "'"]
638        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
639        RAW_STRINGS = ["$$"]
640        COMMENTS = ["--", "//", ("/*", "*/")]
641
642        KEYWORDS = {
643            **tokens.Tokenizer.KEYWORDS,
644            "BYTEINT": TokenType.INT,
645            "CHAR VARYING": TokenType.VARCHAR,
646            "CHARACTER VARYING": TokenType.VARCHAR,
647            "EXCLUDE": TokenType.EXCEPT,
648            "ILIKE ANY": TokenType.ILIKE_ANY,
649            "LIKE ANY": TokenType.LIKE_ANY,
650            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
651            "MINUS": TokenType.EXCEPT,
652            "NCHAR VARYING": TokenType.VARCHAR,
653            "PUT": TokenType.COMMAND,
654            "REMOVE": TokenType.COMMAND,
655            "RENAME": TokenType.REPLACE,
656            "RM": TokenType.COMMAND,
657            "SAMPLE": TokenType.TABLE_SAMPLE,
658            "SQL_DOUBLE": TokenType.DOUBLE,
659            "SQL_VARCHAR": TokenType.VARCHAR,
660            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
661            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
662            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
663            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
664            "TOP": TokenType.TOP,
665        }
666
667        SINGLE_TOKENS = {
668            **tokens.Tokenizer.SINGLE_TOKENS,
669            "$": TokenType.PARAMETER,
670        }
671
672        VAR_SINGLE_TOKENS = {"$"}
673
674        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
675
676    class Generator(generator.Generator):
677        PARAMETER_TOKEN = "$"
678        MATCHED_BY_SOURCE = False
679        SINGLE_STRING_INTERVAL = True
680        JOIN_HINTS = False
681        TABLE_HINTS = False
682        QUERY_HINTS = False
683        AGGREGATE_FILTER_SUPPORTED = False
684        SUPPORTS_TABLE_COPY = False
685        COLLATE_IS_FUNC = True
686        LIMIT_ONLY_LITERALS = True
687        JSON_KEY_VALUE_PAIR_SEP = ","
688        INSERT_OVERWRITE = " OVERWRITE INTO"
689
690        TRANSFORMS = {
691            **generator.Generator.TRANSFORMS,
692            exp.ArgMax: rename_func("MAX_BY"),
693            exp.ArgMin: rename_func("MIN_BY"),
694            exp.Array: inline_array_sql,
695            exp.ArrayConcat: rename_func("ARRAY_CAT"),
696            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
697            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
698            exp.AtTimeZone: lambda self, e: self.func(
699                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
700            ),
701            exp.BitwiseXor: rename_func("BITXOR"),
702            exp.DateAdd: date_delta_sql("DATEADD"),
703            exp.DateDiff: date_delta_sql("DATEDIFF"),
704            exp.DateStrToDate: datestrtodate_sql,
705            exp.DataType: _datatype_sql,
706            exp.DayOfMonth: rename_func("DAYOFMONTH"),
707            exp.DayOfWeek: rename_func("DAYOFWEEK"),
708            exp.DayOfYear: rename_func("DAYOFYEAR"),
709            exp.Explode: rename_func("FLATTEN"),
710            exp.Extract: rename_func("DATE_PART"),
711            exp.FromTimeZone: lambda self, e: self.func(
712                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
713            ),
714            exp.GenerateSeries: lambda self, e: self.func(
715                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
716            ),
717            exp.GroupConcat: rename_func("LISTAGG"),
718            exp.If: if_sql(name="IFF", false_value="NULL"),
719            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
720            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
721            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
722            exp.LogicalOr: rename_func("BOOLOR_AGG"),
723            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
724            exp.Max: max_or_greatest,
725            exp.Min: min_or_least,
726            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
727            exp.PercentileCont: transforms.preprocess(
728                [transforms.add_within_group_for_percentiles]
729            ),
730            exp.PercentileDisc: transforms.preprocess(
731                [transforms.add_within_group_for_percentiles]
732            ),
733            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
734            exp.RegexpILike: _regexpilike_sql,
735            exp.Rand: rename_func("RANDOM"),
736            exp.Select: transforms.preprocess(
737                [
738                    transforms.eliminate_distinct_on,
739                    transforms.explode_to_unnest(),
740                    transforms.eliminate_semi_and_anti_joins,
741                ]
742            ),
743            exp.SHA: rename_func("SHA1"),
744            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
745            exp.StartsWith: rename_func("STARTSWITH"),
746            exp.StrPosition: lambda self, e: self.func(
747                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
748            ),
749            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
750            exp.Struct: lambda self, e: self.func(
751                "OBJECT_CONSTRUCT",
752                *(arg for expression in e.expressions for arg in expression.flatten()),
753            ),
754            exp.Stuff: rename_func("INSERT"),
755            exp.TimestampDiff: lambda self, e: self.func(
756                "TIMESTAMPDIFF", e.unit, e.expression, e.this
757            ),
758            exp.TimestampTrunc: timestamptrunc_sql,
759            exp.TimeStrToTime: timestrtotime_sql,
760            exp.TimeToStr: lambda self, e: self.func(
761                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
762            ),
763            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
764            exp.ToArray: rename_func("TO_ARRAY"),
765            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
766            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
767            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
768            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
769            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
770            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
771            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
772            exp.Xor: rename_func("BOOLXOR"),
773        }
774
775        TYPE_MAPPING = {
776            **generator.Generator.TYPE_MAPPING,
777            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
778        }
779
780        STAR_MAPPING = {
781            "except": "EXCLUDE",
782            "replace": "RENAME",
783        }
784
785        PROPERTIES_LOCATION = {
786            **generator.Generator.PROPERTIES_LOCATION,
787            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
788            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
789        }
790
791        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
792            milli = expression.args.get("milli")
793            if milli is not None:
794                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
795                expression.set("nano", milli_to_nano)
796
797            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
798
799        def trycast_sql(self, expression: exp.TryCast) -> str:
800            value = expression.this
801
802            if value.type is None:
803                from sqlglot.optimizer.annotate_types import annotate_types
804
805                value = annotate_types(value)
806
807            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
808                return super().trycast_sql(expression)
809
810            # TRY_CAST only works for string values in Snowflake
811            return self.cast_sql(expression)
812
813        def log_sql(self, expression: exp.Log) -> str:
814            if not expression.expression:
815                return self.func("LN", expression.this)
816
817            return super().log_sql(expression)
818
819        def unnest_sql(self, expression: exp.Unnest) -> str:
820            unnest_alias = expression.args.get("alias")
821            offset = expression.args.get("offset")
822
823            columns = [
824                exp.to_identifier("seq"),
825                exp.to_identifier("key"),
826                exp.to_identifier("path"),
827                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
828                seq_get(unnest_alias.columns if unnest_alias else [], 0)
829                or exp.to_identifier("value"),
830                exp.to_identifier("this"),
831            ]
832
833            if unnest_alias:
834                unnest_alias.set("columns", columns)
835            else:
836                unnest_alias = exp.TableAlias(this="_u", columns=columns)
837
838            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
839            alias = self.sql(unnest_alias)
840            alias = f" AS {alias}" if alias else ""
841            return f"{explode}{alias}"
842
843        def show_sql(self, expression: exp.Show) -> str:
844            terse = "TERSE " if expression.args.get("terse") else ""
845            like = self.sql(expression, "like")
846            like = f" LIKE {like}" if like else ""
847
848            scope = self.sql(expression, "scope")
849            scope = f" {scope}" if scope else ""
850
851            scope_kind = self.sql(expression, "scope_kind")
852            if scope_kind:
853                scope_kind = f" IN {scope_kind}"
854
855            starts_with = self.sql(expression, "starts_with")
856            if starts_with:
857                starts_with = f" STARTS WITH {starts_with}"
858
859            limit = self.sql(expression, "limit")
860
861            from_ = self.sql(expression, "from")
862            if from_:
863                from_ = f" FROM {from_}"
864
865            return (
866                f"SHOW {terse}{expression.name}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
867            )
868
869        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
870            # Other dialects don't support all of the following parameters, so we need to
871            # generate default values as necessary to ensure the transpilation is correct
872            group = expression.args.get("group")
873            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
874            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
875            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
876
877            return self.func(
878                "REGEXP_SUBSTR",
879                expression.this,
880                expression.expression,
881                position,
882                occurrence,
883                parameters,
884                group,
885            )
886
887        def except_op(self, expression: exp.Except) -> str:
888            if not expression.args.get("distinct", False):
889                self.unsupported("EXCEPT with All is not supported in Snowflake")
890            return super().except_op(expression)
891
892        def intersect_op(self, expression: exp.Intersect) -> str:
893            if not expression.args.get("distinct", False):
894                self.unsupported("INTERSECT with All is not supported in Snowflake")
895            return super().intersect_op(expression)
896
897        def describe_sql(self, expression: exp.Describe) -> str:
898            # Default to table if kind is unknown
899            kind_value = expression.args.get("kind") or "TABLE"
900            kind = f" {kind_value}" if kind_value else ""
901            this = f" {self.sql(expression, 'this')}"
902            expressions = self.expressions(expression, flat=True)
903            expressions = f" {expressions}" if expressions else ""
904            return f"DESCRIBE{kind}{this}{expressions}"
905
906        def generatedasidentitycolumnconstraint_sql(
907            self, expression: exp.GeneratedAsIdentityColumnConstraint
908        ) -> str:
909            start = expression.args.get("start")
910            start = f" START {start}" if start else ""
911            increment = expression.args.get("increment")
912            increment = f" INCREMENT {increment}" if increment else ""
913            return f"AUTOINCREMENT{start}{increment}"
914
915        def swaptable_sql(self, expression: exp.SwapTable) -> str:
916            this = self.sql(expression, "this")
917            return f"SWAP WITH {this}"
918
919        def with_properties(self, properties: exp.Properties) -> str:
920            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
921
922        def cluster_sql(self, expression: exp.Cluster) -> str:
923            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Indicates the default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Determines whether or not user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Determines whether or not SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Determines whether or not a size in the table sample clause represents a percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime format.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
371    def quote_identifier(self, expression: E, identify: bool = True) -> E:
372        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
373        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
374        if (
375            isinstance(expression, exp.Identifier)
376            and isinstance(expression.parent, exp.Table)
377            and expression.name.lower() == "dual"
378        ):
379            return expression  # type: ignore
380
381        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Parser for Snowflake-specific SQL: function mappings, staged-file tables,
        AT|BEFORE (Time Travel), SHOW commands, IDENTIFIER(...), and ALTER ... SWAP."""

        IDENTIFY_PIVOT_STRINGS = True

        # Snowflake additionally allows WINDOW as a table alias token.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped to sqlglot expression builders.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # ARRAY_CONTAINS(<value>, <array>): args are swapped into
            # ArrayContains(this=<array>, expression=<value>).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(<unit>, <amount>, <date>): reordered into
            # DateAdd(this=<date>, expression=<amount>, unit=<unit>).
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            # SQUARE(x) is normalized to POWER(x, 2).
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Drop the inherited special-case parser so TRIM is parsed as a regular function.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            # <expr> : <path> — Snowflake's semi-structured data access operator.
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # SET [TAG] ... / UNSET [TAG] ... — the TAG keyword is optional in both.
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        # SHOW variants handled by _parse_show_snowflake; TERSE forms share the
        # same object kind as their non-TERSE counterparts.
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "COLUMNS": _show_parser("COLUMNS"),
        }

        # Single-character tokens that may appear inside a staged file path.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default output column names produced by FLATTEN.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key-value entry of a bracketed construct; maps require string keys."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's default output columns to its alias.

            If the lateral wraps a FLATTEN (parsed as exp.Explode) and has an alias
            without explicit columns, the default FLATTEN_COLUMNS are set on it;
            with no alias at all, a "_flattened" alias carrying those columns is added.
            """
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT|BEFORE (Time Travel) clause onto `table`."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # kind is e.g. TIMESTAMP/OFFSET/STATEMENT (see HISTORICAL_DATA_KIND),
                # or False if the expected "(<kind>" sequence is absent.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Incomplete AT|BEFORE clause: rewind and leave the table untouched.
                    self._retreat(index)

            return table

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, including staged file locations and string paths."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional (FILE_FORMAT => ..., PATTERN => ...) options after the path.
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            # Any table form may be followed by an AT|BEFORE Time Travel clause.
            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(<name-or-string>) wrapper."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW command into an exp.Show expression."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare IN <name>: infer the scope kind from the object being shown.
                    scope_kind = "SCHEMA" if this == "OBJECTS" else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP [WITH] <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] <path> property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume raw tokens forming a stage/location path and join them into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.TOP: 'TOP'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.BINARY: 'BINARY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.VAR: 'VAR'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DIV: 'DIV'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.FALSE: 'FALSE'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.MODEL: 'MODEL'>, <TokenType.BIT: 'BIT'>, <TokenType.UUID: 'UUID'>, <TokenType.DATE32: 'DATE32'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.IPV4: 'IPV4'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UINT: 'UINT'>, <TokenType.ALL: 'ALL'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.NULL: 'NULL'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.SEMI: 'SEMI'>, <TokenType.FIRST: 'FIRST'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.NEXT: 'NEXT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.KEEP: 'KEEP'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.ENUM: 'ENUM'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TABLE: 'TABLE'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.SUPER: 'SUPER'>, <TokenType.AGGREGATEFUNCTION: 
'AGGREGATEFUNCTION'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.VIEW: 'VIEW'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.XML: 'XML'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.JSON: 'JSON'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.SOME: 'SOME'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.CASE: 'CASE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.INT: 'INT'>, <TokenType.TEXT: 'TEXT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.END: 'END'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.IPV6: 'IPV6'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.ASC: 'ASC'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.ROWS: 'ROWS'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.LOAD: 'LOAD'>, <TokenType.USE: 'USE'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, 
<TokenType.CACHE: 'CACHE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.KILL: 'KILL'>, <TokenType.SET: 'SET'>, <TokenType.JSONB: 'JSONB'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.NESTED: 'NESTED'>, <TokenType.TIME: 'TIME'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INET: 'INET'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DESC: 'DESC'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.DATE: 'DATE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.MAP: 'MAP'>, <TokenType.FINAL: 'FINAL'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UINT256: 'UINT256'>, <TokenType.IS: 'IS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INT256: 'INT256'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.MONEY: 'MONEY'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.ANY: 'ANY'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FILTER: 'FILTER'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _parse_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 
'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GET_PATH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetPath'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LEAST': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function parse_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _parse_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _parse_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _div0_to_if>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _nullifzero_to_if>, 'OBJECT_CONSTRUCT': <function _parse_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _parse_datediff>, 'TIMESTAMPDIFF': <function _parse_datediff>, 'TO_TIMESTAMP': <function _parse_to_timestamp>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _zeroifnull_to_if>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function _parse_date_part>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.COLON: 'COLON'>: <function _parse_colon_get_path>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 
'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}}, 'OBJECTS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'COLUMNS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ID_VAR_TOKENS
INTERVAL_VARS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
LAMBDAS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
CONSTRAINT_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
MODIFIABLES
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_UNION
UNION_MODIFIERS
NO_PAREN_IF_COMMANDS
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
636    class Tokenizer(tokens.Tokenizer):
637        STRING_ESCAPES = ["\\", "'"]
638        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
639        RAW_STRINGS = ["$$"]
640        COMMENTS = ["--", "//", ("/*", "*/")]
641
642        KEYWORDS = {
643            **tokens.Tokenizer.KEYWORDS,
644            "BYTEINT": TokenType.INT,
645            "CHAR VARYING": TokenType.VARCHAR,
646            "CHARACTER VARYING": TokenType.VARCHAR,
647            "EXCLUDE": TokenType.EXCEPT,
648            "ILIKE ANY": TokenType.ILIKE_ANY,
649            "LIKE ANY": TokenType.LIKE_ANY,
650            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
651            "MINUS": TokenType.EXCEPT,
652            "NCHAR VARYING": TokenType.VARCHAR,
653            "PUT": TokenType.COMMAND,
654            "REMOVE": TokenType.COMMAND,
655            "RENAME": TokenType.REPLACE,
656            "RM": TokenType.COMMAND,
657            "SAMPLE": TokenType.TABLE_SAMPLE,
658            "SQL_DOUBLE": TokenType.DOUBLE,
659            "SQL_VARCHAR": TokenType.VARCHAR,
660            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
661            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
662            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
663            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
664            "TOP": TokenType.TOP,
665        }
666
667        SINGLE_TOKENS = {
668            **tokens.Tokenizer.SINGLE_TOKENS,
669            "$": TokenType.PARAMETER,
670        }
671
672        VAR_SINGLE_TOKENS = {"$"}
673
674        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': 
<TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': 
<TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 
'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': 
<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': 
<TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>}
class Snowflake.Generator(sqlglot.generator.Generator):
676    class Generator(generator.Generator):
677        PARAMETER_TOKEN = "$"
678        MATCHED_BY_SOURCE = False
679        SINGLE_STRING_INTERVAL = True
680        JOIN_HINTS = False
681        TABLE_HINTS = False
682        QUERY_HINTS = False
683        AGGREGATE_FILTER_SUPPORTED = False
684        SUPPORTS_TABLE_COPY = False
685        COLLATE_IS_FUNC = True
686        LIMIT_ONLY_LITERALS = True
687        JSON_KEY_VALUE_PAIR_SEP = ","
688        INSERT_OVERWRITE = " OVERWRITE INTO"
689
690        TRANSFORMS = {
691            **generator.Generator.TRANSFORMS,
692            exp.ArgMax: rename_func("MAX_BY"),
693            exp.ArgMin: rename_func("MIN_BY"),
694            exp.Array: inline_array_sql,
695            exp.ArrayConcat: rename_func("ARRAY_CAT"),
696            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
697            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
698            exp.AtTimeZone: lambda self, e: self.func(
699                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
700            ),
701            exp.BitwiseXor: rename_func("BITXOR"),
702            exp.DateAdd: date_delta_sql("DATEADD"),
703            exp.DateDiff: date_delta_sql("DATEDIFF"),
704            exp.DateStrToDate: datestrtodate_sql,
705            exp.DataType: _datatype_sql,
706            exp.DayOfMonth: rename_func("DAYOFMONTH"),
707            exp.DayOfWeek: rename_func("DAYOFWEEK"),
708            exp.DayOfYear: rename_func("DAYOFYEAR"),
709            exp.Explode: rename_func("FLATTEN"),
710            exp.Extract: rename_func("DATE_PART"),
711            exp.FromTimeZone: lambda self, e: self.func(
712                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
713            ),
714            exp.GenerateSeries: lambda self, e: self.func(
715                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
716            ),
717            exp.GroupConcat: rename_func("LISTAGG"),
718            exp.If: if_sql(name="IFF", false_value="NULL"),
719            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
720            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
721            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
722            exp.LogicalOr: rename_func("BOOLOR_AGG"),
723            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
724            exp.Max: max_or_greatest,
725            exp.Min: min_or_least,
726            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
727            exp.PercentileCont: transforms.preprocess(
728                [transforms.add_within_group_for_percentiles]
729            ),
730            exp.PercentileDisc: transforms.preprocess(
731                [transforms.add_within_group_for_percentiles]
732            ),
733            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
734            exp.RegexpILike: _regexpilike_sql,
735            exp.Rand: rename_func("RANDOM"),
736            exp.Select: transforms.preprocess(
737                [
738                    transforms.eliminate_distinct_on,
739                    transforms.explode_to_unnest(),
740                    transforms.eliminate_semi_and_anti_joins,
741                ]
742            ),
743            exp.SHA: rename_func("SHA1"),
744            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
745            exp.StartsWith: rename_func("STARTSWITH"),
746            exp.StrPosition: lambda self, e: self.func(
747                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
748            ),
749            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
750            exp.Struct: lambda self, e: self.func(
751                "OBJECT_CONSTRUCT",
752                *(arg for expression in e.expressions for arg in expression.flatten()),
753            ),
754            exp.Stuff: rename_func("INSERT"),
755            exp.TimestampDiff: lambda self, e: self.func(
756                "TIMESTAMPDIFF", e.unit, e.expression, e.this
757            ),
758            exp.TimestampTrunc: timestamptrunc_sql,
759            exp.TimeStrToTime: timestrtotime_sql,
760            exp.TimeToStr: lambda self, e: self.func(
761                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
762            ),
763            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
764            exp.ToArray: rename_func("TO_ARRAY"),
765            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
766            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
767            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
768            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
769            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
770            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
771            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
772            exp.Xor: rename_func("BOOLXOR"),
773        }
774
775        TYPE_MAPPING = {
776            **generator.Generator.TYPE_MAPPING,
777            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
778        }
779
780        STAR_MAPPING = {
781            "except": "EXCLUDE",
782            "replace": "RENAME",
783        }
784
785        PROPERTIES_LOCATION = {
786            **generator.Generator.PROPERTIES_LOCATION,
787            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
788            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
789        }
790
791        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
792            milli = expression.args.get("milli")
793            if milli is not None:
794                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
795                expression.set("nano", milli_to_nano)
796
797            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
798
799        def trycast_sql(self, expression: exp.TryCast) -> str:
800            value = expression.this
801
802            if value.type is None:
803                from sqlglot.optimizer.annotate_types import annotate_types
804
805                value = annotate_types(value)
806
807            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
808                return super().trycast_sql(expression)
809
810            # TRY_CAST only works for string values in Snowflake
811            return self.cast_sql(expression)
812
813        def log_sql(self, expression: exp.Log) -> str:
814            if not expression.expression:
815                return self.func("LN", expression.this)
816
817            return super().log_sql(expression)
818
819        def unnest_sql(self, expression: exp.Unnest) -> str:
820            unnest_alias = expression.args.get("alias")
821            offset = expression.args.get("offset")
822
823            columns = [
824                exp.to_identifier("seq"),
825                exp.to_identifier("key"),
826                exp.to_identifier("path"),
827                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
828                seq_get(unnest_alias.columns if unnest_alias else [], 0)
829                or exp.to_identifier("value"),
830                exp.to_identifier("this"),
831            ]
832
833            if unnest_alias:
834                unnest_alias.set("columns", columns)
835            else:
836                unnest_alias = exp.TableAlias(this="_u", columns=columns)
837
838            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
839            alias = self.sql(unnest_alias)
840            alias = f" AS {alias}" if alias else ""
841            return f"{explode}{alias}"
842
843        def show_sql(self, expression: exp.Show) -> str:
844            terse = "TERSE " if expression.args.get("terse") else ""
845            like = self.sql(expression, "like")
846            like = f" LIKE {like}" if like else ""
847
848            scope = self.sql(expression, "scope")
849            scope = f" {scope}" if scope else ""
850
851            scope_kind = self.sql(expression, "scope_kind")
852            if scope_kind:
853                scope_kind = f" IN {scope_kind}"
854
855            starts_with = self.sql(expression, "starts_with")
856            if starts_with:
857                starts_with = f" STARTS WITH {starts_with}"
858
859            limit = self.sql(expression, "limit")
860
861            from_ = self.sql(expression, "from")
862            if from_:
863                from_ = f" FROM {from_}"
864
865            return (
866                f"SHOW {terse}{expression.name}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
867            )
868
869        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
870            # Other dialects don't support all of the following parameters, so we need to
871            # generate default values as necessary to ensure the transpilation is correct
872            group = expression.args.get("group")
873            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
874            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
875            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
876
877            return self.func(
878                "REGEXP_SUBSTR",
879                expression.this,
880                expression.expression,
881                position,
882                occurrence,
883                parameters,
884                group,
885            )
886
887        def except_op(self, expression: exp.Except) -> str:
888            if not expression.args.get("distinct", False):
889                self.unsupported("EXCEPT with All is not supported in Snowflake")
890            return super().except_op(expression)
891
892        def intersect_op(self, expression: exp.Intersect) -> str:
893            if not expression.args.get("distinct", False):
894                self.unsupported("INTERSECT with All is not supported in Snowflake")
895            return super().intersect_op(expression)
896
897        def describe_sql(self, expression: exp.Describe) -> str:
898            # Default to table if kind is unknown
899            kind_value = expression.args.get("kind") or "TABLE"
900            kind = f" {kind_value}" if kind_value else ""
901            this = f" {self.sql(expression, 'this')}"
902            expressions = self.expressions(expression, flat=True)
903            expressions = f" {expressions}" if expressions else ""
904            return f"DESCRIBE{kind}{this}{expressions}"
905
906        def generatedasidentitycolumnconstraint_sql(
907            self, expression: exp.GeneratedAsIdentityColumnConstraint
908        ) -> str:
909            start = expression.args.get("start")
910            start = f" START {start}" if start else ""
911            increment = expression.args.get("increment")
912            increment = f" INCREMENT {increment}" if increment else ""
913            return f"AUTOINCREMENT{start}{increment}"
914
915        def swaptable_sql(self, expression: exp.SwapTable) -> str:
916            this = self.sql(expression, "this")
917            return f"SWAP WITH {this}"
918
919        def with_properties(self, properties: exp.Properties) -> str:
920            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
921
922        def cluster_sql(self, expression: exp.Cluster) -> str:
923            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, 
<class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DataType'>: <function _datatype_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function 
rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function 
Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 
'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
791        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
792            milli = expression.args.get("milli")
793            if milli is not None:
794                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
795                expression.set("nano", milli_to_nano)
796
797            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
799        def trycast_sql(self, expression: exp.TryCast) -> str:
800            value = expression.this
801
802            if value.type is None:
803                from sqlglot.optimizer.annotate_types import annotate_types
804
805                value = annotate_types(value)
806
807            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
808                return super().trycast_sql(expression)
809
810            # TRY_CAST only works for string values in Snowflake
811            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
813        def log_sql(self, expression: exp.Log) -> str:
814            if not expression.expression:
815                return self.func("LN", expression.this)
816
817            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
819        def unnest_sql(self, expression: exp.Unnest) -> str:
820            unnest_alias = expression.args.get("alias")
821            offset = expression.args.get("offset")
822
823            columns = [
824                exp.to_identifier("seq"),
825                exp.to_identifier("key"),
826                exp.to_identifier("path"),
827                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
828                seq_get(unnest_alias.columns if unnest_alias else [], 0)
829                or exp.to_identifier("value"),
830                exp.to_identifier("this"),
831            ]
832
833            if unnest_alias:
834                unnest_alias.set("columns", columns)
835            else:
836                unnest_alias = exp.TableAlias(this="_u", columns=columns)
837
838            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
839            alias = self.sql(unnest_alias)
840            alias = f" AS {alias}" if alias else ""
841            return f"{explode}{alias}"
def show_sql(self, expression: sqlglot.expressions.Show) -> str:
843        def show_sql(self, expression: exp.Show) -> str:
844            terse = "TERSE " if expression.args.get("terse") else ""
845            like = self.sql(expression, "like")
846            like = f" LIKE {like}" if like else ""
847
848            scope = self.sql(expression, "scope")
849            scope = f" {scope}" if scope else ""
850
851            scope_kind = self.sql(expression, "scope_kind")
852            if scope_kind:
853                scope_kind = f" IN {scope_kind}"
854
855            starts_with = self.sql(expression, "starts_with")
856            if starts_with:
857                starts_with = f" STARTS WITH {starts_with}"
858
859            limit = self.sql(expression, "limit")
860
861            from_ = self.sql(expression, "from")
862            if from_:
863                from_ = f" FROM {from_}"
864
865            return (
866                f"SHOW {terse}{expression.name}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
867            )
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Transpile RegexpExtract into Snowflake's REGEXP_SUBSTR.

    REGEXP_SUBSTR's optional arguments are positional, so whenever a
    later argument (e.g. group) is present, every earlier optional
    argument must also be emitted; defaults are filled in right-to-left.
    """
    args = expression.args
    group = args.get("group")
    # Fill defaults backwards: each value forces the one before it.
    parameters = args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Render EXCEPT, warning that Snowflake lacks an ALL variant."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render INTERSECT, warning that Snowflake lacks an ALL variant."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Generate a DESCRIBE statement.

    Snowflake expects an object kind after DESCRIBE; when the parsed
    expression carries none, default to TABLE.
    """
    # `or "TABLE"` guarantees a truthy kind, so the original
    # `if kind_value else ""` branch was unreachable dead code.
    kind = f" {expression.args.get('kind') or 'TABLE'}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as Snowflake's AUTOINCREMENT clause,
    with optional START and INCREMENT values."""
    parts = ["AUTOINCREMENT"]

    start_value = expression.args.get("start")
    if start_value:
        parts.append(f" START {start_value}")

    increment_value = expression.args.get("increment")
    if increment_value:
        parts.append(f" INCREMENT {increment_value}")

    return "".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the SWAP WITH clause of ALTER TABLE ... SWAP WITH."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Emit table properties unwrapped, space-separated, each in its own segment."""
    leading = self.seg("")
    return self.properties(properties, wrapped=False, prefix=leading, sep=" ")
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Render Snowflake's CLUSTER BY (<expressions>) clause."""
    cluster_keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({cluster_keys})"
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
EXPRESSIONS_WITHOUT_NESTED_CTES
KEY_VALUE_DEFINITIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
renamecolumn_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql