Edit on GitHub

sqlglot.dialects.snowflake

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot._typing import E
  7from sqlglot.dialects.dialect import (
  8    Dialect,
  9    NormalizationStrategy,
 10    binary_from_function,
 11    date_delta_sql,
 12    date_trunc_to_time,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    if_sql,
 16    inline_array_sql,
 17    max_or_greatest,
 18    min_or_least,
 19    rename_func,
 20    timestamptrunc_sql,
 21    timestrtotime_sql,
 22    var_map_sql,
 23)
 24from sqlglot.expressions import Literal
 25from sqlglot.helper import seq_get
 26from sqlglot.tokens import TokenType
 27
 28
 29def _check_int(s: str) -> bool:
 30    if s[0] in ("-", "+"):
 31        return s[1:].isdigit()
 32    return s.isdigit()
 33
 34
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
    # TO_TIMESTAMP is overloaded in Snowflake: string+format, unix epoch
    # (optionally with a scale), or a variant/column expression. We pick the
    # expression type by inspecting the arguments.
    if len(args) == 2:
        first_arg, second_arg = args
        if second_arg.is_string:
            # case: <string_expr> [ , <format> ]
            return format_time_lambda(exp.StrToTime, "snowflake")(args)
        # case: <numeric_expr> [ , <scale> ]
        return exp.UnixToTime(this=first_arg, scale=second_arg)

    from sqlglot.optimizer.simplify import simplify_literals

    # The first argument might be an expression like 40 * 365 * 86400, so we try to
    # reduce it using `simplify_literals` first and then check if it's a Literal.
    first_arg = seq_get(args, 0)
    if not isinstance(simplify_literals(first_arg, root=True), Literal):
        # case: <variant_expr> or other expressions such as columns
        return exp.TimeStrToTime.from_arg_list(args)

    if first_arg.is_string:
        if _check_int(first_arg.this):
            # case: <integer>
            return exp.UnixToTime.from_arg_list(args)

        # case: <date_expr>
        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)

    # case: <numeric_expr>
    return exp.UnixToTime.from_arg_list(args)
 63
 64
 65def _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
 66    expression = parser.parse_var_map(args)
 67
 68    if isinstance(expression, exp.StarMap):
 69        return expression
 70
 71    return exp.Struct(
 72        expressions=[
 73            t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)
 74        ]
 75    )
 76
 77
 78def _parse_datediff(args: t.List) -> exp.DateDiff:
 79    return exp.DateDiff(
 80        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
 81    )
 82
 83
 84# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 85# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 86def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 87    this = self._parse_var() or self._parse_type()
 88
 89    if not this:
 90        return None
 91
 92    self._match(TokenType.COMMA)
 93    expression = self._parse_bitwise()
 94    this = _map_date_part(this)
 95    name = this.name.upper()
 96
 97    if name.startswith("EPOCH"):
 98        if name == "EPOCH_MILLISECOND":
 99            scale = 10**3
100        elif name == "EPOCH_MICROSECOND":
101            scale = 10**6
102        elif name == "EPOCH_NANOSECOND":
103            scale = 10**9
104        else:
105            scale = None
106
107        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
108        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
109
110        if scale:
111            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
112
113        return to_unix
114
115    return self.expression(exp.Extract, this=this, expression=expression)
116
117
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _div0_to_if(args: t.List) -> exp.If:
    """Rewrite DIV0(a, b) as IF(b = 0, 0, a / b)."""
    numerator = seq_get(args, 0)
    denominator = seq_get(args, 1)
    is_zero = exp.EQ(this=denominator, expression=exp.Literal.number(0))
    quotient = exp.Div(this=numerator, expression=denominator)
    return exp.If(this=is_zero, true=exp.Literal.number(0), false=quotient)
124
125
# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _zeroifnull_to_if(args: t.List) -> exp.If:
    """Rewrite ZEROIFNULL(x) as IF(x IS NULL, 0, x)."""
    value = seq_get(args, 0)
    is_null = exp.Is(this=value, expression=exp.Null())
    return exp.If(this=is_null, true=exp.Literal.number(0), false=value)
130
131
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _nullifzero_to_if(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) as IF(x = 0, NULL, x)."""
    value = seq_get(args, 0)
    is_zero = exp.EQ(this=value, expression=exp.Literal.number(0))
    return exp.If(this=is_zero, true=exp.Null(), false=value)
136
137
def _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:
    """Render semi-structured types with Snowflake's names: ARRAY and OBJECT."""
    if expression.is_type("array"):
        return "ARRAY"
    if expression.is_type("map"):
        return "OBJECT"
    return self.datatype_sql(expression)
144
145
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
    """Generate REGEXP_LIKE with the case-insensitive 'i' flag always present."""
    flag = expression.text("flag")
    if "i" not in flag:
        flag = f"{flag}i"

    return self.func(
        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
    )
155
156
def _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
    """Map 2-arg CONVERT_TIMEZONE(tz, expr) to AtTimeZone; keep the 3-arg form opaque."""
    if len(args) == 3:
        # The (source_tz, target_tz, expr) variant has no generic equivalent
        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)

    target_tz = seq_get(args, 0)
    value = seq_get(args, 1)
    return exp.AtTimeZone(this=value, zone=target_tz)
161
162
def _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:
    """Parse REGEXP_REPLACE, defaulting a missing replacement to the empty string."""
    node = exp.RegexpReplace.from_arg_list(args)

    if not node.args.get("replacement"):
        node.set("replacement", exp.Literal.string(""))

    return node
170
171
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Build a SHOW-statement parser that forwards *args/**kwargs to _parse_show_snowflake."""

    def _parse_show_forwarder(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse_show_forwarder
177
178
# Canonical names for Snowflake's date/time part synonyms, see:
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
# Fix: the original literal listed the key "NSECONDS" twice; the duplicate is removed.
DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
251
252
@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    # Canonicalize a Snowflake date-part synonym (e.g. "YY" -> "YEAR") via
    # DATE_PART_MAPPING; unknown or missing parts are returned unchanged.
    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
    return exp.var(mapped) if mapped else part
266
267
def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    """Parse DATE_TRUNC, canonicalizing its unit through the Snowflake date-part synonyms."""
    node = date_trunc_to_time(args)
    unit = _map_date_part(node.args["unit"])
    node.set("unit", unit)
    return node
272
273
def _parse_colon_get_path(
    self: parser.Parser, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
    # Parses Snowflake's `expr:path1:path2...` extraction syntax into nested
    # GET_PATH nodes. Each loop iteration consumes one path segment.
    while True:
        path = self._parse_bitwise()

        # The cast :: operator has a lower precedence than the extraction operator :, so
        # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
        if isinstance(path, exp.Cast):
            target_type = path.to
            path = path.this
        else:
            target_type = None

        if isinstance(path, exp.Expression):
            # Path segments are stored as string literals of their Snowflake SQL form
            path = exp.Literal.string(path.sql(dialect="snowflake"))

        # The extraction operator : is left-associative
        this = self.expression(exp.GetPath, this=this, expression=path)

        if target_type:
            # Re-apply the cast that was peeled off the path segment above
            this = exp.cast(this, target_type)

        if not self._match(TokenType.COLON):
            break

    # Allow range operators (e.g. LIKE ANY) to bind after the whole path chain
    if self._match_set(self.RANGE_PARSERS):
        this = self.RANGE_PARSERS[self._prev.token_type](self, this) or this

    return this
304
305
def _parse_timestamp_from_parts(args: t.List) -> exp.Func:
    """Parse TIMESTAMP_FROM_PARTS; the 2-arg (date, time) form stays an Anonymous node."""
    if len(args) != 2:
        return exp.TimestampFromParts.from_arg_list(args)

    # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
    # so we parse this into Anonymous for now instead of introducing complexity
    return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
313
314
def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if not (isinstance(expression, exp.Pivot) and expression.unpivot):
        return expression

    return transforms.unqualify_columns(expression)
330
331
class Snowflake(Dialect):
    """Snowflake SQL dialect: normalization rules, time formats, tokenizing,
    parsing and generation specifics."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    # NULLs sort as the largest values
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # A CTE alias column takes precedence over an identically-named projection
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake format tokens -> strftime-style tokens
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
370
371    def quote_identifier(self, expression: E, identify: bool = True) -> E:
372        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
373        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
374        if (
375            isinstance(expression, exp.Identifier)
376            and isinstance(expression.parent, exp.Table)
377            and expression.name.lower() == "dual"
378        ):
379            return t.cast(E, expression)
380
381        return super().quote_identifier(expression, identify=identify)
382
    class Parser(parser.Parser):
        """Snowflake-specific parsing: function arg orders, staged files,
        AT|BEFORE time travel, SHOW statements and the `:` extraction operator."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names -> expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake, not with the
        # special TRIM(... FROM ...) syntax
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "COLUMNS": _show_parser("COLUMNS"),
        }

        # Single-char tokens that can appear inside a staged file path (@stage/...)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Column names produced by FLATTEN, in positional order
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key/value entry inside bracket syntax; map keys must be strings."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's fixed output columns as the alias columns."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias given: synthesize one so the FLATTEN columns are addressable
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse an optional AT/BEFORE time-travel clause onto `table`."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause after all — rewind
                    self._retreat(index)

            return table

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, including staged-file references with
            optional FILE_FORMAT/PATTERN options."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(...) indirection."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement: optional LIKE and IN <scope>."""
            scope = None
            scope_kind = None

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text
                    if self._curr:
                        scope = self._parse_table()
                elif self._curr:
                    # Bare `IN <name>` defaults to table scope
                    scope_kind = "TABLE"
                    scope = self._parse_table()

            return self.expression(
                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP [WITH] <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            """Parse a LOCATION [=] <path> property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file location path (@stage/dir/file) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
617
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizing: escapes, raw strings, keyword aliases."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ dollar-quoted strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces parameters (e.g. $1) and session variables
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement (see Parser), not as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
657
    class Generator(generator.Generator):
        """Snowflake-specific SQL generation: renames, transforms and type names."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"

        # Expression node -> Snowflake SQL rendering
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), the reverse of the generic node
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * EXCEPT/REPLACE keywords in Snowflake
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
769
770        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
771            milli = expression.args.get("milli")
772            if milli is not None:
773                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
774                expression.set("nano", milli_to_nano)
775
776            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
777
778        def trycast_sql(self, expression: exp.TryCast) -> str:
779            value = expression.this
780
781            if value.type is None:
782                from sqlglot.optimizer.annotate_types import annotate_types
783
784                value = annotate_types(value)
785
786            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
787                return super().trycast_sql(expression)
788
789            # TRY_CAST only works for string values in Snowflake
790            return self.cast_sql(expression)
791
792        def log_sql(self, expression: exp.Log) -> str:
793            if not expression.expression:
794                return self.func("LN", expression.this)
795
796            return super().log_sql(expression)
797
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Generate UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) construct."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN yields the fixed columns (seq, key, path, index, value, this);
            # splice any requested offset/value alias columns into those positions.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
821
822        def show_sql(self, expression: exp.Show) -> str:
823            like = self.sql(expression, "like")
824            like = f" LIKE {like}" if like else ""
825
826            scope = self.sql(expression, "scope")
827            scope = f" {scope}" if scope else ""
828
829            scope_kind = self.sql(expression, "scope_kind")
830            if scope_kind:
831                scope_kind = f" IN {scope_kind}"
832
833            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
834
835        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
836            # Other dialects don't support all of the following parameters, so we need to
837            # generate default values as necessary to ensure the transpilation is correct
838            group = expression.args.get("group")
839            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
840            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
841            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
842
843            return self.func(
844                "REGEXP_SUBSTR",
845                expression.this,
846                expression.expression,
847                position,
848                occurrence,
849                parameters,
850                group,
851            )
852
853        def except_op(self, expression: exp.Except) -> str:
854            if not expression.args.get("distinct", False):
855                self.unsupported("EXCEPT with All is not supported in Snowflake")
856            return super().except_op(expression)
857
858        def intersect_op(self, expression: exp.Intersect) -> str:
859            if not expression.args.get("distinct", False):
860                self.unsupported("INTERSECT with All is not supported in Snowflake")
861            return super().intersect_op(expression)
862
863        def describe_sql(self, expression: exp.Describe) -> str:
864            # Default to table if kind is unknown
865            kind_value = expression.args.get("kind") or "TABLE"
866            kind = f" {kind_value}" if kind_value else ""
867            this = f" {self.sql(expression, 'this')}"
868            expressions = self.expressions(expression, flat=True)
869            expressions = f" {expressions}" if expressions else ""
870            return f"DESCRIBE{kind}{this}{expressions}"
871
872        def generatedasidentitycolumnconstraint_sql(
873            self, expression: exp.GeneratedAsIdentityColumnConstraint
874        ) -> str:
875            start = expression.args.get("start")
876            start = f" START {start}" if start else ""
877            increment = expression.args.get("increment")
878            increment = f" INCREMENT {increment}" if increment else ""
879            return f"AUTOINCREMENT{start}{increment}"
880
881        def swaptable_sql(self, expression: exp.SwapTable) -> str:
882            this = self.sql(expression, "this")
883            return f"SWAP WITH {this}"
884
885        def with_properties(self, properties: exp.Properties) -> str:
886            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
# Canonical spellings for Snowflake's date/time part aliases: every accepted
# abbreviation (e.g. "YY", "YRS") maps to its canonical part name ("YEAR").
DATE_PART_MAPPING = {
    "Y": "YEAR", "YY": "YEAR", "YYY": "YEAR", "YYYY": "YEAR",
    "YR": "YEAR", "YEARS": "YEAR", "YRS": "YEAR",
    "MM": "MONTH", "MON": "MONTH", "MONS": "MONTH", "MONTHS": "MONTH",
    "D": "DAY", "DD": "DAY", "DAYS": "DAY", "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK", "DOW": "DAYOFWEEK", "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO", "DOW_ISO": "DAYOFWEEKISO", "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR", "DOY": "DAYOFYEAR", "DY": "DAYOFYEAR",
    "W": "WEEK", "WK": "WEEK", "WEEKOFYEAR": "WEEK", "WOY": "WEEK", "WY": "WEEK",
    "WEEK_ISO": "WEEKISO", "WEEKOFYEARISO": "WEEKISO", "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER", "QTR": "QUARTER", "QTRS": "QUARTER", "QUARTERS": "QUARTER",
    "H": "HOUR", "HH": "HOUR", "HR": "HOUR", "HOURS": "HOUR", "HRS": "HOUR",
    "M": "MINUTE", "MI": "MINUTE", "MIN": "MINUTE", "MINUTES": "MINUTE", "MINS": "MINUTE",
    "S": "SECOND", "SEC": "SECOND", "SECONDS": "SECOND", "SECS": "SECOND",
    "MS": "MILLISECOND", "MSEC": "MILLISECOND", "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND", "USEC": "MICROSECOND", "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND", "NSEC": "NANOSECOND", "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND", "NSECONDS": "NANOSECOND", "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND", "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
class Snowflake(Dialect):
    """Snowflake SQL dialect."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    # Unquoted identifiers are normalized to uppercase.
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    # A CTE's column aliases may be referenced inside the CTE itself (e.g. HAVING).
    PREFER_CTE_ALIAS_COLUMN = True
    # TABLESAMPLE (n) means n percent of rows, not n rows.
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens -> Python strftime equivalents.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }

    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return t.cast(E, expression)

        return super().quote_identifier(expression, identify=identify)
    class Parser(parser.Parser):
        # Treat single-quoted PIVOT aliases as identifiers rather than strings.
        IDENTIFY_PIVOT_STRINGS = True

        # WINDOW is a valid table alias in Snowflake.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Snowflake's argument order is (array, value); sqlglot's node is (value, array).
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            # DATEADD(unit, delta, expr): unit aliases are normalized via _map_date_part.
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM uses ordinary function-call syntax here, so drop the special parser.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is excluded from the timestamp-keyword set.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            # ':' performs semi-structured (variant) path extraction.
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... SET/UNSET may be followed by TAG for object tagging.
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "COLUMNS": _show_parser("COLUMNS"),
        }

        # Single-character tokens that may appear inside a staged file path (@stage/a.b%c).
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
487
        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse one key/value inside bracket syntax; map keys must be strings."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, defaulting FLATTEN's alias columns when absent."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                # FLATTEN always yields the same six columns; name them explicitly
                # so downstream transpilation can address them.
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Parse a trailing AT/BEFORE time-travel clause onto `table`.

            https://docs.snowflake.com/en/sql-reference/constructs/at-before
            Backtracks (via _retreat) if the clause does not fully match.
            """
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # e.g. AT(TIMESTAMP => ...): kind is the keyword inside the parens.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind to where we started.
                    self._retreat(index)

            return table

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, including staged-file tables.

            https://docs.snowflake.com/en/user-guide/querying-stage
            Stage references ('path' or @stage/path) may carry FILE_FORMAT and
            PATTERN options in parentheses.
            """
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT => 'name' or FILE_FORMAT => (a table reference)
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            # Any table reference may be followed by a time-travel clause.
            return self._parse_at_before(table)
564
565        def _parse_id_var(
566            self,
567            any_token: bool = True,
568            tokens: t.Optional[t.Collection[TokenType]] = None,
569        ) -> t.Optional[exp.Expression]:
570            if self._match_text_seq("IDENTIFIER", "("):
571                identifier = (
572                    super()._parse_id_var(any_token=any_token, tokens=tokens)
573                    or self._parse_string()
574                )
575                self._match_r_paren()
576                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
577
578            return super()._parse_id_var(any_token=any_token, tokens=tokens)
579
580        def _parse_show_snowflake(self, this: str) -> exp.Show:
581            scope = None
582            scope_kind = None
583
584            like = self._parse_string() if self._match(TokenType.LIKE) else None
585
586            if self._match(TokenType.IN):
587                if self._match_text_seq("ACCOUNT"):
588                    scope_kind = "ACCOUNT"
589                elif self._match_set(self.DB_CREATABLES):
590                    scope_kind = self._prev.text
591                    if self._curr:
592                        scope = self._parse_table()
593                elif self._curr:
594                    scope_kind = "TABLE"
595                    scope = self._parse_table()
596
597            return self.expression(
598                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
599            )
600
601        def _parse_alter_table_swap(self) -> exp.SwapTable:
602            self._match_text_seq("WITH")
603            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
604
605        def _parse_location(self) -> exp.LocationProperty:
606            self._match(TokenType.EQ)
607            return self.expression(exp.LocationProperty, this=self._parse_location_path())
608
609        def _parse_location_path(self) -> exp.Var:
610            parts = [self._advance_any(ignore_reserved=True)]
611
612            # We avoid consuming a comma token because external tables like @foo and @bar
613            # can be joined in a query with a comma separator.
614            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
615                parts.append(self._advance_any(ignore_reserved=True))
616
617            return exp.var("".join(part.text for part in parts if part))
618
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled single quote escape inside strings.
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits dollar-quoted (raw) strings.
        RAW_STRINGS = ["$$"]
        # Snowflake additionally accepts // line comments.
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage-manipulation statements are passed through as commands.
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # $ introduces session parameters / positional stage references.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed into an AST here rather than treated as an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
658
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        # COLLATE is rendered as a function call rather than an operator.
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"

        # Expression-node -> SQL renderers specific to Snowflake.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array) in Snowflake, reversed from the node.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            # CONVERT_TIMEZONE(target_tz, timestamp)
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # GenerateSeries is inclusive; ARRAY_GENERATE_RANGE's end is exclusive,
            # so add 1 back when generating.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            # JSON extraction uses bracket syntax: expr[path]
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            # Struct literals become OBJECT_CONSTRUCT(key1, val1, key2, val2, ...).
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            # Plain TIMESTAMP maps to the no-timezone variant.
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # Snowflake spells SELECT * EXCEPT/REPLACE as EXCLUDE/RENAME.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        # SET and VOLATILE table properties have no Snowflake equivalent.
        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
770
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Generate TIMESTAMP_FROM_PARTS, folding a `milli` arg into `nano`."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake's signature takes nanoseconds: 1 ms == 1_000_000 ns.
                # pop() removes `milli` so it isn't rendered twice.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Emit TRY_CAST only for string operands; otherwise fall back to CAST."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                # Infer the operand's type so strings can be told apart.
                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Single-argument LOG is the natural logarithm, spelled LN in Snowflake."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST to Snowflake's TABLE(FLATTEN(INPUT => ...)).

            FLATTEN exposes a fixed set of output columns (SEQ, KEY, PATH,
            INDEX, VALUE, THIS), so an alias column list is built that maps the
            caller's names onto that layout.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # A WITH OFFSET alias (popped so it isn't generated twice) maps
                # onto FLATTEN's INDEX column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column names the VALUE column.
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
822
823        def show_sql(self, expression: exp.Show) -> str:
824            like = self.sql(expression, "like")
825            like = f" LIKE {like}" if like else ""
826
827            scope = self.sql(expression, "scope")
828            scope = f" {scope}" if scope else ""
829
830            scope_kind = self.sql(expression, "scope_kind")
831            if scope_kind:
832                scope_kind = f" IN {scope_kind}"
833
834            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
835
836        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
837            # Other dialects don't support all of the following parameters, so we need to
838            # generate default values as necessary to ensure the transpilation is correct
839            group = expression.args.get("group")
840            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
841            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
842            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
843
844            return self.func(
845                "REGEXP_SUBSTR",
846                expression.this,
847                expression.expression,
848                position,
849                occurrence,
850                parameters,
851                group,
852            )
853
854        def except_op(self, expression: exp.Except) -> str:
855            if not expression.args.get("distinct", False):
856                self.unsupported("EXCEPT with All is not supported in Snowflake")
857            return super().except_op(expression)
858
859        def intersect_op(self, expression: exp.Intersect) -> str:
860            if not expression.args.get("distinct", False):
861                self.unsupported("INTERSECT with All is not supported in Snowflake")
862            return super().intersect_op(expression)
863
864        def describe_sql(self, expression: exp.Describe) -> str:
865            # Default to table if kind is unknown
866            kind_value = expression.args.get("kind") or "TABLE"
867            kind = f" {kind_value}" if kind_value else ""
868            this = f" {self.sql(expression, 'this')}"
869            expressions = self.expressions(expression, flat=True)
870            expressions = f" {expressions}" if expressions else ""
871            return f"DESCRIBE{kind}{this}{expressions}"
872
873        def generatedasidentitycolumnconstraint_sql(
874            self, expression: exp.GeneratedAsIdentityColumnConstraint
875        ) -> str:
876            start = expression.args.get("start")
877            start = f" START {start}" if start else ""
878            increment = expression.args.get("increment")
879            increment = f" INCREMENT {increment}" if increment else ""
880            return f"AUTOINCREMENT{start}{increment}"
881
882        def swaptable_sql(self, expression: exp.SwapTable) -> str:
883            this = self.sql(expression, "this")
884            return f"SWAP WITH {this}"
885
886        def with_properties(self, properties: exp.Properties) -> str:
887            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Indicates the default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Determines whether or not user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Determines whether or not SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Determines whether a size in the table sample clause represents a percentage of rows rather than a row count.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime format.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
372    def quote_identifier(self, expression: E, identify: bool = True) -> E:
373        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
374        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
375        if (
376            isinstance(expression, exp.Identifier)
377            and isinstance(expression.parent, exp.Table)
378            and expression.name.lower() == "dual"
379        ):
380            return t.cast(E, expression)
381
382        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
384    class Parser(parser.Parser):
385        IDENTIFY_PIVOT_STRINGS = True
386
387        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
388
389        FUNCTIONS = {
390            **parser.Parser.FUNCTIONS,
391            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
392            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
393            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
394                this=seq_get(args, 1), expression=seq_get(args, 0)
395            ),
396            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
397            # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
398                start=seq_get(args, 0),
399                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
400                step=seq_get(args, 2),
401            ),
402            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
403            "BITXOR": binary_from_function(exp.BitwiseXor),
404            "BIT_XOR": binary_from_function(exp.BitwiseXor),
405            "BOOLXOR": binary_from_function(exp.Xor),
406            "CONVERT_TIMEZONE": _parse_convert_timezone,
407            "DATE_TRUNC": _date_trunc_to_time,
408            "DATEADD": lambda args: exp.DateAdd(
409                this=seq_get(args, 2),
410                expression=seq_get(args, 1),
411                unit=_map_date_part(seq_get(args, 0)),
412            ),
413            "DATEDIFF": _parse_datediff,
414            "DIV0": _div0_to_if,
415            "FLATTEN": exp.Explode.from_arg_list,
416            "IFF": exp.If.from_arg_list,
417            "LAST_DAY": lambda args: exp.LastDay(
418                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
419            ),
420            "LISTAGG": exp.GroupConcat.from_arg_list,
421            "NULLIFZERO": _nullifzero_to_if,
422            "OBJECT_CONSTRUCT": _parse_object_construct,
423            "REGEXP_REPLACE": _parse_regexp_replace,
424            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
425            "RLIKE": exp.RegexpLike.from_arg_list,
426            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
427            "TIMEDIFF": _parse_datediff,
428            "TIMESTAMPDIFF": _parse_datediff,
429            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
430            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
431            "TO_TIMESTAMP": _parse_to_timestamp,
432            "TO_VARCHAR": exp.ToChar.from_arg_list,
433            "ZEROIFNULL": _zeroifnull_to_if,
434        }
435
436        FUNCTION_PARSERS = {
437            **parser.Parser.FUNCTION_PARSERS,
438            "DATE_PART": _parse_date_part,
439            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
440        }
441        FUNCTION_PARSERS.pop("TRIM")
442
443        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
444
445        RANGE_PARSERS = {
446            **parser.Parser.RANGE_PARSERS,
447            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
448            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
449            TokenType.COLON: _parse_colon_get_path,
450        }
451
452        ALTER_PARSERS = {
453            **parser.Parser.ALTER_PARSERS,
454            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
455            "UNSET": lambda self: self.expression(
456                exp.Set,
457                tag=self._match_text_seq("TAG"),
458                expressions=self._parse_csv(self._parse_id_var),
459                unset=True,
460            ),
461            "SWAP": lambda self: self._parse_alter_table_swap(),
462        }
463
464        STATEMENT_PARSERS = {
465            **parser.Parser.STATEMENT_PARSERS,
466            TokenType.SHOW: lambda self: self._parse_show(),
467        }
468
469        PROPERTY_PARSERS = {
470            **parser.Parser.PROPERTY_PARSERS,
471            "LOCATION": lambda self: self._parse_location(),
472        }
473
474        SHOW_PARSERS = {
475            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
476            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
477            "COLUMNS": _show_parser("COLUMNS"),
478        }
479
480        STAGED_FILE_SINGLE_TOKENS = {
481            TokenType.DOT,
482            TokenType.MOD,
483            TokenType.SLASH,
484        }
485
486        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
487
488        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
489            if is_map:
490                # Keys are strings in Snowflake's objects, see also:
491                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
492                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
493                return self._parse_slice(self._parse_string())
494
495            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
496
497        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
498            lateral = super()._parse_lateral()
499            if not lateral:
500                return lateral
501
502            if isinstance(lateral.this, exp.Explode):
503                table_alias = lateral.args.get("alias")
504                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
505                if table_alias and not table_alias.args.get("columns"):
506                    table_alias.set("columns", columns)
507                elif not table_alias:
508                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
509
510            return lateral
511
512        def _parse_at_before(self, table: exp.Table) -> exp.Table:
513            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
514            index = self._index
515            if self._match_texts(("AT", "BEFORE")):
516                this = self._prev.text.upper()
517                kind = (
518                    self._match(TokenType.L_PAREN)
519                    and self._match_texts(self.HISTORICAL_DATA_KIND)
520                    and self._prev.text.upper()
521                )
522                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
523
524                if expression:
525                    self._match_r_paren()
526                    when = self.expression(
527                        exp.HistoricalData, this=this, kind=kind, expression=expression
528                    )
529                    table.set("when", when)
530                else:
531                    self._retreat(index)
532
533            return table
534
535        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
536            # https://docs.snowflake.com/en/user-guide/querying-stage
537            if self._match(TokenType.STRING, advance=False):
538                table = self._parse_string()
539            elif self._match_text_seq("@", advance=False):
540                table = self._parse_location_path()
541            else:
542                table = None
543
544            if table:
545                file_format = None
546                pattern = None
547
548                self._match(TokenType.L_PAREN)
549                while self._curr and not self._match(TokenType.R_PAREN):
550                    if self._match_text_seq("FILE_FORMAT", "=>"):
551                        file_format = self._parse_string() or super()._parse_table_parts()
552                    elif self._match_text_seq("PATTERN", "=>"):
553                        pattern = self._parse_string()
554                    else:
555                        break
556
557                    self._match(TokenType.COMMA)
558
559                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
560            else:
561                table = super()._parse_table_parts(schema=schema)
562
563            return self._parse_at_before(table)
564
565        def _parse_id_var(
566            self,
567            any_token: bool = True,
568            tokens: t.Optional[t.Collection[TokenType]] = None,
569        ) -> t.Optional[exp.Expression]:
570            if self._match_text_seq("IDENTIFIER", "("):
571                identifier = (
572                    super()._parse_id_var(any_token=any_token, tokens=tokens)
573                    or self._parse_string()
574                )
575                self._match_r_paren()
576                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
577
578            return super()._parse_id_var(any_token=any_token, tokens=tokens)
579
580        def _parse_show_snowflake(self, this: str) -> exp.Show:
581            scope = None
582            scope_kind = None
583
584            like = self._parse_string() if self._match(TokenType.LIKE) else None
585
586            if self._match(TokenType.IN):
587                if self._match_text_seq("ACCOUNT"):
588                    scope_kind = "ACCOUNT"
589                elif self._match_set(self.DB_CREATABLES):
590                    scope_kind = self._prev.text
591                    if self._curr:
592                        scope = self._parse_table()
593                elif self._curr:
594                    scope_kind = "TABLE"
595                    scope = self._parse_table()
596
597            return self.expression(
598                exp.Show, this=this, like=like, scope=scope, scope_kind=scope_kind
599            )
600
601        def _parse_alter_table_swap(self) -> exp.SwapTable:
602            self._match_text_seq("WITH")
603            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
604
605        def _parse_location(self) -> exp.LocationProperty:
606            self._match(TokenType.EQ)
607            return self.expression(exp.LocationProperty, this=self._parse_location_path())
608
609        def _parse_location_path(self) -> exp.Var:
610            parts = [self._advance_any(ignore_reserved=True)]
611
612            # We avoid consuming a comma token because external tables like @foo and @bar
613            # can be joined in a query with a comma separator.
614            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
615                parts.append(self._advance_any(ignore_reserved=True))
616
617            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.FUNCTION: 'FUNCTION'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.JSON: 'JSON'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.LOAD: 'LOAD'>, <TokenType.SOME: 'SOME'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.INET: 'INET'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.CACHE: 'CACHE'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.UINT128: 'UINT128'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DELETE: 'DELETE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.INT256: 'INT256'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.KILL: 'KILL'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.FINAL: 'FINAL'>, <TokenType.IPV4: 'IPV4'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.BIT: 'BIT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.ANTI: 'ANTI'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ALL: 'ALL'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.IS: 'IS'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.ROW: 'ROW'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 
<TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.END: 'END'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.TEXT: 'TEXT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.FALSE: 'FALSE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.JSONB: 'JSONB'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.NESTED: 'NESTED'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.VIEW: 'VIEW'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.DATE: 'DATE'>, <TokenType.INT: 'INT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.DATE32: 'DATE32'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TIME: 'TIME'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MAP: 'MAP'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.DIV: 'DIV'>, <TokenType.MONEY: 'MONEY'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.DESC: 'DESC'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.CHAR: 'CHAR'>, <TokenType.XML: 'XML'>, <TokenType.INT128: 'INT128'>, <TokenType.NULL: 'NULL'>, 
<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.KEEP: 'KEEP'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.USE: 'USE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.UINT: 'UINT'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.ANY: 'ANY'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ENUM: 'ENUM'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TOP: 'TOP'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.TABLE: 'TABLE'>, <TokenType.UUID: 'UUID'>, <TokenType.IPV6: 'IPV6'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.NEXT: 'NEXT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.SET: 'SET'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.ASC: 'ASC'>, <TokenType.VAR: 'VAR'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.CASE: 'CASE'>, <TokenType.FIRST: 'FIRST'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _parse_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 
'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GET_PATH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetPath'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LEAST': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function parse_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _parse_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _parse_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _div0_to_if>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _nullifzero_to_if>, 'OBJECT_CONSTRUCT': <function _parse_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _parse_datediff>, 'TIMESTAMPDIFF': <function _parse_datediff>, 'TO_TIMESTAMP': <function _parse_to_timestamp>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _zeroifnull_to_if>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function _parse_date_part>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.COLON: 'COLON'>: <function _parse_colon_get_path>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function 
Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.MOD: 'MOD'>, <TokenType.SLASH: 'SLASH'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SHOW_TRIE: Dict = {'PRIMARY': {'KEYS': {0: True}}, 'TERSE': {'PRIMARY': {'KEYS': {0: True}}}, 'COLUMNS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
619    class Tokenizer(tokens.Tokenizer):
620        STRING_ESCAPES = ["\\", "'"]
621        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
622        RAW_STRINGS = ["$$"]
623        COMMENTS = ["--", "//", ("/*", "*/")]
624
625        KEYWORDS = {
626            **tokens.Tokenizer.KEYWORDS,
627            "BYTEINT": TokenType.INT,
628            "CHAR VARYING": TokenType.VARCHAR,
629            "CHARACTER VARYING": TokenType.VARCHAR,
630            "EXCLUDE": TokenType.EXCEPT,
631            "ILIKE ANY": TokenType.ILIKE_ANY,
632            "LIKE ANY": TokenType.LIKE_ANY,
633            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
634            "MINUS": TokenType.EXCEPT,
635            "NCHAR VARYING": TokenType.VARCHAR,
636            "PUT": TokenType.COMMAND,
637            "REMOVE": TokenType.COMMAND,
638            "RENAME": TokenType.REPLACE,
639            "RM": TokenType.COMMAND,
640            "SAMPLE": TokenType.TABLE_SAMPLE,
641            "SQL_DOUBLE": TokenType.DOUBLE,
642            "SQL_VARCHAR": TokenType.VARCHAR,
643            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
644            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
645            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
646            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
647            "TOP": TokenType.TOP,
648        }
649
650        SINGLE_TOKENS = {
651            **tokens.Tokenizer.SINGLE_TOKENS,
652            "$": TokenType.PARAMETER,
653        }
654
655        VAR_SINGLE_TOKENS = {"$"}
656
657        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': 
<TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': 
<TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 
'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': 
<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': 
<TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>}
class Snowflake.Generator(sqlglot.generator.Generator):
659    class Generator(generator.Generator):
660        PARAMETER_TOKEN = "$"
661        MATCHED_BY_SOURCE = False
662        SINGLE_STRING_INTERVAL = True
663        JOIN_HINTS = False
664        TABLE_HINTS = False
665        QUERY_HINTS = False
666        AGGREGATE_FILTER_SUPPORTED = False
667        SUPPORTS_TABLE_COPY = False
668        COLLATE_IS_FUNC = True
669        LIMIT_ONLY_LITERALS = True
670        JSON_KEY_VALUE_PAIR_SEP = ","
671        INSERT_OVERWRITE = " OVERWRITE INTO"
672
673        TRANSFORMS = {
674            **generator.Generator.TRANSFORMS,
675            exp.ArgMax: rename_func("MAX_BY"),
676            exp.ArgMin: rename_func("MIN_BY"),
677            exp.Array: inline_array_sql,
678            exp.ArrayConcat: rename_func("ARRAY_CAT"),
679            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
680            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
681            exp.AtTimeZone: lambda self, e: self.func(
682                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
683            ),
684            exp.BitwiseXor: rename_func("BITXOR"),
685            exp.DateAdd: date_delta_sql("DATEADD"),
686            exp.DateDiff: date_delta_sql("DATEDIFF"),
687            exp.DateStrToDate: datestrtodate_sql,
688            exp.DataType: _datatype_sql,
689            exp.DayOfMonth: rename_func("DAYOFMONTH"),
690            exp.DayOfWeek: rename_func("DAYOFWEEK"),
691            exp.DayOfYear: rename_func("DAYOFYEAR"),
692            exp.Explode: rename_func("FLATTEN"),
693            exp.Extract: rename_func("DATE_PART"),
694            exp.GenerateSeries: lambda self, e: self.func(
695                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
696            ),
697            exp.GroupConcat: rename_func("LISTAGG"),
698            exp.If: if_sql(name="IFF", false_value="NULL"),
699            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
700            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
701            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
702            exp.LogicalOr: rename_func("BOOLOR_AGG"),
703            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
704            exp.Max: max_or_greatest,
705            exp.Min: min_or_least,
706            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
707            exp.PercentileCont: transforms.preprocess(
708                [transforms.add_within_group_for_percentiles]
709            ),
710            exp.PercentileDisc: transforms.preprocess(
711                [transforms.add_within_group_for_percentiles]
712            ),
713            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
714            exp.RegexpILike: _regexpilike_sql,
715            exp.Rand: rename_func("RANDOM"),
716            exp.Select: transforms.preprocess(
717                [
718                    transforms.eliminate_distinct_on,
719                    transforms.explode_to_unnest(),
720                    transforms.eliminate_semi_and_anti_joins,
721                ]
722            ),
723            exp.SHA: rename_func("SHA1"),
724            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
725            exp.StartsWith: rename_func("STARTSWITH"),
726            exp.StrPosition: lambda self, e: self.func(
727                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
728            ),
729            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
730            exp.Struct: lambda self, e: self.func(
731                "OBJECT_CONSTRUCT",
732                *(arg for expression in e.expressions for arg in expression.flatten()),
733            ),
734            exp.Stuff: rename_func("INSERT"),
735            exp.TimestampDiff: lambda self, e: self.func(
736                "TIMESTAMPDIFF", e.unit, e.expression, e.this
737            ),
738            exp.TimestampTrunc: timestamptrunc_sql,
739            exp.TimeStrToTime: timestrtotime_sql,
740            exp.TimeToStr: lambda self, e: self.func(
741                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
742            ),
743            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
744            exp.ToArray: rename_func("TO_ARRAY"),
745            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
746            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
747            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
748            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
749            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
750            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
751            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
752            exp.Xor: rename_func("BOOLXOR"),
753        }
754
755        TYPE_MAPPING = {
756            **generator.Generator.TYPE_MAPPING,
757            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
758        }
759
760        STAR_MAPPING = {
761            "except": "EXCLUDE",
762            "replace": "RENAME",
763        }
764
765        PROPERTIES_LOCATION = {
766            **generator.Generator.PROPERTIES_LOCATION,
767            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
768            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
769        }
770
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Render TIMESTAMP_FROM_PARTS, folding any ``milli`` arg into ``nano``.

    Snowflake's TIMESTAMP_FROM_PARTS takes a nanoseconds argument rather
    than milliseconds, so a ``milli`` value is converted (1 ms == 1_000_000 ns)
    and re-attached as ``nano`` before rendering.
    """
    ms = expression.args.get("milli")
    if ms is not None:
        expression.set("nano", ms.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
778
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Emit TRY_CAST only for (possibly) textual operands.

    Snowflake's TRY_CAST is restricted to string inputs, so for any operand
    known to be non-textual this falls back to a plain CAST instead.
    """
    value = expression.this

    if value.type is None:
        # Operand type unknown — annotate lazily; imported here to avoid
        # paying the optimizer import cost unless it is actually needed.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    if not value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        # TRY_CAST only works for string values in Snowflake
        return self.cast_sql(expression)

    return super().trycast_sql(expression)
792
def log_sql(self, expression: exp.Log) -> str:
    """Render LOG; a single-argument LOG is the natural log, i.e. LN."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
798
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's ``TABLE(FLATTEN(INPUT => ...))``.

    FLATTEN exposes six output columns (seq, key, path, index, value, this);
    the caller-supplied alias column and offset identifier are slotted into
    the ``value`` and ``index`` positions respectively when present.
    """
    table_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    value_column = seq_get(table_alias.columns if table_alias else [], 0)
    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
        value_column or exp.to_identifier("value"),
        exp.to_identifier("this"),
    ]

    if table_alias:
        table_alias.set("columns", columns)
    else:
        # No alias given — synthesize one so the FLATTEN columns are named.
        table_alias = exp.TableAlias(this="_u", columns=columns)

    flattened = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(table_alias)
    return f"{flattened} AS {alias_sql}" if alias_sql else flattened
822
def show_sql(self, expression: exp.Show) -> str:
    """Render a Snowflake SHOW command with optional LIKE / IN / scope parts."""
    like_sql = self.sql(expression, "like")
    scope_sql = self.sql(expression, "scope")
    scope_kind_sql = self.sql(expression, "scope_kind")

    parts = [f"SHOW {expression.name}"]
    if like_sql:
        parts.append(f"LIKE {like_sql}")
    if scope_kind_sql:
        parts.append(f"IN {scope_kind_sql}")
    if scope_sql:
        parts.append(scope_sql)

    return " ".join(parts)
835
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Render REGEXP_SUBSTR, back-filling defaults for positional args.

    Other dialects don't support all of REGEXP_SUBSTR's parameters, so
    whenever a later argument (group/parameters/occurrence) is present,
    every earlier optional argument must be rendered with its default
    value to keep the positional call correct.
    """
    group = expression.args.get("group")
    parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

    call_args = (
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
    return self.func("REGEXP_SUBSTR", *call_args)
853
def except_op(self, expression: exp.Except) -> str:
    """Snowflake supports only EXCEPT DISTINCT; warn when ALL is requested."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
858
def intersect_op(self, expression: exp.Intersect) -> str:
    """Snowflake supports only INTERSECT DISTINCT; warn when ALL is requested."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
863
def describe_sql(self, expression: exp.Describe) -> str:
    """Render DESCRIBE, defaulting the object kind to TABLE when absent."""
    # Default to table if kind is unknown
    kind = expression.args.get("kind") or "TABLE"
    target = self.sql(expression, "this")
    extra = self.expressions(expression, flat=True)
    suffix = f" {extra}" if extra else ""
    return f"DESCRIBE {kind} {target}{suffix}"
872
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as ``AUTOINCREMENT [START n] [INCREMENT n]``."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f"INCREMENT {increment}")

    return " ".join(parts)
881
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the ``SWAP WITH <table>`` clause of ALTER TABLE."""
    return f"SWAP WITH {self.sql(expression, 'this')}"
885
def with_properties(self, properties: exp.Properties) -> str:
    """Render properties unwrapped and space-separated, after a leading segment."""
    leading = self.seg("")
    return self.properties(properties, wrapped=False, prefix=leading, sep=" ")

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DataType'>: <function _datatype_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: 
<function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: 
<function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 
'POST_WITH'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
771        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
772            milli = expression.args.get("milli")
773            if milli is not None:
774                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
775                expression.set("nano", milli_to_nano)
776
777            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
779        def trycast_sql(self, expression: exp.TryCast) -> str:
780            value = expression.this
781
782            if value.type is None:
783                from sqlglot.optimizer.annotate_types import annotate_types
784
785                value = annotate_types(value)
786
787            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
788                return super().trycast_sql(expression)
789
790            # TRY_CAST only works for string values in Snowflake
791            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
793        def log_sql(self, expression: exp.Log) -> str:
794            if not expression.expression:
795                return self.func("LN", expression.this)
796
797            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
799        def unnest_sql(self, expression: exp.Unnest) -> str:
800            unnest_alias = expression.args.get("alias")
801            offset = expression.args.get("offset")
802
803            columns = [
804                exp.to_identifier("seq"),
805                exp.to_identifier("key"),
806                exp.to_identifier("path"),
807                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
808                seq_get(unnest_alias.columns if unnest_alias else [], 0)
809                or exp.to_identifier("value"),
810                exp.to_identifier("this"),
811            ]
812
813            if unnest_alias:
814                unnest_alias.set("columns", columns)
815            else:
816                unnest_alias = exp.TableAlias(this="_u", columns=columns)
817
818            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
819            alias = self.sql(unnest_alias)
820            alias = f" AS {alias}" if alias else ""
821            return f"{explode}{alias}"
def show_sql(self, expression: sqlglot.expressions.Show) -> str:
823        def show_sql(self, expression: exp.Show) -> str:
824            like = self.sql(expression, "like")
825            like = f" LIKE {like}" if like else ""
826
827            scope = self.sql(expression, "scope")
828            scope = f" {scope}" if scope else ""
829
830            scope_kind = self.sql(expression, "scope_kind")
831            if scope_kind:
832                scope_kind = f" IN {scope_kind}"
833
834            return f"SHOW {expression.name}{like}{scope_kind}{scope}"
def regexpextract_sql(self, expression: sqlglot.expressions.RegexpExtract) -> str:
836        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
837            # Other dialects don't support all of the following parameters, so we need to
838            # generate default values as necessary to ensure the transpilation is correct
839            group = expression.args.get("group")
840            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
841            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
842            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
843
844            return self.func(
845                "REGEXP_SUBSTR",
846                expression.this,
847                expression.expression,
848                position,
849                occurrence,
850                parameters,
851                group,
852            )
def except_op(self, expression: sqlglot.expressions.Except) -> str:
854        def except_op(self, expression: exp.Except) -> str:
855            if not expression.args.get("distinct", False):
856                self.unsupported("EXCEPT with All is not supported in Snowflake")
857            return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Snowflake only supports INTERSECT [DISTINCT]; warn on INTERSECT ALL,
    then delegate the actual rendering to the base generator."""
    is_distinct = expression.args.get("distinct", False)
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Render a DESCRIBE statement, defaulting the kind to TABLE.

    Fix: the previous ``kind = f" {kind_value}" if kind_value else ""`` had an
    unreachable else branch — ``or "TABLE"`` guarantees ``kind_value`` is
    truthy — so the dead conditional is removed.
    """
    # Default to table if kind is unknown
    kind = expression.args.get("kind") or "TABLE"
    this = self.sql(expression, "this")
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE {kind} {this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column constraint as Snowflake's
    ``AUTOINCREMENT [START n] [INCREMENT n]``."""
    pieces = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        pieces.append(f"START {start}")

    increment = expression.args.get("increment")
    if increment:
        pieces.append(f"INCREMENT {increment}")

    return " ".join(pieces)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render the SWAP WITH clause of ``ALTER TABLE ... SWAP WITH <target>``."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def with_properties(self, properties: exp.Properties) -> str:
    """Emit properties unwrapped and space-separated, after a leading
    segment break (Snowflake places them inline rather than in a WITH block)."""
    prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
EXPRESSIONS_WITHOUT_NESTED_CTES
KEY_VALUE_DEFINITIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql