Edit on GitHub

sqlglot.dialects.snowflake

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot._typing import E
  7from sqlglot.dialects.dialect import (
  8    Dialect,
  9    NormalizationStrategy,
 10    binary_from_function,
 11    date_delta_sql,
 12    date_trunc_to_time,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    if_sql,
 16    inline_array_sql,
 17    json_keyvalue_comma_sql,
 18    max_or_greatest,
 19    min_or_least,
 20    rename_func,
 21    timestamptrunc_sql,
 22    timestrtotime_sql,
 23    var_map_sql,
 24)
 25from sqlglot.expressions import Literal
 26from sqlglot.helper import seq_get
 27from sqlglot.tokens import TokenType
 28
 29
 30def _check_int(s: str) -> bool:
 31    if s[0] in ("-", "+"):
 32        return s[1:].isdigit()
 33    return s.isdigit()
 34
 35
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _parse_to_timestamp(args: t.List) -> t.Union[exp.StrToTime, exp.UnixToTime, exp.TimeStrToTime]:
    """Map Snowflake's overloaded TO_TIMESTAMP onto the matching sqlglot node.

    The overload is resolved from the argument count and literal types:
      - (string, format)        -> StrToTime
      - (numeric, scale)        -> UnixToTime; scale must be 0, 3 or 9
      - single integer literal  -> UnixToTime
      - single date string      -> StrToTime with the dialect's default format
      - anything else           -> TimeStrToTime (variants, columns, ...)

    Raises:
        ValueError: if the two-argument numeric form has a scale other than 0, 3 or 9.
    """
    if len(args) == 2:
        first_arg, second_arg = args
        if second_arg.is_string:
            # case: <string_expr> [ , <format> ]
            return format_time_lambda(exp.StrToTime, "snowflake")(args)

        # case: <numeric_expr> [ , <scale> ]
        if second_arg.name not in ["0", "3", "9"]:
            raise ValueError(
                f"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9"
            )

        if second_arg.name == "0":
            timescale = exp.UnixToTime.SECONDS
        elif second_arg.name == "3":
            timescale = exp.UnixToTime.MILLIS
        elif second_arg.name == "9":
            timescale = exp.UnixToTime.NANOS

        return exp.UnixToTime(this=first_arg, scale=timescale)

    from sqlglot.optimizer.simplify import simplify_literals

    # The first argument might be an expression like 40 * 365 * 86400, so we try to
    # reduce it using `simplify_literals` first and then check if it's a Literal.
    first_arg = seq_get(args, 0)
    if not isinstance(simplify_literals(first_arg, root=True), Literal):
        # case: <variant_expr> or other expressions such as columns
        return exp.TimeStrToTime.from_arg_list(args)

    if first_arg.is_string:
        if _check_int(first_arg.this):
            # case: <integer>
            return exp.UnixToTime.from_arg_list(args)

        # case: <date_expr>
        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)

    # case: <numeric_expr>
    return exp.UnixToTime.from_arg_list(args)
 78
 79
 80def _parse_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
 81    expression = parser.parse_var_map(args)
 82
 83    if isinstance(expression, exp.StarMap):
 84        return expression
 85
 86    return exp.Struct(
 87        expressions=[
 88            t.cast(exp.Condition, k).eq(v) for k, v in zip(expression.keys, expression.values)
 89        ]
 90    )
 91
 92
 93def _parse_datediff(args: t.List) -> exp.DateDiff:
 94    return exp.DateDiff(
 95        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
 96    )
 97
 98
 99def _unix_to_time_sql(self: Snowflake.Generator, expression: exp.UnixToTime) -> str:
100    scale = expression.args.get("scale")
101    timestamp = self.sql(expression, "this")
102    if scale in (None, exp.UnixToTime.SECONDS):
103        return f"TO_TIMESTAMP({timestamp})"
104    if scale == exp.UnixToTime.MILLIS:
105        return f"TO_TIMESTAMP({timestamp}, 3)"
106    if scale == exp.UnixToTime.MICROS:
107        return f"TO_TIMESTAMP({timestamp} / 1000, 3)"
108    if scale == exp.UnixToTime.NANOS:
109        return f"TO_TIMESTAMP({timestamp}, 9)"
110
111    self.unsupported(f"Unsupported scale for timestamp: {scale}.")
112    return ""
113
114
# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
    """Parse DATE_PART(<part>, <expr>) into Extract, or into a unix-epoch
    computation for the EPOCH* parts, which have no Extract equivalent."""
    this = self._parse_var() or self._parse_type()

    if not this:
        return None

    self._match(TokenType.COMMA)
    expression = self._parse_bitwise()
    # Canonicalize abbreviations (e.g. "yy" -> YEAR) before inspecting the name.
    this = _map_date_part(this)
    name = this.name.upper()

    if name.startswith("EPOCH"):
        # EPOCH* becomes CAST(<expr> AS TIMESTAMP) -> unix seconds, scaled up
        # to the requested precision by a plain multiplication.
        if name == "EPOCH_MILLISECOND":
            scale = 10**3
        elif name == "EPOCH_MICROSECOND":
            scale = 10**6
        elif name == "EPOCH_NANOSECOND":
            scale = 10**9
        else:
            # Plain EPOCH / EPOCH_SECOND: no scaling needed.
            scale = None

        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

        if scale:
            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

        return to_unix

    return self.expression(exp.Extract, this=this, expression=expression)
147
148
# https://docs.snowflake.com/en/sql-reference/functions/div0
def _div0_to_if(args: t.List) -> exp.If:
    """Rewrite DIV0(a, b) into IF(b = 0, 0, a / b)."""
    numerator, denominator = seq_get(args, 0), seq_get(args, 1)
    is_zero = exp.EQ(this=denominator, expression=exp.Literal.number(0))
    safe_div = exp.Div(this=numerator, expression=denominator)
    return exp.If(this=is_zero, true=exp.Literal.number(0), false=safe_div)
155
156
# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _zeroifnull_to_if(args: t.List) -> exp.If:
    """Rewrite ZEROIFNULL(x) into IF(x IS NULL, 0, x)."""
    value = seq_get(args, 0)
    is_null = exp.Is(this=value, expression=exp.Null())
    return exp.If(this=is_null, true=exp.Literal.number(0), false=value)
161
162
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _nullifzero_to_if(args: t.List) -> exp.If:
    """Rewrite NULLIFZERO(x) into IF(x = 0, NULL, x)."""
    value = seq_get(args, 0)
    is_zero = exp.EQ(this=value, expression=exp.Literal.number(0))
    return exp.If(this=is_zero, true=exp.Null(), false=value)
167
168
def _datatype_sql(self: Snowflake.Generator, expression: exp.DataType) -> str:
    """Snowflake spells generic ARRAY/MAP types as ARRAY and OBJECT."""
    if expression.is_type("array"):
        return "ARRAY"
    if expression.is_type("map"):
        return "OBJECT"
    return self.datatype_sql(expression)
175
176
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
    """Render case-insensitive regex matching as REGEXP_LIKE with an 'i' flag."""
    flag = expression.text("flag")
    flag = flag if "i" in flag else flag + "i"

    call_args = (expression.this, expression.expression, exp.Literal.string(flag))
    return self.func("REGEXP_LIKE", *call_args)
186
187
def _parse_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
    """The 3-arg CONVERT_TIMEZONE has no generic equivalent; 2 args maps to AT TIME ZONE."""
    if len(args) != 3:
        return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
    return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
192
193
def _parse_regexp_replace(args: t.List) -> exp.RegexpReplace:
    """Parse REGEXP_REPLACE, defaulting a missing replacement to the empty string."""
    node = exp.RegexpReplace.from_arg_list(args)

    if not node.args.get("replacement"):
        node.set("replacement", exp.Literal.string(""))

    return node
201
202
203def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
204    def _parse(self: Snowflake.Parser) -> exp.Show:
205        return self._parse_show_snowflake(*args, **kwargs)
206
207    return _parse
208
209
# Canonicalization table for Snowflake date/time part aliases; see
# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
# Fix: the original listed "NSECONDS" twice — the duplicate key was silently
# dropped by the dict literal; it is removed here.
DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}
282
283
@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    """Normalize a date-part token (e.g. "yy") to its canonical form (e.g. YEAR).

    Unknown parts — and a missing *part* — are returned unchanged.
    """
    if not part:
        return part

    canonical = DATE_PART_MAPPING.get(part.name.upper())
    return exp.var(canonical) if canonical else part
297
298
def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    """Parse DATE_TRUNC and canonicalize its unit argument in place."""
    trunc = date_trunc_to_time(args)
    canonical_unit = _map_date_part(trunc.args["unit"])
    trunc.set("unit", canonical_unit)
    return trunc
303
304
def _parse_colon_get_path(
    self: parser.Parser, this: t.Optional[exp.Expression]
) -> t.Optional[exp.Expression]:
    """Parse Snowflake's `expr:path` extraction operator into chained GET_PATH nodes."""
    while True:
        path = self._parse_bitwise()

        # The cast :: operator has a lower precedence than the extraction operator :, so
        # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
        if isinstance(path, exp.Cast):
            target_type = path.to
            path = path.this
        else:
            target_type = None

        if isinstance(path, exp.Expression):
            # GET_PATH takes its path as a string literal, so re-render the parsed
            # expression back into Snowflake SQL text.
            path = exp.Literal.string(path.sql(dialect="snowflake"))

        # The extraction operator : is left-associative
        this = self.expression(exp.GetPath, this=this, expression=path)

        if target_type:
            # Re-apply the cast that was peeled off the path segment above.
            this = exp.cast(this, target_type)

        if not self._match(TokenType.COLON):
            break

    return this
332
333
def _parse_timestamp_from_parts(args: t.List) -> exp.Func:
    """Parse TIMESTAMP_FROM_PARTS; the two-argument (date, time) overload stays Anonymous."""
    if len(args) != 2:
        return exp.TimestampFromParts.from_arg_list(args)

    # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
    # so we parse this into Anonymous for now instead of introducing complexity
    return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)
341
342
class Snowflake(Dialect):
    """Dialect definition for Snowflake SQL."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens mapped to strftime-style directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
381
382    def quote_identifier(self, expression: E, identify: bool = True) -> E:
383        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
384        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
385        if (
386            isinstance(expression, exp.Identifier)
387            and isinstance(expression.parent, exp.Table)
388            and expression.name.lower() == "dual"
389        ):
390            return t.cast(E, expression)
391
392        return super().quote_identifier(expression, identify=identify)
393
    class Parser(parser.Parser):
        """Snowflake-specific parsing rules."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped onto sqlglot expression builders.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain arguments; drop the base dialect's special parser.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        }

        # Tokens that may appear, unquoted, inside a staged-file path like @stage/a.b%c.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed set of output columns produced by FLATTEN.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
496
497        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
498            if is_map:
499                # Keys are strings in Snowflake's objects, see also:
500                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
501                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
502                return self._parse_slice(self._parse_string())
503
504            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
505
506        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
507            lateral = super()._parse_lateral()
508            if not lateral:
509                return lateral
510
511            if isinstance(lateral.this, exp.Explode):
512                table_alias = lateral.args.get("alias")
513                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
514                if table_alias and not table_alias.args.get("columns"):
515                    table_alias.set("columns", columns)
516                elif not table_alias:
517                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
518
519            return lateral
520
        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            """Attach an AT/BEFORE time-travel clause to *table* when present."""
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                # Expect "(<kind> => <expr>)"; kind is one of HISTORICAL_DATA_KIND
                # (e.g. TIMESTAMP, OFFSET, STATEMENT) — presumably defined on the
                # base parser, not visible in this chunk.
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind and leave the table as-is.
                    self._retreat(index)

            return table
543
        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a table reference, including staged-file tables like @stage/path."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional "(FILE_FORMAT => ..., PATTERN => ...)" staged-file options.
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # The format may be a string literal or a (named) table reference.
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            # Any table form may be followed by an AT/BEFORE time-travel clause.
            return self._parse_at_before(table)
573
574        def _parse_id_var(
575            self,
576            any_token: bool = True,
577            tokens: t.Optional[t.Collection[TokenType]] = None,
578        ) -> t.Optional[exp.Expression]:
579            if self._match_text_seq("IDENTIFIER", "("):
580                identifier = (
581                    super()._parse_id_var(any_token=any_token, tokens=tokens)
582                    or self._parse_string()
583                )
584                self._match_r_paren()
585                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
586
587            return super()._parse_id_var(any_token=any_token, tokens=tokens)
588
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement, capturing an optional IN scope."""
            scope = None
            scope_kind = None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # e.g. IN DATABASE foo / IN SCHEMA bar
                    scope_kind = self._prev.text
                    if self._curr:
                        scope = self._parse_table()
                elif self._curr:
                    # A bare "IN <name>" defaults to table scope.
                    scope_kind = "TABLE"
                    scope = self._parse_table()

            return self.expression(exp.Show, this=this, scope=scope, scope_kind=scope_kind)
605
606        def _parse_alter_table_swap(self) -> exp.SwapTable:
607            self._match_text_seq("WITH")
608            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
609
610        def _parse_location(self) -> exp.LocationProperty:
611            self._match(TokenType.EQ)
612            return self.expression(exp.LocationProperty, this=self._parse_location_path())
613
614        def _parse_location_path(self) -> exp.Var:
615            parts = [self._advance_any(ignore_reserved=True)]
616
617            # We avoid consuming a comma token because external tables like @foo and @bar
618            # can be joined in a query with a comma separator.
619            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
620                parts.append(self._advance_any(ignore_reserved=True))
621
622            return exp.var("".join(part.text for part in parts if part))
623
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer tweaks for Snowflake's lexical quirks."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement (see Parser.STATEMENT_PARSERS),
        # not passed through opaquely as a command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
659
    class Generator(generator.Generator):
        """Snowflake-specific SQL generation rules."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True

        # sqlglot expression types mapped onto Snowflake SQL renderers.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # Inverse of the parser's ARRAY_GENERATE_RANGE handling: the end
            # bound is made exclusive again by adding 1.
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * EXCEPT/REPLACE are spelled EXCLUDE/RENAME in Snowflake.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
769
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a `milli` arg into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake's optional sub-second argument is nanoseconds, so
                # detach the millisecond node and scale it up by 10**6.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
777
778        def trycast_sql(self, expression: exp.TryCast) -> str:
779            value = expression.this
780
781            if value.type is None:
782                from sqlglot.optimizer.annotate_types import annotate_types
783
784                value = annotate_types(value)
785
786            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
787                return super().trycast_sql(expression)
788
789            # TRY_CAST only works for string values in Snowflake
790            return self.cast_sql(expression)
791
792        def log_sql(self, expression: exp.Log) -> str:
793            if not expression.expression:
794                return self.func("LN", expression.this)
795
796            return super().log_sql(expression)
797
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as TABLE(FLATTEN(...)), aliasing FLATTEN's fixed outputs."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN always yields these six columns, in this order. A requested
            # offset column takes INDEX's slot, and the first user-provided alias
            # column (if any) takes VALUE's slot.
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                # Synthesize an alias so the column names are addressable.
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
821
822        def show_sql(self, expression: exp.Show) -> str:
823            scope = self.sql(expression, "scope")
824            scope = f" {scope}" if scope else ""
825
826            scope_kind = self.sql(expression, "scope_kind")
827            if scope_kind:
828                scope_kind = f" IN {scope_kind}"
829
830            return f"SHOW {expression.name}{scope_kind}{scope}"
831
        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, filling in positional defaults as needed.

            REGEXP_SUBSTR's optional arguments are positional, so when a later
            argument is present every earlier one must also be emitted. The
            chained `or`s below backfill each missing earlier argument with its
            documented default (position 1, occurrence 1, parameters 'c') only
            when some later argument exists.
            """
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )
849
850        def except_op(self, expression: exp.Except) -> str:
851            if not expression.args.get("distinct", False):
852                self.unsupported("EXCEPT with All is not supported in Snowflake")
853            return super().except_op(expression)
854
855        def intersect_op(self, expression: exp.Intersect) -> str:
856            if not expression.args.get("distinct", False):
857                self.unsupported("INTERSECT with All is not supported in Snowflake")
858            return super().intersect_op(expression)
859
860        def describe_sql(self, expression: exp.Describe) -> str:
861            # Default to table if kind is unknown
862            kind_value = expression.args.get("kind") or "TABLE"
863            kind = f" {kind_value}" if kind_value else ""
864            this = f" {self.sql(expression, 'this')}"
865            expressions = self.expressions(expression, flat=True)
866            expressions = f" {expressions}" if expressions else ""
867            return f"DESCRIBE{kind}{this}{expressions}"
868
869        def generatedasidentitycolumnconstraint_sql(
870            self, expression: exp.GeneratedAsIdentityColumnConstraint
871        ) -> str:
872            start = expression.args.get("start")
873            start = f" START {start}" if start else ""
874            increment = expression.args.get("increment")
875            increment = f" INCREMENT {increment}" if increment else ""
876            return f"AUTOINCREMENT{start}{increment}"
877
878        def swaptable_sql(self, expression: exp.SwapTable) -> str:
879            this = self.sql(expression, "this")
880            return f"SWAP WITH {this}"
881
        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake renders table properties space-separated and unwrapped
            # (no parenthesized WITH (...) list).
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
# Maps Snowflake date/time-part aliases to their canonical part names, e.g.
# "YY" -> "YEAR", "DOW" -> "DAYOFWEEK". Used to normalize the unit argument of
# DATE_PART / DATEADD / DATEDIFF and friends during parsing.
DATE_PART_MAPPING = {'Y': 'YEAR', 'YY': 'YEAR', 'YYY': 'YEAR', 'YYYY': 'YEAR', 'YR': 'YEAR', 'YEARS': 'YEAR', 'YRS': 'YEAR', 'MM': 'MONTH', 'MON': 'MONTH', 'MONS': 'MONTH', 'MONTHS': 'MONTH', 'D': 'DAY', 'DD': 'DAY', 'DAYS': 'DAY', 'DAYOFMONTH': 'DAY', 'WEEKDAY': 'DAYOFWEEK', 'DOW': 'DAYOFWEEK', 'DW': 'DAYOFWEEK', 'WEEKDAY_ISO': 'DAYOFWEEKISO', 'DOW_ISO': 'DAYOFWEEKISO', 'DW_ISO': 'DAYOFWEEKISO', 'YEARDAY': 'DAYOFYEAR', 'DOY': 'DAYOFYEAR', 'DY': 'DAYOFYEAR', 'W': 'WEEK', 'WK': 'WEEK', 'WEEKOFYEAR': 'WEEK', 'WOY': 'WEEK', 'WY': 'WEEK', 'WEEK_ISO': 'WEEKISO', 'WEEKOFYEARISO': 'WEEKISO', 'WEEKOFYEAR_ISO': 'WEEKISO', 'Q': 'QUARTER', 'QTR': 'QUARTER', 'QTRS': 'QUARTER', 'QUARTERS': 'QUARTER', 'H': 'HOUR', 'HH': 'HOUR', 'HR': 'HOUR', 'HOURS': 'HOUR', 'HRS': 'HOUR', 'M': 'MINUTE', 'MI': 'MINUTE', 'MIN': 'MINUTE', 'MINUTES': 'MINUTE', 'MINS': 'MINUTE', 'S': 'SECOND', 'SEC': 'SECOND', 'SECONDS': 'SECOND', 'SECS': 'SECOND', 'MS': 'MILLISECOND', 'MSEC': 'MILLISECOND', 'MILLISECONDS': 'MILLISECOND', 'US': 'MICROSECOND', 'USEC': 'MICROSECOND', 'MICROSECONDS': 'MICROSECOND', 'NS': 'NANOSECOND', 'NSEC': 'NANOSECOND', 'NANOSEC': 'NANOSECOND', 'NSECOND': 'NANOSECOND', 'NSECONDS': 'NANOSECOND', 'NANOSECS': 'NANOSECOND', 'EPOCH': 'EPOCH_SECOND', 'EPOCH_SECONDS': 'EPOCH_SECOND', 'EPOCH_MILLISECONDS': 'EPOCH_MILLISECOND', 'EPOCH_MICROSECONDS': 'EPOCH_MICROSECOND', 'EPOCH_NANOSECONDS': 'EPOCH_NANOSECOND', 'TZH': 'TIMEZONE_HOUR', 'TZM': 'TIMEZONE_MINUTE'}
class Snowflake(Dialect):
    """Snowflake SQL dialect: settings, parser, tokenizer and generator."""

    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True

    # Snowflake time-format tokens mapped to Python strftime directives.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }

    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return t.cast(E, expression)

        return super().quote_identifier(expression, identify=identify)
    class Parser(parser.Parser):
        """Parser for Snowflake-specific syntax (staged files, FLATTEN, SHOW, ...)."""

        IDENTIFY_PIVOT_STRINGS = True

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}

        # Snowflake function names mapped to sqlglot expression builders.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            # Note the swapped argument order relative to the SQL call site.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _parse_convert_timezone,
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": lambda args: exp.DateAdd(
                this=seq_get(args, 2),
                expression=seq_get(args, 1),
                unit=_map_date_part(seq_get(args, 0)),
            ),
            "DATEDIFF": _parse_datediff,
            "DIV0": _div0_to_if,
            "FLATTEN": exp.Explode.from_arg_list,
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "NULLIFZERO": _nullifzero_to_if,
            "OBJECT_CONSTRUCT": _parse_object_construct,
            "REGEXP_REPLACE": _parse_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEDIFF": _parse_datediff,
            "TIMESTAMPDIFF": _parse_datediff,
            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
            "TO_TIMESTAMP": _parse_to_timestamp,
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _zeroifnull_to_if,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": _parse_date_part,
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain arguments, not the TRIM(x FROM y) grammar.
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
            # `:` is Snowflake's semi-structured path access operator.
            TokenType.COLON: _parse_colon_get_path,
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location(),
        }

        SHOW_PARSERS = {
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
        }

        # Tokens that may appear inside a staged file path like @stage/dir/file.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # The fixed output columns of Snowflake's FLATTEN table function.
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's implicit column names to the alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    # Not a complete AT/BEFORE clause: rewind so the tokens can be
                    # re-parsed as something else.
                    self._retreat(index)

            return table

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional staged-file options: (FILE_FORMAT => ..., PATTERN => ...)
                self._match(TokenType.L_PAREN)
                while self._curr and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts()
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            # IDENTIFIER(...) resolves a name from a string or session variable.
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the optional IN <scope kind> [<scope>] tail of a SHOW command."""
            scope = None
            scope_kind = None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text
                    if self._curr:
                        scope = self._parse_table()
                elif self._curr:
                    scope_kind = "TABLE"
                    scope = self._parse_table()

            return self.expression(exp.Show, this=this, scope=scope, scope_kind=scope_kind)

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location(self) -> exp.LocationProperty:
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path like @stage/dir/file into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator.
            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
624
    class Tokenizer(tokens.Tokenizer):
        """Tokenizer for Snowflake: $$-raw strings, // comments, $ parameters."""

        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            # Snowflake's SELECT * modifiers map onto the generic EXCEPT/REPLACE.
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # `$` introduces session variables / positional stage references.
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is parsed as a real statement here, not an opaque command.
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
660
    class Generator(generator.Generator):
        """SQL generator for the Snowflake dialect."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True

        # Expression-type -> SQL rendering overrides for Snowflake.
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            # ARRAY_CONTAINS takes (value, array), the reverse of the AST order.
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DataType: _datatype_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            # GenerateSeries is end-inclusive; ARRAY_GENERATE_RANGE is exclusive,
            # hence the + 1 (mirrors the - 1 applied when parsing).
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
            exp.Struct: lambda self, e: self.func(
                "OBJECT_CONSTRUCT",
                *(arg for expression in e.expressions for arg in expression.flatten()),
            ),
            exp.Stuff: rename_func("INSERT"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.UnixToTime: _unix_to_time_sql,
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
        }

        # SELECT * modifiers: Snowflake spells EXCEPT as EXCLUDE, REPLACE as RENAME.
        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            # Snowflake has no SET / VOLATILE table property syntax.
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }
770
        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding a millisecond part into nanoseconds."""
            milli = expression.args.get("milli")
            if milli is not None:
                # Snowflake's fractional argument is nanoseconds; pop() detaches
                # the milli node so it is not rendered twice.
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to plain CAST for non-string operands."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; a single-argument LOG is the natural logarithm (LN)."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) with a
            six-column alias (SEQ, KEY, PATH, INDEX, VALUE, THIS)."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # WITH OFFSET maps onto FLATTEN's INDEX column.
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW command, e.g. SHOW PRIMARY KEYS IN SCHEMA foo."""
            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            return f"SHOW {expression.name}{scope_kind}{scope}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            # Snowflake only supports EXCEPT [DISTINCT]; warn on EXCEPT ALL.
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            # Snowflake only supports INTERSECT [DISTINCT]; warn on INTERSECT ALL.
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            # NOTE(review): kind_value is always truthy because of the `or "TABLE"`
            # fallback, so the else-branch below is unreachable.
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render an identity column as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render the SWAP WITH clause of ALTER TABLE ... SWAP WITH t."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def with_properties(self, properties: exp.Properties) -> str:
            # Snowflake renders table properties space-separated and unwrapped.
            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Indicates the default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Determines whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Determines whether SEMI and ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Determines whether a size in the table sample clause represents a percentage.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime format.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
383    def quote_identifier(self, expression: E, identify: bool = True) -> E:
384        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
385        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
386        if (
387            isinstance(expression, exp.Identifier)
388            and isinstance(expression.parent, exp.Table)
389            and expression.name.lower() == "dual"
390        ):
391            return t.cast(E, expression)
392
393        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
tokenizer_class = <class 'Snowflake.Tokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_ESCAPE_SEQUENCES: Dict[str, str] = {}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
395    class Parser(parser.Parser):
396        IDENTIFY_PIVOT_STRINGS = True
397
398        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
399
400        FUNCTIONS = {
401            **parser.Parser.FUNCTIONS,
402            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
403            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
404            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
405                this=seq_get(args, 1), expression=seq_get(args, 0)
406            ),
407            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
408                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
409                start=seq_get(args, 0),
410                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
411                step=seq_get(args, 2),
412            ),
413            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
414            "BITXOR": binary_from_function(exp.BitwiseXor),
415            "BIT_XOR": binary_from_function(exp.BitwiseXor),
416            "BOOLXOR": binary_from_function(exp.Xor),
417            "CONVERT_TIMEZONE": _parse_convert_timezone,
418            "DATE_TRUNC": _date_trunc_to_time,
419            "DATEADD": lambda args: exp.DateAdd(
420                this=seq_get(args, 2),
421                expression=seq_get(args, 1),
422                unit=_map_date_part(seq_get(args, 0)),
423            ),
424            "DATEDIFF": _parse_datediff,
425            "DIV0": _div0_to_if,
426            "FLATTEN": exp.Explode.from_arg_list,
427            "IFF": exp.If.from_arg_list,
428            "LAST_DAY": lambda args: exp.LastDay(
429                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
430            ),
431            "LISTAGG": exp.GroupConcat.from_arg_list,
432            "NULLIFZERO": _nullifzero_to_if,
433            "OBJECT_CONSTRUCT": _parse_object_construct,
434            "REGEXP_REPLACE": _parse_regexp_replace,
435            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
436            "RLIKE": exp.RegexpLike.from_arg_list,
437            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
438            "TIMEDIFF": _parse_datediff,
439            "TIMESTAMPDIFF": _parse_datediff,
440            "TIMESTAMPFROMPARTS": _parse_timestamp_from_parts,
441            "TIMESTAMP_FROM_PARTS": _parse_timestamp_from_parts,
442            "TO_TIMESTAMP": _parse_to_timestamp,
443            "TO_VARCHAR": exp.ToChar.from_arg_list,
444            "ZEROIFNULL": _zeroifnull_to_if,
445        }
446
447        FUNCTION_PARSERS = {
448            **parser.Parser.FUNCTION_PARSERS,
449            "DATE_PART": _parse_date_part,
450            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
451        }
452        FUNCTION_PARSERS.pop("TRIM")
453
454        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}
455
456        RANGE_PARSERS = {
457            **parser.Parser.RANGE_PARSERS,
458            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
459            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
460            TokenType.COLON: _parse_colon_get_path,
461        }
462
463        ALTER_PARSERS = {
464            **parser.Parser.ALTER_PARSERS,
465            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
466            "UNSET": lambda self: self.expression(
467                exp.Set,
468                tag=self._match_text_seq("TAG"),
469                expressions=self._parse_csv(self._parse_id_var),
470                unset=True,
471            ),
472            "SWAP": lambda self: self._parse_alter_table_swap(),
473        }
474
475        STATEMENT_PARSERS = {
476            **parser.Parser.STATEMENT_PARSERS,
477            TokenType.SHOW: lambda self: self._parse_show(),
478        }
479
480        PROPERTY_PARSERS = {
481            **parser.Parser.PROPERTY_PARSERS,
482            "LOCATION": lambda self: self._parse_location(),
483        }
484
485        SHOW_PARSERS = {
486            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
487            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
488        }
489
490        STAGED_FILE_SINGLE_TOKENS = {
491            TokenType.DOT,
492            TokenType.MOD,
493            TokenType.SLASH,
494        }
495
496        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]
497
498        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
499            if is_map:
500                # Keys are strings in Snowflake's objects, see also:
501                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
502                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
503                return self._parse_slice(self._parse_string())
504
505            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))
506
507        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
508            lateral = super()._parse_lateral()
509            if not lateral:
510                return lateral
511
512            if isinstance(lateral.this, exp.Explode):
513                table_alias = lateral.args.get("alias")
514                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
515                if table_alias and not table_alias.args.get("columns"):
516                    table_alias.set("columns", columns)
517                elif not table_alias:
518                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
519
520            return lateral
521
522        def _parse_at_before(self, table: exp.Table) -> exp.Table:
523            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
524            index = self._index
525            if self._match_texts(("AT", "BEFORE")):
526                this = self._prev.text.upper()
527                kind = (
528                    self._match(TokenType.L_PAREN)
529                    and self._match_texts(self.HISTORICAL_DATA_KIND)
530                    and self._prev.text.upper()
531                )
532                expression = self._match(TokenType.FARROW) and self._parse_bitwise()
533
534                if expression:
535                    self._match_r_paren()
536                    when = self.expression(
537                        exp.HistoricalData, this=this, kind=kind, expression=expression
538                    )
539                    table.set("when", when)
540                else:
541                    self._retreat(index)
542
543            return table
544
545        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
546            # https://docs.snowflake.com/en/user-guide/querying-stage
547            if self._match(TokenType.STRING, advance=False):
548                table = self._parse_string()
549            elif self._match_text_seq("@", advance=False):
550                table = self._parse_location_path()
551            else:
552                table = None
553
554            if table:
555                file_format = None
556                pattern = None
557
558                self._match(TokenType.L_PAREN)
559                while self._curr and not self._match(TokenType.R_PAREN):
560                    if self._match_text_seq("FILE_FORMAT", "=>"):
561                        file_format = self._parse_string() or super()._parse_table_parts()
562                    elif self._match_text_seq("PATTERN", "=>"):
563                        pattern = self._parse_string()
564                    else:
565                        break
566
567                    self._match(TokenType.COMMA)
568
569                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
570            else:
571                table = super()._parse_table_parts(schema=schema)
572
573            return self._parse_at_before(table)
574
575        def _parse_id_var(
576            self,
577            any_token: bool = True,
578            tokens: t.Optional[t.Collection[TokenType]] = None,
579        ) -> t.Optional[exp.Expression]:
580            if self._match_text_seq("IDENTIFIER", "("):
581                identifier = (
582                    super()._parse_id_var(any_token=any_token, tokens=tokens)
583                    or self._parse_string()
584                )
585                self._match_r_paren()
586                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
587
588            return super()._parse_id_var(any_token=any_token, tokens=tokens)
589
590        def _parse_show_snowflake(self, this: str) -> exp.Show:
591            scope = None
592            scope_kind = None
593
594            if self._match(TokenType.IN):
595                if self._match_text_seq("ACCOUNT"):
596                    scope_kind = "ACCOUNT"
597                elif self._match_set(self.DB_CREATABLES):
598                    scope_kind = self._prev.text
599                    if self._curr:
600                        scope = self._parse_table()
601                elif self._curr:
602                    scope_kind = "TABLE"
603                    scope = self._parse_table()
604
605            return self.expression(exp.Show, this=this, scope=scope, scope_kind=scope_kind)
606
607        def _parse_alter_table_swap(self) -> exp.SwapTable:
608            self._match_text_seq("WITH")
609            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
610
611        def _parse_location(self) -> exp.LocationProperty:
612            self._match(TokenType.EQ)
613            return self.expression(exp.LocationProperty, this=self._parse_location_path())
614
615        def _parse_location_path(self) -> exp.Var:
616            parts = [self._advance_any(ignore_reserved=True)]
617
618            # We avoid consuming a comma token because external tables like @foo and @bar
619            # can be joined in a query with a comma separator.
620            while self._is_connected() and not self._match(TokenType.COMMA, advance=False):
621                parts.append(self._advance_any(ignore_reserved=True))
622
623            return exp.var("".join(part.text for part in parts if part))

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
TABLE_ALIAS_TOKENS = {<TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.SET: 'SET'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.FINAL: 'FINAL'>, <TokenType.SHOW: 'SHOW'>, <TokenType.INT: 'INT'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.XML: 'XML'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.UINT256: 'UINT256'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.NULL: 'NULL'>, <TokenType.USE: 'USE'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.INT256: 'INT256'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.SUPER: 'SUPER'>, <TokenType.VIEW: 'VIEW'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.DESC: 'DESC'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ROW: 'ROW'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 
<TokenType.UNIQUE: 'UNIQUE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.ROWS: 'ROWS'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.INET: 'INET'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.TRUE: 'TRUE'>, <TokenType.UINT128: 'UINT128'>, <TokenType.BIT: 'BIT'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.MERGE: 'MERGE'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SOME: 'SOME'>, <TokenType.TEXT: 'TEXT'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.JSON: 'JSON'>, <TokenType.ANY: 'ANY'>, <TokenType.IS: 'IS'>, <TokenType.LOAD: 'LOAD'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INT128: 'INT128'>, <TokenType.MAP: 'MAP'>, <TokenType.YEAR: 'YEAR'>, <TokenType.CHAR: 'CHAR'>, <TokenType.FALSE: 'FALSE'>, <TokenType.CASE: 'CASE'>, <TokenType.UINT: 'UINT'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.FILTER: 'FILTER'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.TIME: 'TIME'>, <TokenType.ENUM: 'ENUM'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 
<TokenType.DELETE: 'DELETE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.KEEP: 'KEEP'>, <TokenType.END: 'END'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.CACHE: 'CACHE'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UUID: 'UUID'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.DATE: 'DATE'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ALL: 'ALL'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.JSONB: 'JSONB'>, <TokenType.VAR: 'VAR'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.RANGE: 'RANGE'>, <TokenType.ASC: 'ASC'>, <TokenType.MODEL: 'MODEL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.TOP: 'TOP'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.ANTI: 'ANTI'>, <TokenType.KILL: 'KILL'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.BINARY: 'BINARY'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 
'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Date'>>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _parse_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 
'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GET_PATH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GetPath'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDay'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function parse_logarithm>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 
'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _parse_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 
'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function _parse_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TsOrDsToTime'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Array'>>, 'ARRAY_GENERATE_RANGE': <function Snowflake.Parser.<lambda>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'CONVERT_TIMEZONE': <function _parse_convert_timezone>, 'DATEADD': <function Snowflake.Parser.<lambda>>, 'DIV0': <function _div0_to_if>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'NULLIFZERO': <function _nullifzero_to_if>, 'OBJECT_CONSTRUCT': <function _parse_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEDIFF': <function _parse_datediff>, 'TIMESTAMPDIFF': <function _parse_datediff>, 'TO_TIMESTAMP': <function _parse_to_timestamp>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _zeroifnull_to_if>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function _parse_date_part>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>.<lambda>>, <TokenType.COLON: 'COLON'>: <function _parse_colon_get_path>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Snowflake.Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 
'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
SHOW_PARSERS = {'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>, <TokenType.DOT: 'DOT'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SHOW_TRIE: Dict = {'PRIMARY': {'KEYS': {0: True}}, 'TERSE': {'PRIMARY': {'KEYS': {0: True}}}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenizer configuration (string escapes, comments,
        keyword aliases, and `$`-prefixed parameters/variables)."""

        # Inside single-quoted strings Snowflake accepts both backslash escapes
        # and a doubled single quote ('') as an escaped quote.
        STRING_ESCAPES = ["\\", "'"]
        # Hex string literals are written x'...' / X'...'.
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # Dollar-quoted text ($$ ... $$) is tokenized as a raw string.
        RAW_STRINGS = ["$$"]
        # Snowflake supports // line comments in addition to -- and /* */.
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            # Type aliases mapped onto the generic token types.
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            # EXCLUDE (in SELECT * EXCLUDE ...) reuses the EXCEPT token.
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            # MINUS is Snowflake's synonym for EXCEPT in set operations.
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # PUT (file staging) is passed through as an opaque command.
            "PUT": TokenType.COMMAND,
            # NOTE(review): RENAME is tokenized as REPLACE — presumably for
            # ALTER ... RENAME handling; confirm against the parser.
            "RENAME": TokenType.REPLACE,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            # TIMESTAMP_NTZ ("no time zone") maps to the plain TIMESTAMP token;
            # LTZ/TZ variants keep their zone-aware token types.
            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            # "$" introduces session parameters / stage references.
            "$": TokenType.PARAMETER,
        }

        # "$" may also appear inside identifiers/variable names.
        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is removed from the opaque-command set so SHOW statements can be
        # fully parsed (see the dialect's SHOW handling in its Parser).
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': 
<TokenType.CONSTRAINT: 'CONSTRAINT'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': 
<TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': 
<TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 
'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': 
<TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': 
<TokenType.COMMAND: 'COMMAND'>, 'RENAME': <TokenType.REPLACE: 'REPLACE'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TOP': <TokenType.TOP: 'TOP'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, "'": <TokenType.QUOTE: 'QUOTE'>, '`': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '"': <TokenType.IDENTIFIER: 'IDENTIFIER'>, '#': <TokenType.HASH: 'HASH'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.EXECUTE: 'EXECUTE'>, <TokenType.FETCH: 'FETCH'>, <TokenType.COMMAND: 'COMMAND'>}
class Snowflake.Generator(sqlglot.generator.Generator):
661    class Generator(generator.Generator):
662        PARAMETER_TOKEN = "$"
663        MATCHED_BY_SOURCE = False
664        SINGLE_STRING_INTERVAL = True
665        JOIN_HINTS = False
666        TABLE_HINTS = False
667        QUERY_HINTS = False
668        AGGREGATE_FILTER_SUPPORTED = False
669        SUPPORTS_TABLE_COPY = False
670        COLLATE_IS_FUNC = True
671        LIMIT_ONLY_LITERALS = True
672
673        TRANSFORMS = {
674            **generator.Generator.TRANSFORMS,
675            exp.ArgMax: rename_func("MAX_BY"),
676            exp.ArgMin: rename_func("MIN_BY"),
677            exp.Array: inline_array_sql,
678            exp.ArrayConcat: rename_func("ARRAY_CAT"),
679            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
680            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
681            exp.AtTimeZone: lambda self, e: self.func(
682                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
683            ),
684            exp.BitwiseXor: rename_func("BITXOR"),
685            exp.DateAdd: date_delta_sql("DATEADD"),
686            exp.DateDiff: date_delta_sql("DATEDIFF"),
687            exp.DateStrToDate: datestrtodate_sql,
688            exp.DataType: _datatype_sql,
689            exp.DayOfMonth: rename_func("DAYOFMONTH"),
690            exp.DayOfWeek: rename_func("DAYOFWEEK"),
691            exp.DayOfYear: rename_func("DAYOFYEAR"),
692            exp.Explode: rename_func("FLATTEN"),
693            exp.Extract: rename_func("DATE_PART"),
694            exp.GenerateSeries: lambda self, e: self.func(
695                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
696            ),
697            exp.GroupConcat: rename_func("LISTAGG"),
698            exp.If: if_sql(name="IFF", false_value="NULL"),
699            exp.JSONExtract: lambda self, e: f"{self.sql(e, 'this')}[{self.sql(e, 'expression')}]",
700            exp.JSONKeyValue: json_keyvalue_comma_sql,
701            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
702            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
703            exp.LogicalOr: rename_func("BOOLOR_AGG"),
704            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
705            exp.Max: max_or_greatest,
706            exp.Min: min_or_least,
707            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
708            exp.PercentileCont: transforms.preprocess(
709                [transforms.add_within_group_for_percentiles]
710            ),
711            exp.PercentileDisc: transforms.preprocess(
712                [transforms.add_within_group_for_percentiles]
713            ),
714            exp.RegexpILike: _regexpilike_sql,
715            exp.Rand: rename_func("RANDOM"),
716            exp.Select: transforms.preprocess(
717                [
718                    transforms.eliminate_distinct_on,
719                    transforms.explode_to_unnest(),
720                    transforms.eliminate_semi_and_anti_joins,
721                ]
722            ),
723            exp.SHA: rename_func("SHA1"),
724            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
725            exp.StartsWith: rename_func("STARTSWITH"),
726            exp.StrPosition: lambda self, e: self.func(
727                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
728            ),
729            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
730            exp.Struct: lambda self, e: self.func(
731                "OBJECT_CONSTRUCT",
732                *(arg for expression in e.expressions for arg in expression.flatten()),
733            ),
734            exp.Stuff: rename_func("INSERT"),
735            exp.TimestampDiff: lambda self, e: self.func(
736                "TIMESTAMPDIFF", e.unit, e.expression, e.this
737            ),
738            exp.TimestampTrunc: timestamptrunc_sql,
739            exp.TimeStrToTime: timestrtotime_sql,
740            exp.TimeToStr: lambda self, e: self.func(
741                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
742            ),
743            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
744            exp.ToArray: rename_func("TO_ARRAY"),
745            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
746            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
747            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
748            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
749            exp.UnixToTime: _unix_to_time_sql,
750            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
751            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
752            exp.Xor: rename_func("BOOLXOR"),
753        }
754
755        TYPE_MAPPING = {
756            **generator.Generator.TYPE_MAPPING,
757            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
758        }
759
760        STAR_MAPPING = {
761            "except": "EXCLUDE",
762            "replace": "RENAME",
763        }
764
765        PROPERTIES_LOCATION = {
766            **generator.Generator.PROPERTIES_LOCATION,
767            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
768            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
769        }
770
771        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
772            milli = expression.args.get("milli")
773            if milli is not None:
774                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
775                expression.set("nano", milli_to_nano)
776
777            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
778
779        def trycast_sql(self, expression: exp.TryCast) -> str:
780            value = expression.this
781
782            if value.type is None:
783                from sqlglot.optimizer.annotate_types import annotate_types
784
785                value = annotate_types(value)
786
787            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
788                return super().trycast_sql(expression)
789
790            # TRY_CAST only works for string values in Snowflake
791            return self.cast_sql(expression)
792
793        def log_sql(self, expression: exp.Log) -> str:
794            if not expression.expression:
795                return self.func("LN", expression.this)
796
797            return super().log_sql(expression)
798
799        def unnest_sql(self, expression: exp.Unnest) -> str:
800            unnest_alias = expression.args.get("alias")
801            offset = expression.args.get("offset")
802
803            columns = [
804                exp.to_identifier("seq"),
805                exp.to_identifier("key"),
806                exp.to_identifier("path"),
807                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
808                seq_get(unnest_alias.columns if unnest_alias else [], 0)
809                or exp.to_identifier("value"),
810                exp.to_identifier("this"),
811            ]
812
813            if unnest_alias:
814                unnest_alias.set("columns", columns)
815            else:
816                unnest_alias = exp.TableAlias(this="_u", columns=columns)
817
818            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
819            alias = self.sql(unnest_alias)
820            alias = f" AS {alias}" if alias else ""
821            return f"{explode}{alias}"
822
823        def show_sql(self, expression: exp.Show) -> str:
824            scope = self.sql(expression, "scope")
825            scope = f" {scope}" if scope else ""
826
827            scope_kind = self.sql(expression, "scope_kind")
828            if scope_kind:
829                scope_kind = f" IN {scope_kind}"
830
831            return f"SHOW {expression.name}{scope_kind}{scope}"
832
833        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
834            # Other dialects don't support all of the following parameters, so we need to
835            # generate default values as necessary to ensure the transpilation is correct
836            group = expression.args.get("group")
837            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
838            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
839            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
840
841            return self.func(
842                "REGEXP_SUBSTR",
843                expression.this,
844                expression.expression,
845                position,
846                occurrence,
847                parameters,
848                group,
849            )
850
851        def except_op(self, expression: exp.Except) -> str:
852            if not expression.args.get("distinct", False):
853                self.unsupported("EXCEPT with All is not supported in Snowflake")
854            return super().except_op(expression)
855
856        def intersect_op(self, expression: exp.Intersect) -> str:
857            if not expression.args.get("distinct", False):
858                self.unsupported("INTERSECT with All is not supported in Snowflake")
859            return super().intersect_op(expression)
860
861        def describe_sql(self, expression: exp.Describe) -> str:
862            # Default to table if kind is unknown
863            kind_value = expression.args.get("kind") or "TABLE"
864            kind = f" {kind_value}" if kind_value else ""
865            this = f" {self.sql(expression, 'this')}"
866            expressions = self.expressions(expression, flat=True)
867            expressions = f" {expressions}" if expressions else ""
868            return f"DESCRIBE{kind}{this}{expressions}"
869
870        def generatedasidentitycolumnconstraint_sql(
871            self, expression: exp.GeneratedAsIdentityColumnConstraint
872        ) -> str:
873            start = expression.args.get("start")
874            start = f" START {start}" if start else ""
875            increment = expression.args.get("increment")
876            increment = f" INCREMENT {increment}" if increment else ""
877            return f"AUTOINCREMENT{start}{increment}"
878
879        def swaptable_sql(self, expression: exp.SwapTable) -> str:
880            this = self.sql(expression, "this")
881            return f"SWAP WITH {this}"
882
883        def with_properties(self, properties: exp.Properties) -> str:
884            return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ")

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): never quote, except in cases where it's mandatory by the dialect; True or 'always': always quote; 'safe': only quote identifiers that are case-insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Determines how function names are normalized. Possible values are: "upper" or True (default): convert names to uppercase; "lower": convert names to lowercase; False: disable function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True.
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function 
Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, 
<class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayJoin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DataType'>: <function _datatype_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.JSONObject'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function 
max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Struct'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.UnixToTime'>: 
<function _unix_to_time_sql>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.TIMESTAMP: 'TIMESTAMP'>: 'TIMESTAMPNTZ'}
STAR_MAPPING = {'except': 'EXCLUDE', 'replace': 'RENAME'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 
'POST_WITH'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
def timestampfromparts_sql(self, expression: sqlglot.expressions.TimestampFromParts) -> str:
771        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
772            milli = expression.args.get("milli")
773            if milli is not None:
774                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
775                expression.set("nano", milli_to_nano)
776
777            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
779        def trycast_sql(self, expression: exp.TryCast) -> str:
780            value = expression.this
781
782            if value.type is None:
783                from sqlglot.optimizer.annotate_types import annotate_types
784
785                value = annotate_types(value)
786
787            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
788                return super().trycast_sql(expression)
789
790            # TRY_CAST only works for string values in Snowflake
791            return self.cast_sql(expression)
def log_sql(self, expression: sqlglot.expressions.Log) -> str:
793        def log_sql(self, expression: exp.Log) -> str:
794            if not expression.expression:
795                return self.func("LN", expression.this)
796
797            return super().log_sql(expression)
def unnest_sql(self, expression: sqlglot.expressions.Unnest) -> str:
799        def unnest_sql(self, expression: exp.Unnest) -> str:
800            unnest_alias = expression.args.get("alias")
801            offset = expression.args.get("offset")
802
803            columns = [
804                exp.to_identifier("seq"),
805                exp.to_identifier("key"),
806                exp.to_identifier("path"),
807                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
808                seq_get(unnest_alias.columns if unnest_alias else [], 0)
809                or exp.to_identifier("value"),
810                exp.to_identifier("this"),
811            ]
812
813            if unnest_alias:
814                unnest_alias.set("columns", columns)
815            else:
816                unnest_alias = exp.TableAlias(this="_u", columns=columns)
817
818            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
819            alias = self.sql(unnest_alias)
820            alias = f" AS {alias}" if alias else ""
821            return f"{explode}{alias}"
def show_sql(self, expression: sqlglot.expressions.Show) -> str:
823        def show_sql(self, expression: exp.Show) -> str:
824            scope = self.sql(expression, "scope")
825            scope = f" {scope}" if scope else ""
826
827            scope_kind = self.sql(expression, "scope_kind")
828            if scope_kind:
829                scope_kind = f" IN {scope_kind}"
830
831            return f"SHOW {expression.name}{scope_kind}{scope}"
def regexpextract_sql(self, expression: sqlglot.expressions.RegexpExtract) -> str:
833        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
834            # Other dialects don't support all of the following parameters, so we need to
835            # generate default values as necessary to ensure the transpilation is correct
836            group = expression.args.get("group")
837            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
838            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
839            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
840
841            return self.func(
842                "REGEXP_SUBSTR",
843                expression.this,
844                expression.expression,
845                position,
846                occurrence,
847                parameters,
848                group,
849            )
def except_op(self, expression: sqlglot.expressions.Except) -> str:
851        def except_op(self, expression: exp.Except) -> str:
852            if not expression.args.get("distinct", False):
853                self.unsupported("EXCEPT with All is not supported in Snowflake")
854            return super().except_op(expression)
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
856        def intersect_op(self, expression: exp.Intersect) -> str:
857            if not expression.args.get("distinct", False):
858                self.unsupported("INTERSECT with All is not supported in Snowflake")
859            return super().intersect_op(expression)
def describe_sql(self, expression: sqlglot.expressions.Describe) -> str:
861        def describe_sql(self, expression: exp.Describe) -> str:
862            # Default to table if kind is unknown
863            kind_value = expression.args.get("kind") or "TABLE"
864            kind = f" {kind_value}" if kind_value else ""
865            this = f" {self.sql(expression, 'this')}"
866            expressions = self.expressions(expression, flat=True)
867            expressions = f" {expressions}" if expressions else ""
868            return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render an identity column constraint as Snowflake AUTOINCREMENT.

    Optional START and INCREMENT values are appended only when present
    (and truthy) on the parsed constraint node.
    """
    pieces = ["AUTOINCREMENT"]
    start = expression.args.get("start")
    if start:
        pieces.append(f"START {start}")
    increment = expression.args.get("increment")
    if increment:
        pieces.append(f"INCREMENT {increment}")
    return " ".join(pieces)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Render Snowflake's ``ALTER TABLE ... SWAP WITH <target>`` clause."""
    target = self.sql(expression, "this")
    return "SWAP WITH " + target
def with_properties(self, properties: exp.Properties) -> str:
    """Emit table properties space-separated, without a WITH wrapper."""
    prefix = self.seg("")
    return self.properties(properties, wrapped=False, prefix=prefix, sep=" ")
SELECT_KINDS: Tuple[str, ...] = ()
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
EXPLICIT_UNION
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
EXPRESSIONS_WITHOUT_NESTED_CTES
KEY_VALUE_DEFINITIONS
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
atindex_sql
attimezone_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
operator_sql
toarray_sql
tsordstotime_sql
tsordstodate_sql
unixdate_sql
lastday_sql