
sqlglot.dialects.snowflake

  1from __future__ import annotations
  2
  3import typing as t
  4
  5from sqlglot import exp, generator, parser, tokens, transforms
  6from sqlglot.dialects.dialect import (
  7    Dialect,
  8    date_trunc_to_time,
  9    datestrtodate_sql,
 10    format_time_lambda,
 11    inline_array_sql,
 12    max_or_greatest,
 13    min_or_least,
 14    rename_func,
 15    timestamptrunc_sql,
 16    timestrtotime_sql,
 17    ts_or_ds_to_date_sql,
 18    var_map_sql,
 19)
 20from sqlglot.expressions import Literal
 21from sqlglot.helper import flatten, seq_get
 22from sqlglot.parser import binary_range_parser
 23from sqlglot.tokens import TokenType
 24
 25
 26def _check_int(s: str) -> bool:
 27    if s[0] in ("-", "+"):
 28        return s[1:].isdigit()
 29    return s.isdigit()
 30
 31
 32# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
 33def _snowflake_to_timestamp(args: t.Sequence) -> t.Union[exp.StrToTime, exp.UnixToTime]:
 34    if len(args) == 2:
 35        first_arg, second_arg = args
 36        if second_arg.is_string:
 37            # case: <string_expr> [ , <format> ]
 38            return format_time_lambda(exp.StrToTime, "snowflake")(args)
 39
 40        # case: <numeric_expr> [ , <scale> ]
 41        if second_arg.name not in ["0", "3", "9"]:
 42            raise ValueError(
 43                f"Scale for snowflake numeric timestamp is {second_arg}, but should be 0, 3, or 9"
 44            )
 45
 46        if second_arg.name == "0":
 47            timescale = exp.UnixToTime.SECONDS
 48        elif second_arg.name == "3":
 49            timescale = exp.UnixToTime.MILLIS
 50        elif second_arg.name == "9":
 51            timescale = exp.UnixToTime.MICROS
 52
 53        return exp.UnixToTime(this=first_arg, scale=timescale)
 54
 55    from sqlglot.optimizer.simplify import simplify_literals
 56
 57    # The first argument might be an expression like 40 * 365 * 86400, so we try to
 58    # reduce it using `simplify_literals` first and then check if it's a Literal.
 59    first_arg = seq_get(args, 0)
 60    if not isinstance(simplify_literals(first_arg, root=True), Literal):
 61        # case: <variant_expr>
 62        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
 63
 64    if first_arg.is_string:
 65        if _check_int(first_arg.this):
 66            # case: <integer>
 67            return exp.UnixToTime.from_arg_list(args)
 68
 69        # case: <date_expr>
 70        return format_time_lambda(exp.StrToTime, "snowflake", default=True)(args)
 71
 72    # case: <numeric_expr>
 73    return exp.UnixToTime.from_arg_list(args)
 74
 75
 76def _unix_to_time_sql(self: generator.Generator, expression: exp.UnixToTime) -> str:
 77    scale = expression.args.get("scale")
 78    timestamp = self.sql(expression, "this")
 79    if scale in [None, exp.UnixToTime.SECONDS]:
 80        return f"TO_TIMESTAMP({timestamp})"
 81    if scale == exp.UnixToTime.MILLIS:
 82        return f"TO_TIMESTAMP({timestamp}, 3)"
 83    if scale == exp.UnixToTime.MICROS:
 84        return f"TO_TIMESTAMP({timestamp}, 9)"
 85
 86    raise ValueError("Improper scale for timestamp")
 87
 88
 89# https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 90# https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 91def _parse_date_part(self: parser.Parser) -> t.Optional[exp.Expression]:
 92    this = self._parse_var() or self._parse_type()
 93
 94    if not this:
 95        return None
 96
 97    self._match(TokenType.COMMA)
 98    expression = self._parse_bitwise()
 99
100    name = this.name.upper()
101    if name.startswith("EPOCH"):
102        if name.startswith("EPOCH_MILLISECOND"):
103            scale = 10**3
104        elif name.startswith("EPOCH_MICROSECOND"):
105            scale = 10**6
106        elif name.startswith("EPOCH_NANOSECOND"):
107            scale = 10**9
108        else:
109            scale = None
110
111        ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
112        to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
113
114        if scale:
115            to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
116
117        return to_unix
118
119    return self.expression(exp.Extract, this=this, expression=expression)
120
121
122# https://docs.snowflake.com/en/sql-reference/functions/div0
123def _div0_to_if(args: t.Sequence) -> exp.Expression:
124    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
125    true = exp.Literal.number(0)
126    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
127    return exp.If(this=cond, true=true, false=false)
128
129
130# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
131def _zeroifnull_to_if(args: t.Sequence) -> exp.Expression:
132    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
133    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
134
135
136# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
137def _nullifzero_to_if(args: t.Sequence) -> exp.Expression:
138    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
139    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
140
141
142def _datatype_sql(self: generator.Generator, expression: exp.DataType) -> str:
143    if expression.this == exp.DataType.Type.ARRAY:
144        return "ARRAY"
145    elif expression.this == exp.DataType.Type.MAP:
146        return "OBJECT"
147    return self.datatype_sql(expression)
148
149
150def _parse_convert_timezone(args: t.Sequence) -> exp.Expression:
151    if len(args) == 3:
152        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
153    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))
154
155
156class Snowflake(Dialect):
157    null_ordering = "nulls_are_large"
158    time_format = "'yyyy-mm-dd hh24:mi:ss'"
159
160    time_mapping = {
161        "YYYY": "%Y",
162        "yyyy": "%Y",
163        "YY": "%y",
164        "yy": "%y",
165        "MMMM": "%B",
166        "mmmm": "%B",
167        "MON": "%b",
168        "mon": "%b",
169        "MM": "%m",
170        "mm": "%m",
171        "DD": "%d",
172        "dd": "%-d",
173        "DY": "%a",
174        "dy": "%w",
175        "HH24": "%H",
176        "hh24": "%H",
177        "HH12": "%I",
178        "hh12": "%I",
179        "MI": "%M",
180        "mi": "%M",
181        "SS": "%S",
182        "ss": "%S",
183        "FF": "%f",
184        "ff": "%f",
185        "FF6": "%f",
186        "ff6": "%f",
187    }
188
189    class Parser(parser.Parser):
190        IDENTIFY_PIVOT_STRINGS = True
191
192        FUNCTIONS = {
193            **parser.Parser.FUNCTIONS,
194            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
195            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
196            "ARRAY_TO_STRING": exp.ArrayJoin.from_arg_list,
197            "CONVERT_TIMEZONE": _parse_convert_timezone,
198            "DATE_TRUNC": date_trunc_to_time,
199            "DATEADD": lambda args: exp.DateAdd(
200                this=seq_get(args, 2),
201                expression=seq_get(args, 1),
202                unit=seq_get(args, 0),
203            ),
204            "DATEDIFF": lambda args: exp.DateDiff(
205                this=seq_get(args, 2),
206                expression=seq_get(args, 1),
207                unit=seq_get(args, 0),
208            ),
209            "DIV0": _div0_to_if,
210            "IFF": exp.If.from_arg_list,
211            "NULLIFZERO": _nullifzero_to_if,
212            "OBJECT_CONSTRUCT": parser.parse_var_map,
213            "RLIKE": exp.RegexpLike.from_arg_list,
214            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
215            "TO_ARRAY": exp.Array.from_arg_list,
216            "TO_VARCHAR": exp.ToChar.from_arg_list,
217            "TO_TIMESTAMP": _snowflake_to_timestamp,
218            "ZEROIFNULL": _zeroifnull_to_if,
219        }
220
221        FUNCTION_PARSERS = {
222            **parser.Parser.FUNCTION_PARSERS,
223            "DATE_PART": _parse_date_part,
224        }
225        FUNCTION_PARSERS.pop("TRIM")
226
227        FUNC_TOKENS = {
228            *parser.Parser.FUNC_TOKENS,
229            TokenType.RLIKE,
230            TokenType.TABLE,
231        }
232
233        COLUMN_OPERATORS = {
234            **parser.Parser.COLUMN_OPERATORS,  # type: ignore
235            TokenType.COLON: lambda self, this, path: self.expression(
236                exp.Bracket,
237                this=this,
238                expressions=[path],
239            ),
240        }
241
242        RANGE_PARSERS = {
243            **parser.Parser.RANGE_PARSERS,  # type: ignore
244            TokenType.LIKE_ANY: binary_range_parser(exp.LikeAny),
245            TokenType.ILIKE_ANY: binary_range_parser(exp.ILikeAny),
246        }
247
248        ALTER_PARSERS = {
249            **parser.Parser.ALTER_PARSERS,  # type: ignore
250            "UNSET": lambda self: self._parse_alter_table_set_tag(unset=True),
251            "SET": lambda self: self._parse_alter_table_set_tag(),
252        }
253
254        def _parse_alter_table_set_tag(self, unset: bool = False) -> exp.Expression:
255            self._match_text_seq("TAG")
256            parser = t.cast(t.Callable, self._parse_id_var if unset else self._parse_conjunction)
257            return self.expression(exp.SetTag, expressions=self._parse_csv(parser), unset=unset)
258
259    class Tokenizer(tokens.Tokenizer):
260        QUOTES = ["'", "$$"]
261        STRING_ESCAPES = ["\\", "'"]
262        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
263
264        KEYWORDS = {
265            **tokens.Tokenizer.KEYWORDS,
266            "CHAR VARYING": TokenType.VARCHAR,
267            "CHARACTER VARYING": TokenType.VARCHAR,
268            "EXCLUDE": TokenType.EXCEPT,
269            "ILIKE ANY": TokenType.ILIKE_ANY,
270            "LIKE ANY": TokenType.LIKE_ANY,
271            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
272            "MINUS": TokenType.EXCEPT,
273            "NCHAR VARYING": TokenType.VARCHAR,
274            "PUT": TokenType.COMMAND,
275            "RENAME": TokenType.REPLACE,
276            "TIMESTAMP_LTZ": TokenType.TIMESTAMPLTZ,
277            "TIMESTAMP_NTZ": TokenType.TIMESTAMP,
278            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
279            "TIMESTAMPNTZ": TokenType.TIMESTAMP,
280            "SAMPLE": TokenType.TABLE_SAMPLE,
281        }
282
283        SINGLE_TOKENS = {
284            **tokens.Tokenizer.SINGLE_TOKENS,
285            "$": TokenType.PARAMETER,
286        }
287
288        VAR_SINGLE_TOKENS = {"$"}
289
290    class Generator(generator.Generator):
291        PARAMETER_TOKEN = "$"
292        MATCHED_BY_SOURCE = False
293        SINGLE_STRING_INTERVAL = True
294        JOIN_HINTS = False
295        TABLE_HINTS = False
296
297        TRANSFORMS = {
298            **generator.Generator.TRANSFORMS,  # type: ignore
299            exp.Array: inline_array_sql,
300            exp.ArrayConcat: rename_func("ARRAY_CAT"),
301            exp.ArrayJoin: rename_func("ARRAY_TO_STRING"),
302            exp.AtTimeZone: lambda self, e: self.func(
303                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
304            ),
305            exp.DateAdd: lambda self, e: self.func("DATEADD", e.text("unit"), e.expression, e.this),
306            exp.DateDiff: lambda self, e: self.func(
307                "DATEDIFF", e.text("unit"), e.expression, e.this
308            ),
309            exp.DateStrToDate: datestrtodate_sql,
310            exp.DataType: _datatype_sql,
311            exp.DayOfWeek: rename_func("DAYOFWEEK"),
312            exp.If: rename_func("IFF"),
313            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
314            exp.LogicalOr: rename_func("BOOLOR_AGG"),
315            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
316            exp.Max: max_or_greatest,
317            exp.Min: min_or_least,
318            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
319            exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
320            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
321            exp.StrPosition: lambda self, e: self.func(
322                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
323            ),
324            exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
325            exp.TimeStrToTime: timestrtotime_sql,
326            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
327            exp.TimeToStr: lambda self, e: self.func(
328                "TO_CHAR", exp.cast(e.this, "timestamp"), self.format_time(e)
329            ),
330            exp.TimestampTrunc: timestamptrunc_sql,
331            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
332            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
333            exp.TsOrDsToDate: ts_or_ds_to_date_sql("snowflake"),
334            exp.UnixToTime: _unix_to_time_sql,
335            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
336        }
337
338        TYPE_MAPPING = {
339            **generator.Generator.TYPE_MAPPING,  # type: ignore
340            exp.DataType.Type.TIMESTAMP: "TIMESTAMPNTZ",
341        }
342
343        STAR_MAPPING = {
344            "except": "EXCLUDE",
345            "replace": "RENAME",
346        }
347
348        PROPERTIES_LOCATION = {
349            **generator.Generator.PROPERTIES_LOCATION,  # type: ignore
350            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
351            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
352        }
353
354        def except_op(self, expression: exp.Except) -> str:
355            if not expression.args.get("distinct", False):
356                self.unsupported("EXCEPT with All is not supported in Snowflake")
357            return super().except_op(expression)
358
359        def intersect_op(self, expression: exp.Intersect) -> str:
360            if not expression.args.get("distinct", False):
361                self.unsupported("INTERSECT with All is not supported in Snowflake")
362            return super().intersect_op(expression)
363
364        def values_sql(self, expression: exp.Values) -> str:
365            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.
366
367            We also want to make sure that after we find matches where we need to unquote a column that we prevent users
368            from adding quotes to the column by using the `identify` argument when generating the SQL.
369            """
370            alias = expression.args.get("alias")
371            if alias and alias.args.get("columns"):
372                expression = expression.transform(
373                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
374                    if isinstance(node, exp.Identifier)
375                    and isinstance(node.parent, exp.TableAlias)
376                    and node.arg_key == "columns"
377                    else node,
378                )
379                return self.no_identify(lambda: super(self.__class__, self).values_sql(expression))
380            return super().values_sql(expression)
381
382        def settag_sql(self, expression: exp.SetTag) -> str:
383            action = "UNSET" if expression.args.get("unset") else "SET"
384            return f"{action} TAG {self.expressions(expression)}"
385
386        def select_sql(self, expression: exp.Select) -> str:
387            """Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also
388            that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need
389            to unquote a column that we prevent users from adding quotes to the column by using the `identify` argument when
390            generating the SQL.
391
392            Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the
393            expression. This might not be true in a case where the same column name can be sourced from another table that can
394            properly quote but should be true in most cases.
395            """
396            values_identifiers = set(
397                flatten(
398                    (v.args.get("alias") or exp.Alias()).args.get("columns", [])
399                    for v in expression.find_all(exp.Values)
400                )
401            )
402            if values_identifiers:
403                expression = expression.transform(
404                    lambda node: exp.Identifier(**{**node.args, "quoted": False})
405                    if isinstance(node, exp.Identifier) and node in values_identifiers
406                    else node,
407                )
408                return self.no_identify(lambda: super(self.__class__, self).select_sql(expression))
409            return super().select_sql(expression)
410
411        def describe_sql(self, expression: exp.Describe) -> str:
412            # Default to table if kind is unknown
413            kind_value = expression.args.get("kind") or "TABLE"
414            kind = f" {kind_value}" if kind_value else ""
415            this = f" {self.sql(expression, 'this')}"
416            return f"DESCRIBE{kind}{this}"
417
418        def generatedasidentitycolumnconstraint_sql(
419            self, expression: exp.GeneratedAsIdentityColumnConstraint
420        ) -> str:
421            start = expression.args.get("start")
422            start = f" START {start}" if start else ""
423            increment = expression.args.get("increment")
424            increment = f" INCREMENT {increment}" if increment else ""
425            return f"AUTOINCREMENT{start}{increment}"
class Snowflake(sqlglot.dialects.dialect.Dialect):
class Snowflake.Parser(sqlglot.parser.Parser):

Parser consumes a list of tokens produced by the sqlglot.tokens.Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: the desired error level. Default: ErrorLevel.RAISE
  • error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
  • index_offset: Index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0
  • alias_post_tablesample: Whether the table alias comes after tablesample. Default: False
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
  • null_ordering: Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
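
For example, a minimal sketch using sqlglot's public parse_one entry point:

import sqlglot
from sqlglot import exp

# Parsing with read="snowflake" applies the FUNCTIONS overrides defined in
# Snowflake.Parser, e.g. IFF(...) is parsed into the canonical exp.If node.
tree = sqlglot.parse_one("SELECT IFF(x > 0, 'pos', 'neg') FROM t", read="snowflake")
assert tree.find(exp.If) is not None
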
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
class Snowflake.Generator(sqlglot.generator.Generator):

Generator interprets the given syntax tree and produces a SQL string as an output.

Arguments:
  • time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
  • time_trie (trie): a trie of the time_mapping keys
  • pretty (bool): if set to True the returned string will be formatted. Default: False.
  • quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
  • quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
  • identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
  • identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
  • bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
  • bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
  • hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
  • hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
  • byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
  • byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
  • identify (bool | str): 'always' to always quote identifiers, 'safe' to quote identifiers only if they don't contain an uppercase character; True defaults to 'always'.
  • normalize (bool): if set to True, all identifiers will be lowercased
  • string_escape (str): specifies a string escape character. Default: '.
  • identifier_escape (str): specifies an identifier escape character. Default: ".
  • pad (int): determines padding in a formatted string. Default: 2.
  • indent (int): determines the size of indentation in a formatted string. Default: 4.
  • unnest_column_only (bool): if set to True, UNNEST table aliases are treated only as column aliases
  • normalize_functions (str): how to normalize function names, "upper", "lower", or None. Default: "upper"
  • alias_post_tablesample (bool): whether the table alias comes after tablesample. Default: False
  • unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
  • null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
  • max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
  • leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether or not to preserve comments in the output SQL code. Default: True
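
For example, a minimal sketch (formatting may vary by sqlglot version):

import sqlglot

# Rendering an expression tree with dialect="snowflake" goes through this
# Generator, so TYPE_MAPPING applies: a plain TIMESTAMP type should be
# emitted as TIMESTAMPNTZ.
tree = sqlglot.parse_one("SELECT CAST(x AS TIMESTAMP) FROM t")
print(tree.sql(dialect="snowflake"))
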
def except_op(self, expression: sqlglot.expressions.Except) -> str:
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
def values_sql(self, expression: sqlglot.expressions.Values) -> str:

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted.

We also want to make sure that after we find matches where we need to unquote a column that we prevent users from adding quotes to the column by using the identify argument when generating the SQL.
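
A hedged illustration of the intended effect (exact output may vary by sqlglot version):

import sqlglot

# Even when identifiers are force-quoted with identify=True, the column
# names declared in the VALUES table alias should come out unquoted.
sql = 'SELECT * FROM (VALUES (1), (2)) AS "t"("c")'
print(sqlglot.transpile(sql, read="snowflake", write="snowflake", identify=True)[0])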

def settag_sql(self, expression: sqlglot.expressions.SetTag) -> str:
def select_sql(self, expression: sqlglot.expressions.Select) -> str:

Due to a bug in Snowflake we want to make sure that all columns in a VALUES table alias are unquoted and also that all columns in a SELECT are unquoted. We also want to make sure that after we find matches where we need to unquote a column that we prevent users from adding quotes to the column by using the identify argument when generating the SQL.

Note: We make an assumption that any columns referenced in a VALUES expression should be unquoted throughout the expression. This might not be true in a case where the same column name can be sourced from another table that can properly quote but should be true in most cases.
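
A hedged sketch of the same idea at the SELECT level (exact output may vary by sqlglot version):

import sqlglot

# The quoted projection "c1" refers to a column declared in a VALUES table
# alias, so it should be emitted unquoted throughout the statement.
sql = 'SELECT "c1" FROM (VALUES (1, 2)) AS t("c1", "c2")'
print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])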

def describe_sql(self, expression: sqlglot.expressions.Describe) -> str:
def generatedasidentitycolumnconstraint_sql( self, expression: sqlglot.expressions.GeneratedAsIdentityColumnConstraint) -> str:
Inherited Members
sqlglot.generator.Generator
Generator
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
create_sql
clone_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
datatypesize_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
with_properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
afterjournalproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
group_sql
having_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
after_having_modifiers
after_limit_modifiers
schema_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
concat_sql
check_sql
foreignkey_sql
primarykey_sql
unique_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonobject_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
attimezone_sql
add_sql
and_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
trycast_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql