sqlglot.dialects.snowflake
from __future__ import annotations

import typing as t

from sqlglot import exp, generator, parser, tokens, transforms
from sqlglot.dialects.dialect import (
    Dialect,
    NormalizationStrategy,
    binary_from_function,
    date_delta_sql,
    date_trunc_to_time,
    datestrtodate_sql,
    build_formatted_time,
    if_sql,
    inline_array_sql,
    max_or_greatest,
    min_or_least,
    rename_func,
    timestamptrunc_sql,
    timestrtotime_sql,
    var_map_sql,
)
from sqlglot.helper import flatten, is_float, is_int, seq_get
from sqlglot.tokens import TokenType

if t.TYPE_CHECKING:
    from sqlglot._typing import E


# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        return exp.Anonymous(this=name, expressions=args)

    return _builder


def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
    expression = parser.build_var_map(args)

    if isinstance(expression, exp.StarMap):
        return expression

    return exp.Struct(
        expressions=[
            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
        ]
    )


def _build_datediff(args: t.List) -> exp.DateDiff:
    return exp.DateDiff(
        this=seq_get(args, 2), expression=seq_get(args, 1), unit=_map_date_part(seq_get(args, 0))
    )


def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
    def _builder(args: t.List) -> E:
        return expr_type(
            this=seq_get(args, 2),
            expression=seq_get(args, 1),
            unit=_map_date_part(seq_get(args, 0)),
        )

    return _builder


# https://docs.snowflake.com/en/sql-reference/functions/div0
def _build_if_from_div0(args: t.List) -> exp.If:
    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0))
    true = exp.Literal.number(0)
    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
    return exp.If(this=cond, true=true, false=false)


# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
def _build_if_from_zeroifnull(args: t.List) -> exp.If:
    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))


# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
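
# Usage sketch (illustrative, assuming sqlglot's public parse_one API): DIV0, ZEROIFNULL
# and NULLIFZERO are rewritten into a generic exp.If at parse time, so other dialects can
# render them as CASE expressions.
#
#   >>> import sqlglot
#   >>> from sqlglot import exp
#   >>> ast = sqlglot.parse_one("SELECT DIV0(a, b)", read="snowflake")
#   >>> isinstance(ast.find(exp.If), exp.If)
#   True
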
def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
    flag = expression.text("flag")

    if "i" not in flag:
        flag += "i"

    return self.func(
        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
    )


def _build_convert_timezone(args: t.List) -> t.Union[exp.Anonymous, exp.AtTimeZone]:
    if len(args) == 3:
        return exp.Anonymous(this="CONVERT_TIMEZONE", expressions=args)
    return exp.AtTimeZone(this=seq_get(args, 1), zone=seq_get(args, 0))


def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
    regexp_replace = exp.RegexpReplace.from_arg_list(args)

    if not regexp_replace.args.get("replacement"):
        regexp_replace.set("replacement", exp.Literal.string(""))

    return regexp_replace


def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse


DATE_PART_MAPPING = {
    "Y": "YEAR",
    "YY": "YEAR",
    "YYY": "YEAR",
    "YYYY": "YEAR",
    "YR": "YEAR",
    "YEARS": "YEAR",
    "YRS": "YEAR",
    "MM": "MONTH",
    "MON": "MONTH",
    "MONS": "MONTH",
    "MONTHS": "MONTH",
    "D": "DAY",
    "DD": "DAY",
    "DAYS": "DAY",
    "DAYOFMONTH": "DAY",
    "WEEKDAY": "DAYOFWEEK",
    "DOW": "DAYOFWEEK",
    "DW": "DAYOFWEEK",
    "WEEKDAY_ISO": "DAYOFWEEKISO",
    "DOW_ISO": "DAYOFWEEKISO",
    "DW_ISO": "DAYOFWEEKISO",
    "YEARDAY": "DAYOFYEAR",
    "DOY": "DAYOFYEAR",
    "DY": "DAYOFYEAR",
    "W": "WEEK",
    "WK": "WEEK",
    "WEEKOFYEAR": "WEEK",
    "WOY": "WEEK",
    "WY": "WEEK",
    "WEEK_ISO": "WEEKISO",
    "WEEKOFYEARISO": "WEEKISO",
    "WEEKOFYEAR_ISO": "WEEKISO",
    "Q": "QUARTER",
    "QTR": "QUARTER",
    "QTRS": "QUARTER",
    "QUARTERS": "QUARTER",
    "H": "HOUR",
    "HH": "HOUR",
    "HR": "HOUR",
    "HOURS": "HOUR",
    "HRS": "HOUR",
    "M": "MINUTE",
    "MI": "MINUTE",
    "MIN": "MINUTE",
    "MINUTES": "MINUTE",
    "MINS": "MINUTE",
    "S": "SECOND",
    "SEC": "SECOND",
    "SECONDS": "SECOND",
    "SECS": "SECOND",
    "MS": "MILLISECOND",
    "MSEC": "MILLISECOND",
    "MILLISECONDS": "MILLISECOND",
    "US": "MICROSECOND",
    "USEC": "MICROSECOND",
    "MICROSECONDS": "MICROSECOND",
    "NS": "NANOSECOND",
    "NSEC": "NANOSECOND",
    "NANOSEC": "NANOSECOND",
    "NSECOND": "NANOSECOND",
    "NSECONDS": "NANOSECOND",
    "NANOSECS": "NANOSECOND",
    "EPOCH": "EPOCH_SECOND",
    "EPOCH_SECONDS": "EPOCH_SECOND",
    "EPOCH_MILLISECONDS": "EPOCH_MILLISECOND",
    "EPOCH_MICROSECONDS": "EPOCH_MICROSECOND",
    "EPOCH_NANOSECONDS": "EPOCH_NANOSECOND",
    "TZH": "TIMEZONE_HOUR",
    "TZM": "TIMEZONE_MINUTE",
}


@t.overload
def _map_date_part(part: exp.Expression) -> exp.Var:
    pass


@t.overload
def _map_date_part(part: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
    pass


def _map_date_part(part):
    mapped = DATE_PART_MAPPING.get(part.name.upper()) if part else None
    return exp.var(mapped) if mapped else part


def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
    trunc = date_trunc_to_time(args)
    trunc.set("unit", _map_date_part(trunc.args["unit"]))
    return trunc
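
# Usage sketch (illustrative, assuming sqlglot's public API): DATE_PART_MAPPING and
# _map_date_part normalize Snowflake's many date-part aliases to one canonical unit,
# e.g. "yy", "yr" and "yrs" all become YEAR.
#
#   >>> import sqlglot
#   >>> from sqlglot import exp
#   >>> diff = sqlglot.parse_one("SELECT DATEDIFF(yy, a, b)", read="snowflake").find(exp.DateDiff)
#   >>> diff.args["unit"].name
#   'YEAR'
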
def _build_timestamp_from_parts(args: t.List) -> exp.Func:
    if len(args) == 2:
        # Other dialects don't have the TIMESTAMP_FROM_PARTS(date, time) concept,
        # so we parse this into Anonymous for now instead of introducing complexity
        return exp.Anonymous(this="TIMESTAMP_FROM_PARTS", expressions=args)

    return exp.TimestampFromParts.from_arg_list(args)


def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    if isinstance(expression, exp.Pivot) and expression.unpivot:
        expression = transforms.unqualify_columns(expression)

    return expression


def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression


class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
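
    # Usage sketch (illustrative; the output below is approximate and may vary by sqlglot
    # version): TIME_MAPPING translates Snowflake format tokens into the strftime-style
    # tokens used internally, which is what lets format strings transpile across dialects.
    #
    #   >>> import sqlglot
    #   >>> sqlglot.transpile(
    #   ...     "SELECT TO_TIMESTAMP('2020-01-01', 'yyyy-mm-dd')", read="snowflake", write="duckdb"
    #   ... )[0]
    #   "SELECT STRPTIME('2020-01-01', '%Y-%m-%d')"
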
    def quote_identifier(self, expression: E, identify: bool = True) -> E:
        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
        if (
            isinstance(expression, exp.Identifier)
            and isinstance(expression.parent, exp.Table)
            and expression.name.lower() == "dual"
        ):
            return expression  # type: ignore

        return super().quote_identifier(expression, identify=identify)

    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": exp.Array.from_arg_list,
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "CONVERT_TIMEZONE": _build_convert_timezone,
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1))
            ),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": _build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts,
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
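
        # Usage sketch (illustrative, assuming sqlglot's public API): the FUNCTIONS entries
        # above map Snowflake function names onto canonical expressions at parse time, e.g.
        # IFF becomes exp.If and TRY_TO_DATE becomes a "safe" exp.TsOrDsToDate, which the
        # generator later renders back as TRY_TO_DATE.
        #
        #   >>> import sqlglot
        #   >>> from sqlglot import exp
        #   >>> ast = sqlglot.parse_one("SELECT TRY_TO_DATE(x, 'yyyy-mm-dd')", read="snowflake")
        #   >>> ast.find(exp.TsOrDsToDate).args.get("safe")
        #   True
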
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")),
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
            this = super()._parse_column_ops(this)

            casts = []
            json_path = []

            while self._match(TokenType.COLON):
                path = super()._parse_column_ops(self._parse_field(any_token=True))

                # The cast :: operator has a lower precedence than the extraction operator :, so
                # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH
                while isinstance(path, exp.Cast):
                    casts.append(path.to)
                    path = path.this

                if path:
                    json_path.append(path.sql(dialect="snowflake", copy=False))

            if json_path:
                this = self.expression(
                    exp.JSONExtract,
                    this=this,
                    expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))),
                )

            while casts:
                this = self.expression(exp.Cast, this=this, to=casts.pop())

            return this
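
        # Usage sketch (illustrative; the output below is approximate): _parse_column_ops
        # turns the colon extraction syntax into exp.JSONExtract (rendered as GET_PATH) and
        # re-applies trailing :: casts on top of the extraction.
        #
        #   >>> import sqlglot
        #   >>> sqlglot.transpile("SELECT col:a.b::INT FROM t", read="snowflake", write="snowflake")[0]
        #   "SELECT CAST(GET_PATH(col, 'a.b') AS INT) FROM t"
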
        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = _map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_at_before(self, table: exp.Table) -> exp.Table:
            # https://docs.snowflake.com/en/sql-reference/constructs/at-before
            index = self._index
            if self._match_texts(("AT", "BEFORE")):
                this = self._prev.text.upper()
                kind = (
                    self._match(TokenType.L_PAREN)
                    and self._match_texts(self.HISTORICAL_DATA_KIND)
                    and self._prev.text.upper()
                )
                expression = self._match(TokenType.FARROW) and self._parse_bitwise()

                if expression:
                    self._match_r_paren()
                    when = self.expression(
                        exp.HistoricalData, this=this, kind=kind, expression=expression
                    )
                    table.set("when", when)
                else:
                    self._retreat(index)

            return table
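
        # Usage sketch (illustrative, assuming sqlglot's public API): _parse_date_part
        # rewrites DATE_PART(EPOCH_*, <ts>) into exp.TimeToUnix, scaled for the
        # milli/micro/nano variants, so epoch extraction survives transpilation.
        #
        #   >>> import sqlglot
        #   >>> from sqlglot import exp
        #   >>> ast = sqlglot.parse_one("SELECT DATE_PART(epoch_second, col)", read="snowflake")
        #   >>> isinstance(ast.find(exp.TimeToUnix), exp.TimeToUnix)
        #   True
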
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return self._parse_at_before(table)

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location_property(self) -> exp.LocationProperty:
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
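
    # Usage sketch (illustrative; the output below is approximate): _parse_show_snowflake,
    # together with the Generator's show_sql, lets SHOW commands round-trip through the
    # dialect instead of being treated as opaque commands.
    #
    #   >>> import sqlglot
    #   >>> sqlglot.transpile("SHOW TERSE TABLES IN db1.schema1", read="snowflake")[0]
    #   'SHOW TERSE TABLES IN SCHEMA db1.schema1'
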
    class Tokenizer(tokens.Tokenizer):
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RENAME": TokenType.REPLACE,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
        }

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
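
    # Usage sketch (illustrative, assuming sqlglot's public API): the Tokenizer maps
    # Snowflake-specific keywords onto generic tokens, e.g. MINUS is tokenized like EXCEPT.
    #
    #   >>> import sqlglot
    #   >>> from sqlglot import exp
    #   >>> isinstance(sqlglot.parse_one("SELECT 1 MINUS SELECT 2", read="snowflake"), exp.Except)
    #   True
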
    class Generator(generator.Generator):
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: rename_func("ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql,
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        STAR_MAPPING = {
            "except": "EXCLUDE",
            "replace": "RENAME",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type> [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
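
        # Usage sketch (illustrative; the output below is approximate): TRANSFORMS drives
        # most Snowflake-specific rendering, e.g. exp.If becomes IFF and maps/structs become
        # OBJECT_CONSTRUCT, which is what makes writing Snowflake from other dialects work.
        #
        #   >>> import sqlglot
        #   >>> sqlglot.transpile("SELECT IF(cond, 1, NULL)", read="duckdb", write="snowflake")[0]
        #   'SELECT IFF(cond, 1, NULL)'
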
        def trycast_sql(self, expression: exp.TryCast) -> str:
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with ALL is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with ALL is not supported in Snowflake")
            return super().intersect_op(expression)
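
        # Usage sketch (illustrative; the output below is approximate): trycast_sql keeps
        # TRY_CAST only when the operand may be a string, since Snowflake restricts TRY_CAST
        # to string inputs, and otherwise falls back to a plain CAST.
        #
        #   >>> import sqlglot
        #   >>> sql = "SELECT TRY_CAST(x AS DATE), TRY_CAST(10 AS TEXT)"
        #   >>> sqlglot.transpile(sql, read="snowflake", write="snowflake")[0]
        #   'SELECT TRY_CAST(x AS DATE), CAST(10 AS TEXT)'
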
"TABLE" 1015 kind = f" {kind_value}" if kind_value else "" 1016 this = f" {self.sql(expression, 'this')}" 1017 expressions = self.expressions(expression, flat=True) 1018 expressions = f" {expressions}" if expressions else "" 1019 return f"DESCRIBE{kind}{this}{expressions}" 1020 1021 def generatedasidentitycolumnconstraint_sql( 1022 self, expression: exp.GeneratedAsIdentityColumnConstraint 1023 ) -> str: 1024 start = expression.args.get("start") 1025 start = f" START {start}" if start else "" 1026 increment = expression.args.get("increment") 1027 increment = f" INCREMENT {increment}" if increment else "" 1028 return f"AUTOINCREMENT{start}{increment}" 1029 1030 def swaptable_sql(self, expression: exp.SwapTable) -> str: 1031 this = self.sql(expression, "this") 1032 return f"SWAP WITH {this}" 1033 1034 def with_properties(self, properties: exp.Properties) -> str: 1035 return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ") 1036 1037 def cluster_sql(self, expression: exp.Cluster) -> str: 1038 return f"CLUSTER BY ({self.expressions(expression, flat=True)})" 1039 1040 def struct_sql(self, expression: exp.Struct) -> str: 1041 keys = [] 1042 values = [] 1043 1044 for i, e in enumerate(expression.expressions): 1045 if isinstance(e, exp.PropertyEQ): 1046 keys.append( 1047 exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this 1048 ) 1049 values.append(e.expression) 1050 else: 1051 keys.append(exp.Literal.string(f"_{i}")) 1052 values.append(e) 1053 1054 return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values))) 1055 1056 def copyparameter_sql(self, expression: exp.CopyParameter) -> str: 1057 option = self.sql(expression, "this").upper() 1058 if option == "FILE_FORMAT": 1059 values = self.expressions(expression, key="expression", flat=True, sep=" ") 1060 return f"{option} = ({values})" 1061 1062 return super().copyparameter_sql(expression) 1063 1064 def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str: 1065 if expression.args.get("weight") or expression.args.get("accuracy"): 1066 self.unsupported( 1067 "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake" 1068 ) 1069 1070 return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
990 occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1)) 991 position = expression.args.get("position") or (occurrence and exp.Literal.number(1)) 992 993 return self.func( 994 "REGEXP_SUBSTR", 995 expression.this, 996 expression.expression, 997 position, 998 occurrence, 999 parameters, 1000 group, 1001 ) 1002 1003 def except_op(self, expression: exp.Except) -> str: 1004 if not expression.args.get("distinct"): 1005 self.unsupported("EXCEPT with All is not supported in Snowflake") 1006 return super().except_op(expression) 1007 1008 def intersect_op(self, expression: exp.Intersect) -> str: 1009 if not expression.args.get("distinct"): 1010 self.unsupported("INTERSECT with All is not supported in Snowflake") 1011 return super().intersect_op(expression) 1012 1013 def describe_sql(self, expression: exp.Describe) -> str: 1014 # Default to table if kind is unknown 1015 kind_value = expression.args.get("kind") or "TABLE" 1016 kind = f" {kind_value}" if kind_value else "" 1017 this = f" {self.sql(expression, 'this')}" 1018 expressions = self.expressions(expression, flat=True) 1019 expressions = f" {expressions}" if expressions else "" 1020 return f"DESCRIBE{kind}{this}{expressions}" 1021 1022 def generatedasidentitycolumnconstraint_sql( 1023 self, expression: exp.GeneratedAsIdentityColumnConstraint 1024 ) -> str: 1025 start = expression.args.get("start") 1026 start = f" START {start}" if start else "" 1027 increment = expression.args.get("increment") 1028 increment = f" INCREMENT {increment}" if increment else "" 1029 return f"AUTOINCREMENT{start}{increment}" 1030 1031 def swaptable_sql(self, expression: exp.SwapTable) -> str: 1032 this = self.sql(expression, "this") 1033 return f"SWAP WITH {this}" 1034 1035 def with_properties(self, properties: exp.Properties) -> str: 1036 return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ") 1037 1038 def cluster_sql(self, expression: exp.Cluster) -> str: 1039 return f"CLUSTER BY ({self.expressions(expression, flat=True)})" 1040 1041 def struct_sql(self, expression: exp.Struct) -> str: 1042 keys = [] 1043 values = [] 1044 1045 for i, e in enumerate(expression.expressions): 1046 if isinstance(e, exp.PropertyEQ): 1047 keys.append( 1048 exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this 1049 ) 1050 values.append(e.expression) 1051 else: 1052 keys.append(exp.Literal.string(f"_{i}")) 1053 values.append(e) 1054 1055 return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values))) 1056 1057 def copyparameter_sql(self, expression: exp.CopyParameter) -> str: 1058 option = self.sql(expression, "this").upper() 1059 if option == "FILE_FORMAT": 1060 values = self.expressions(expression, key="expression", flat=True, sep=" ") 1061 return f"{option} = ({values})" 1062 1063 return super().copyparameter_sql(expression) 1064 1065 def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str: 1066 if expression.args.get("weight") or expression.args.get("accuracy"): 1067 self.unsupported( 1068 "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake" 1069 ) 1070 1071 return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
Specifies the strategy according to which identifiers should be normalized.
Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last".
Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.
For example,
WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;
will be rewritten as
WITH y(c) AS ( SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0 ) SELECT c FROM y;
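As a hedged sketch, the rewrite described above can be observed by parsing with the Snowflake dialect and running sqlglot's qualify pass, assuming that pass is where this flag is consulted; the exact output formatting may differ:

import sqlglot
from sqlglot.optimizer.qualify import qualify

sql = "WITH y(c) AS (SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0) SELECT c FROM y"

# Qualifying with dialect="snowflake" is expected to push the CTE column alias `c`
# onto the SUM(a) projection, as shown in the rewritten query above.
expression = sqlglot.parse_one(sql, read="snowflake")
print(qualify(expression, dialect="snowflake").sql(dialect="snowflake"))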
Associates this dialect's time formats with their equivalent Python strftime
formats.
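For illustration, a few entries this mapping is expected to contain; the exact dictionary lives on the dialect class and can vary between sqlglot versions:

from sqlglot.dialects.snowflake import Snowflake

# TIME_MAPPING is a plain dict; print a handful of Snowflake format tokens and
# the strftime directives they are expected to map to (illustrative only).
for token in ("YYYY", "MM", "DD", "HH24", "MI", "SS"):
    print(token, "->", Snowflake.TIME_MAPPING.get(token))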
324 def quote_identifier(self, expression: E, identify: bool = True) -> E: 325 # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an 326 # unquoted DUAL keyword in a special way and does not map it to a user-defined table 327 if ( 328 isinstance(expression, exp.Identifier) 329 and isinstance(expression.parent, exp.Table) 330 and expression.name.lower() == "dual" 331 ): 332 return expression # type: ignore 333 334 return super().quote_identifier(expression, identify=identify)
Adds quotes to a given identifier.
Arguments:
- expression: The expression of interest. If it's not an Identifier, this method is a no-op.
- identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
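A small usage sketch of the override above; the outputs shown in the comments are what one would expect, not a guaranteed rendering:

from sqlglot import exp, parse_one
from sqlglot.dialects.snowflake import Snowflake

dialect = Snowflake()

# An ordinary identifier is quoted as usual.
ident = dialect.quote_identifier(exp.to_identifier("my column"))
print(ident.sql(dialect="snowflake"))  # expected: "my column"

# DUAL used as a table name is returned untouched, since quoting it would stop
# Snowflake from treating the bare keyword specially.
dual = parse_one("SELECT 1 FROM DUAL", read="snowflake").find(exp.Table).this
print(dialect.quote_identifier(dual).sql(dialect="snowflake"))  # expected: DUAL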
Mapping of an escaped sequence (\\n) to its unescaped version (\n).
Inherited Members
- sqlglot.dialects.dialect.Dialect
- Dialect
- INDEX_OFFSET
- WEEK_OFFSET
- UNNEST_COLUMN_ONLY
- ALIAS_POST_TABLESAMPLE
- IDENTIFIERS_CAN_START_WITH_DIGIT
- DPIPE_IS_STRING_CONCAT
- STRICT_STRING_CONCAT
- NORMALIZE_FUNCTIONS
- LOG_BASE_FIRST
- TYPED_DIVISION
- SAFE_DIVISION
- CONCAT_COALESCE
- DATE_FORMAT
- DATEINT_FORMAT
- FORMAT_MAPPING
- PSEUDOCOLUMNS
- get_or_raise
- format_time
- normalize_identifier
- case_sensitive
- can_identify
- to_json_path
- parse
- parse_into
- generate
- transpile
- tokenize
- tokenizer
- parser
- generator
336 class Parser(parser.Parser): 337 IDENTIFY_PIVOT_STRINGS = True 338 339 ID_VAR_TOKENS = { 340 *parser.Parser.ID_VAR_TOKENS, 341 TokenType.MATCH_CONDITION, 342 } 343 344 TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW} 345 TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION) 346 347 FUNCTIONS = { 348 **parser.Parser.FUNCTIONS, 349 "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list, 350 "ARRAYAGG": exp.ArrayAgg.from_arg_list, 351 "ARRAY_CONSTRUCT": exp.Array.from_arg_list, 352 "ARRAY_CONTAINS": lambda args: exp.ArrayContains( 353 this=seq_get(args, 1), expression=seq_get(args, 0) 354 ), 355 "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries( 356 # ARRAY_GENERATE_RANGE has an exlusive end; we normalize it to be inclusive 357 start=seq_get(args, 0), 358 end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)), 359 step=seq_get(args, 2), 360 ), 361 "BITXOR": binary_from_function(exp.BitwiseXor), 362 "BIT_XOR": binary_from_function(exp.BitwiseXor), 363 "BOOLXOR": binary_from_function(exp.Xor), 364 "CONVERT_TIMEZONE": _build_convert_timezone, 365 "DATE": _build_datetime("DATE", exp.DataType.Type.DATE), 366 "DATE_TRUNC": _date_trunc_to_time, 367 "DATEADD": _build_date_time_add(exp.DateAdd), 368 "DATEDIFF": _build_datediff, 369 "DIV0": _build_if_from_div0, 370 "FLATTEN": exp.Explode.from_arg_list, 371 "GET_PATH": lambda args, dialect: exp.JSONExtract( 372 this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1)) 373 ), 374 "IFF": exp.If.from_arg_list, 375 "LAST_DAY": lambda args: exp.LastDay( 376 this=seq_get(args, 0), unit=_map_date_part(seq_get(args, 1)) 377 ), 378 "LISTAGG": exp.GroupConcat.from_arg_list, 379 "MEDIAN": lambda args: exp.PercentileCont( 380 this=seq_get(args, 0), expression=exp.Literal.number(0.5) 381 ), 382 "NULLIFZERO": _build_if_from_nullifzero, 383 "OBJECT_CONSTRUCT": _build_object_construct, 384 "REGEXP_REPLACE": _build_regexp_replace, 385 "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list, 386 "RLIKE": exp.RegexpLike.from_arg_list, 387 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)), 388 "TIMEADD": _build_date_time_add(exp.TimeAdd), 389 "TIMEDIFF": _build_datediff, 390 "TIMESTAMPADD": _build_date_time_add(exp.DateAdd), 391 "TIMESTAMPDIFF": _build_datediff, 392 "TIMESTAMPFROMPARTS": _build_timestamp_from_parts, 393 "TIMESTAMP_FROM_PARTS": _build_timestamp_from_parts, 394 "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True), 395 "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE), 396 "TO_NUMBER": lambda args: exp.ToNumber( 397 this=seq_get(args, 0), 398 format=seq_get(args, 1), 399 precision=seq_get(args, 2), 400 scale=seq_get(args, 3), 401 ), 402 "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME), 403 "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP), 404 "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ), 405 "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP), 406 "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ), 407 "TO_VARCHAR": exp.ToChar.from_arg_list, 408 "ZEROIFNULL": _build_if_from_zeroifnull, 409 } 410 411 FUNCTION_PARSERS = { 412 **parser.Parser.FUNCTION_PARSERS, 413 "DATE_PART": lambda self: self._parse_date_part(), 414 "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(), 415 } 416 FUNCTION_PARSERS.pop("TRIM") 417 418 TIMESTAMPS = parser.Parser.TIMESTAMPS - 
{TokenType.TIME} 419 420 RANGE_PARSERS = { 421 **parser.Parser.RANGE_PARSERS, 422 TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny), 423 TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny), 424 } 425 426 ALTER_PARSERS = { 427 **parser.Parser.ALTER_PARSERS, 428 "SET": lambda self: self._parse_set(tag=self._match_text_seq("TAG")), 429 "UNSET": lambda self: self.expression( 430 exp.Set, 431 tag=self._match_text_seq("TAG"), 432 expressions=self._parse_csv(self._parse_id_var), 433 unset=True, 434 ), 435 "SWAP": lambda self: self._parse_alter_table_swap(), 436 } 437 438 STATEMENT_PARSERS = { 439 **parser.Parser.STATEMENT_PARSERS, 440 TokenType.SHOW: lambda self: self._parse_show(), 441 } 442 443 PROPERTY_PARSERS = { 444 **parser.Parser.PROPERTY_PARSERS, 445 "LOCATION": lambda self: self._parse_location_property(), 446 } 447 448 SHOW_PARSERS = { 449 "SCHEMAS": _show_parser("SCHEMAS"), 450 "TERSE SCHEMAS": _show_parser("SCHEMAS"), 451 "OBJECTS": _show_parser("OBJECTS"), 452 "TERSE OBJECTS": _show_parser("OBJECTS"), 453 "TABLES": _show_parser("TABLES"), 454 "TERSE TABLES": _show_parser("TABLES"), 455 "VIEWS": _show_parser("VIEWS"), 456 "TERSE VIEWS": _show_parser("VIEWS"), 457 "PRIMARY KEYS": _show_parser("PRIMARY KEYS"), 458 "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"), 459 "IMPORTED KEYS": _show_parser("IMPORTED KEYS"), 460 "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"), 461 "UNIQUE KEYS": _show_parser("UNIQUE KEYS"), 462 "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"), 463 "SEQUENCES": _show_parser("SEQUENCES"), 464 "TERSE SEQUENCES": _show_parser("SEQUENCES"), 465 "COLUMNS": _show_parser("COLUMNS"), 466 "USERS": _show_parser("USERS"), 467 "TERSE USERS": _show_parser("USERS"), 468 } 469 470 STAGED_FILE_SINGLE_TOKENS = { 471 TokenType.DOT, 472 TokenType.MOD, 473 TokenType.SLASH, 474 } 475 476 FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"] 477 478 SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"} 479 480 def _parse_column_ops(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]: 481 this = super()._parse_column_ops(this) 482 483 casts = [] 484 json_path = [] 485 486 while self._match(TokenType.COLON): 487 path = super()._parse_column_ops(self._parse_field(any_token=True)) 488 489 # The cast :: operator has a lower precedence than the extraction operator :, so 490 # we rearrange the AST appropriately to avoid casting the 2nd argument of GET_PATH 491 while isinstance(path, exp.Cast): 492 casts.append(path.to) 493 path = path.this 494 495 if path: 496 json_path.append(path.sql(dialect="snowflake", copy=False)) 497 498 if json_path: 499 this = self.expression( 500 exp.JSONExtract, 501 this=this, 502 expression=self.dialect.to_json_path(exp.Literal.string(".".join(json_path))), 503 ) 504 505 while casts: 506 this = self.expression(exp.Cast, this=this, to=casts.pop()) 507 508 return this 509 510 # https://docs.snowflake.com/en/sql-reference/functions/date_part.html 511 # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts 512 def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]: 513 this = self._parse_var() or self._parse_type() 514 515 if not this: 516 return None 517 518 self._match(TokenType.COMMA) 519 expression = self._parse_bitwise() 520 this = _map_date_part(this) 521 name = this.name.upper() 522 523 if name.startswith("EPOCH"): 524 if name == "EPOCH_MILLISECOND": 525 scale = 10**3 526 elif name == 
"EPOCH_MICROSECOND": 527 scale = 10**6 528 elif name == "EPOCH_NANOSECOND": 529 scale = 10**9 530 else: 531 scale = None 532 533 ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP")) 534 to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts) 535 536 if scale: 537 to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale)) 538 539 return to_unix 540 541 return self.expression(exp.Extract, this=this, expression=expression) 542 543 def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]: 544 if is_map: 545 # Keys are strings in Snowflake's objects, see also: 546 # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured 547 # - https://docs.snowflake.com/en/sql-reference/functions/object_construct 548 return self._parse_slice(self._parse_string()) 549 550 return self._parse_slice(self._parse_alias(self._parse_conjunction(), explicit=True)) 551 552 def _parse_lateral(self) -> t.Optional[exp.Lateral]: 553 lateral = super()._parse_lateral() 554 if not lateral: 555 return lateral 556 557 if isinstance(lateral.this, exp.Explode): 558 table_alias = lateral.args.get("alias") 559 columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS] 560 if table_alias and not table_alias.args.get("columns"): 561 table_alias.set("columns", columns) 562 elif not table_alias: 563 exp.alias_(lateral, "_flattened", table=columns, copy=False) 564 565 return lateral 566 567 def _parse_at_before(self, table: exp.Table) -> exp.Table: 568 # https://docs.snowflake.com/en/sql-reference/constructs/at-before 569 index = self._index 570 if self._match_texts(("AT", "BEFORE")): 571 this = self._prev.text.upper() 572 kind = ( 573 self._match(TokenType.L_PAREN) 574 and self._match_texts(self.HISTORICAL_DATA_KIND) 575 and self._prev.text.upper() 576 ) 577 expression = self._match(TokenType.FARROW) and self._parse_bitwise() 578 579 if expression: 580 self._match_r_paren() 581 when = self.expression( 582 exp.HistoricalData, this=this, kind=kind, expression=expression 583 ) 584 table.set("when", when) 585 else: 586 self._retreat(index) 587 588 return table 589 590 def _parse_table_parts( 591 self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False 592 ) -> exp.Table: 593 # https://docs.snowflake.com/en/user-guide/querying-stage 594 if self._match(TokenType.STRING, advance=False): 595 table = self._parse_string() 596 elif self._match_text_seq("@", advance=False): 597 table = self._parse_location_path() 598 else: 599 table = None 600 601 if table: 602 file_format = None 603 pattern = None 604 605 wrapped = self._match(TokenType.L_PAREN) 606 while self._curr and wrapped and not self._match(TokenType.R_PAREN): 607 if self._match_text_seq("FILE_FORMAT", "=>"): 608 file_format = self._parse_string() or super()._parse_table_parts( 609 is_db_reference=is_db_reference 610 ) 611 elif self._match_text_seq("PATTERN", "=>"): 612 pattern = self._parse_string() 613 else: 614 break 615 616 self._match(TokenType.COMMA) 617 618 table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern) 619 else: 620 table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference) 621 622 return self._parse_at_before(table) 623 624 def _parse_id_var( 625 self, 626 any_token: bool = True, 627 tokens: t.Optional[t.Collection[TokenType]] = None, 628 ) -> t.Optional[exp.Expression]: 629 if self._match_text_seq("IDENTIFIER", "("): 630 identifier = ( 631 super()._parse_id_var(any_token=any_token, 
tokens=tokens) 632 or self._parse_string() 633 ) 634 self._match_r_paren() 635 return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier]) 636 637 return super()._parse_id_var(any_token=any_token, tokens=tokens) 638 639 def _parse_show_snowflake(self, this: str) -> exp.Show: 640 scope = None 641 scope_kind = None 642 643 # will identity SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS 644 # which is syntactically valid but has no effect on the output 645 terse = self._tokens[self._index - 2].text.upper() == "TERSE" 646 647 history = self._match_text_seq("HISTORY") 648 649 like = self._parse_string() if self._match(TokenType.LIKE) else None 650 651 if self._match(TokenType.IN): 652 if self._match_text_seq("ACCOUNT"): 653 scope_kind = "ACCOUNT" 654 elif self._match_set(self.DB_CREATABLES): 655 scope_kind = self._prev.text.upper() 656 if self._curr: 657 scope = self._parse_table_parts() 658 elif self._curr: 659 scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE" 660 scope = self._parse_table_parts() 661 662 return self.expression( 663 exp.Show, 664 **{ 665 "terse": terse, 666 "this": this, 667 "history": history, 668 "like": like, 669 "scope": scope, 670 "scope_kind": scope_kind, 671 "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(), 672 "limit": self._parse_limit(), 673 "from": self._parse_string() if self._match(TokenType.FROM) else None, 674 }, 675 ) 676 677 def _parse_alter_table_swap(self) -> exp.SwapTable: 678 self._match_text_seq("WITH") 679 return self.expression(exp.SwapTable, this=self._parse_table(schema=True)) 680 681 def _parse_location_property(self) -> exp.LocationProperty: 682 self._match(TokenType.EQ) 683 return self.expression(exp.LocationProperty, this=self._parse_location_path()) 684 685 def _parse_file_location(self) -> t.Optional[exp.Expression]: 686 # Parse either a subquery or a staged file 687 return ( 688 self._parse_select(table=True) 689 if self._match(TokenType.L_PAREN, advance=False) 690 else self._parse_table_parts() 691 ) 692 693 def _parse_location_path(self) -> exp.Var: 694 parts = [self._advance_any(ignore_reserved=True)] 695 696 # We avoid consuming a comma token because external tables like @foo and @bar 697 # can be joined in a query with a comma separator, as well as closing paren 698 # in case of subqueries 699 while self._is_connected() and not self._match_set( 700 (TokenType.COMMA, TokenType.R_PAREN), advance=False 701 ): 702 parts.append(self._advance_any(ignore_reserved=True)) 703 704 return exp.var("".join(part.text for part in parts if part))
Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
- error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
- max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
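A minimal sketch of exercising this parser through the top-level API; keyword arguments like error_level are forwarded to the Parser constructor:

import sqlglot
from sqlglot.errors import ErrorLevel

# Parse a Snowflake statement into a syntax tree; Snowflake-specific constructs
# such as IFF or the ':' JSON extraction operator are handled by this Parser.
tree = sqlglot.parse_one("SELECT IFF(x > 0, 'pos', 'neg') FROM t", read="snowflake")
print(type(tree))

# Parser options are passed straight through the convenience functions.
sqlglot.parse("SELECT 1", read="snowflake", error_level=ErrorLevel.RAISE)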
Inherited Members
- sqlglot.parser.Parser
- Parser
- NO_PAREN_FUNCTIONS
- STRUCT_TYPE_TOKENS
- NESTED_TYPE_TOKENS
- ENUM_TYPE_TOKENS
- AGGREGATE_TYPE_TOKENS
- TYPE_TOKENS
- SIGNED_TO_UNSIGNED_TYPE_TOKEN
- SUBQUERY_PREDICATES
- RESERVED_TOKENS
- DB_CREATABLES
- CREATABLES
- INTERVAL_VARS
- ALIAS_TOKENS
- COMMENT_TABLE_ALIAS_TOKENS
- UPDATE_ALIAS_TOKENS
- TRIM_TYPES
- FUNC_TOKENS
- CONJUNCTION
- EQUALITY
- COMPARISON
- BITWISE
- TERM
- FACTOR
- EXPONENT
- TIMES
- SET_OPERATIONS
- JOIN_METHODS
- JOIN_SIDES
- JOIN_KINDS
- JOIN_HINTS
- LAMBDAS
- COLUMN_OPERATORS
- EXPRESSION_PARSERS
- UNARY_PARSERS
- STRING_PARSERS
- NUMERIC_PARSERS
- PRIMARY_PARSERS
- PLACEHOLDER_PARSERS
- CONSTRAINT_PARSERS
- ALTER_ALTER_PARSERS
- SCHEMA_UNNAMED_CONSTRAINTS
- NO_PAREN_FUNCTION_PARSERS
- INVALID_FUNC_NAME_TOKENS
- FUNCTIONS_WITH_ALIASED_ARGS
- KEY_VALUE_DEFINITIONS
- QUERY_MODIFIER_PARSERS
- SET_PARSERS
- TYPE_LITERAL_PARSERS
- DDL_SELECT_TOKENS
- PRE_VOLATILE_TOKENS
- TRANSACTION_KIND
- TRANSACTION_CHARACTERISTICS
- CONFLICT_ACTIONS
- CREATE_SEQUENCE
- ISOLATED_LOADING_OPTIONS
- USABLES
- CAST_ACTIONS
- INSERT_ALTERNATIVES
- CLONE_KEYWORDS
- HISTORICAL_DATA_KIND
- OPCLASS_FOLLOW_KEYWORDS
- OPTYPE_FOLLOW_TOKENS
- TABLE_INDEX_HINT_TOKENS
- VIEW_ATTRIBUTES
- WINDOW_ALIAS_TOKENS
- WINDOW_BEFORE_PAREN_TOKENS
- WINDOW_SIDES
- JSON_KEY_VALUE_SEPARATOR_TOKENS
- FETCH_TOKENS
- ADD_CONSTRAINT_TOKENS
- DISTINCT_TOKENS
- NULL_TOKENS
- UNNEST_OFFSET_ALIAS_TOKENS
- SELECT_START_TOKENS
- STRICT_CAST
- PREFIXED_PIVOT_COLUMNS
- LOG_DEFAULTS_TO_LN
- ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
- TABLESAMPLE_CSV
- SET_REQUIRES_ASSIGNMENT_DELIMITER
- TRIM_PATTERN_FIRST
- STRING_ALIASES
- MODIFIERS_ATTACHED_TO_UNION
- UNION_MODIFIERS
- NO_PAREN_IF_COMMANDS
- JSON_ARROWS_REQUIRE_JSON_TYPE
- VALUES_FOLLOWED_BY_PAREN
- SUPPORTS_IMPLICIT_UNNEST
- INTERVAL_SPANS
- SUPPORTS_PARTITION_SELECTION
- error_level
- error_message_context
- max_errors
- dialect
- reset
- parse
- parse_into
- check_errors
- raise_error
- expression
- validate_expression
- errors
- sql
706 class Tokenizer(tokens.Tokenizer): 707 STRING_ESCAPES = ["\\", "'"] 708 HEX_STRINGS = [("x'", "'"), ("X'", "'")] 709 RAW_STRINGS = ["$$"] 710 COMMENTS = ["--", "//", ("/*", "*/")] 711 712 KEYWORDS = { 713 **tokens.Tokenizer.KEYWORDS, 714 "BYTEINT": TokenType.INT, 715 "CHAR VARYING": TokenType.VARCHAR, 716 "CHARACTER VARYING": TokenType.VARCHAR, 717 "EXCLUDE": TokenType.EXCEPT, 718 "ILIKE ANY": TokenType.ILIKE_ANY, 719 "LIKE ANY": TokenType.LIKE_ANY, 720 "MATCH_CONDITION": TokenType.MATCH_CONDITION, 721 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE, 722 "MINUS": TokenType.EXCEPT, 723 "NCHAR VARYING": TokenType.VARCHAR, 724 "PUT": TokenType.COMMAND, 725 "REMOVE": TokenType.COMMAND, 726 "RENAME": TokenType.REPLACE, 727 "RM": TokenType.COMMAND, 728 "SAMPLE": TokenType.TABLE_SAMPLE, 729 "SQL_DOUBLE": TokenType.DOUBLE, 730 "SQL_VARCHAR": TokenType.VARCHAR, 731 "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION, 732 "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ, 733 "TOP": TokenType.TOP, 734 } 735 736 SINGLE_TOKENS = { 737 **tokens.Tokenizer.SINGLE_TOKENS, 738 "$": TokenType.PARAMETER, 739 } 740 741 VAR_SINGLE_TOKENS = {"$"} 742 743 COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
745 class Generator(generator.Generator): 746 PARAMETER_TOKEN = "$" 747 MATCHED_BY_SOURCE = False 748 SINGLE_STRING_INTERVAL = True 749 JOIN_HINTS = False 750 TABLE_HINTS = False 751 QUERY_HINTS = False 752 AGGREGATE_FILTER_SUPPORTED = False 753 SUPPORTS_TABLE_COPY = False 754 COLLATE_IS_FUNC = True 755 LIMIT_ONLY_LITERALS = True 756 JSON_KEY_VALUE_PAIR_SEP = "," 757 INSERT_OVERWRITE = " OVERWRITE INTO" 758 STRUCT_DELIMITER = ("(", ")") 759 COPY_PARAMS_ARE_WRAPPED = False 760 COPY_PARAMS_EQ_REQUIRED = True 761 762 TRANSFORMS = { 763 **generator.Generator.TRANSFORMS, 764 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 765 exp.ArgMax: rename_func("MAX_BY"), 766 exp.ArgMin: rename_func("MIN_BY"), 767 exp.Array: inline_array_sql, 768 exp.ArrayConcat: rename_func("ARRAY_CAT"), 769 exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this), 770 exp.AtTimeZone: lambda self, e: self.func( 771 "CONVERT_TIMEZONE", e.args.get("zone"), e.this 772 ), 773 exp.BitwiseXor: rename_func("BITXOR"), 774 exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]), 775 exp.DateAdd: date_delta_sql("DATEADD"), 776 exp.DateDiff: date_delta_sql("DATEDIFF"), 777 exp.DateStrToDate: datestrtodate_sql, 778 exp.DayOfMonth: rename_func("DAYOFMONTH"), 779 exp.DayOfWeek: rename_func("DAYOFWEEK"), 780 exp.DayOfYear: rename_func("DAYOFYEAR"), 781 exp.Explode: rename_func("FLATTEN"), 782 exp.Extract: rename_func("DATE_PART"), 783 exp.FromTimeZone: lambda self, e: self.func( 784 "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this 785 ), 786 exp.GenerateSeries: lambda self, e: self.func( 787 "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step") 788 ), 789 exp.GroupConcat: rename_func("LISTAGG"), 790 exp.If: if_sql(name="IFF", false_value="NULL"), 791 exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression), 792 exp.JSONExtractScalar: lambda self, e: self.func( 793 "JSON_EXTRACT_PATH_TEXT", e.this, e.expression 794 ), 795 exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions), 796 exp.JSONPathRoot: lambda *_: "", 797 exp.LogicalAnd: rename_func("BOOLAND_AGG"), 798 exp.LogicalOr: rename_func("BOOLOR_AGG"), 799 exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), 800 exp.Max: max_or_greatest, 801 exp.Min: min_or_least, 802 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 803 exp.PercentileCont: transforms.preprocess( 804 [transforms.add_within_group_for_percentiles] 805 ), 806 exp.PercentileDisc: transforms.preprocess( 807 [transforms.add_within_group_for_percentiles] 808 ), 809 exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]), 810 exp.RegexpILike: _regexpilike_sql, 811 exp.Rand: rename_func("RANDOM"), 812 exp.Select: transforms.preprocess( 813 [ 814 transforms.eliminate_distinct_on, 815 transforms.explode_to_unnest(), 816 transforms.eliminate_semi_and_anti_joins, 817 ] 818 ), 819 exp.SHA: rename_func("SHA1"), 820 exp.StarMap: rename_func("OBJECT_CONSTRUCT"), 821 exp.StartsWith: rename_func("STARTSWITH"), 822 exp.StrPosition: lambda self, e: self.func( 823 "POSITION", e.args.get("substr"), e.this, e.args.get("position") 824 ), 825 exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)), 826 exp.Stuff: rename_func("INSERT"), 827 exp.TimeAdd: date_delta_sql("TIMEADD"), 828 exp.TimestampDiff: lambda self, e: self.func( 829 "TIMESTAMPDIFF", e.unit, e.expression, e.this 830 ), 831 exp.TimestampTrunc: 
timestamptrunc_sql, 832 exp.TimeStrToTime: timestrtotime_sql, 833 exp.TimeToStr: lambda self, e: self.func( 834 "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e) 835 ), 836 exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})", 837 exp.ToArray: rename_func("TO_ARRAY"), 838 exp.ToChar: lambda self, e: self.function_fallback_sql(e), 839 exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression), 840 exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True), 841 exp.TsOrDsDiff: date_delta_sql("DATEDIFF"), 842 exp.TsOrDsToDate: lambda self, e: self.func( 843 "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e) 844 ), 845 exp.UnixToTime: rename_func("TO_TIMESTAMP"), 846 exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"), 847 exp.WeekOfYear: rename_func("WEEKOFYEAR"), 848 exp.Xor: rename_func("BOOLXOR"), 849 } 850 851 SUPPORTED_JSON_PATH_PARTS = { 852 exp.JSONPathKey, 853 exp.JSONPathRoot, 854 exp.JSONPathSubscript, 855 } 856 857 TYPE_MAPPING = { 858 **generator.Generator.TYPE_MAPPING, 859 exp.DataType.Type.NESTED: "OBJECT", 860 exp.DataType.Type.STRUCT: "OBJECT", 861 } 862 863 STAR_MAPPING = { 864 "except": "EXCLUDE", 865 "replace": "RENAME", 866 } 867 868 PROPERTIES_LOCATION = { 869 **generator.Generator.PROPERTIES_LOCATION, 870 exp.SetProperty: exp.Properties.Location.UNSUPPORTED, 871 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 872 } 873 874 UNSUPPORTED_VALUES_EXPRESSIONS = { 875 exp.Map, 876 exp.StarMap, 877 exp.Struct, 878 exp.VarMap, 879 } 880 881 def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str: 882 if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS): 883 values_as_table = False 884 885 return super().values_sql(expression, values_as_table=values_as_table) 886 887 def datatype_sql(self, expression: exp.DataType) -> str: 888 expressions = expression.expressions 889 if ( 890 expressions 891 and expression.is_type(*exp.DataType.STRUCT_TYPES) 892 and any(isinstance(field_type, exp.DataType) for field_type in expressions) 893 ): 894 # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ] 895 return "OBJECT" 896 897 return super().datatype_sql(expression) 898 899 def tonumber_sql(self, expression: exp.ToNumber) -> str: 900 return self.func( 901 "TO_NUMBER", 902 expression.this, 903 expression.args.get("format"), 904 expression.args.get("precision"), 905 expression.args.get("scale"), 906 ) 907 908 def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str: 909 milli = expression.args.get("milli") 910 if milli is not None: 911 milli_to_nano = milli.pop() * exp.Literal.number(1000000) 912 expression.set("nano", milli_to_nano) 913 914 return rename_func("TIMESTAMP_FROM_PARTS")(self, expression) 915 916 def trycast_sql(self, expression: exp.TryCast) -> str: 917 value = expression.this 918 919 if value.type is None: 920 from sqlglot.optimizer.annotate_types import annotate_types 921 922 value = annotate_types(value) 923 924 if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN): 925 return super().trycast_sql(expression) 926 927 # TRY_CAST only works for string values in Snowflake 928 return self.cast_sql(expression) 929 930 def log_sql(self, expression: exp.Log) -> str: 931 if not expression.expression: 932 return self.func("LN", expression.this) 933 934 return super().log_sql(expression) 935 936 def unnest_sql(self, expression: exp.Unnest) -> str: 937 unnest_alias = expression.args.get("alias") 
938 offset = expression.args.get("offset") 939 940 columns = [ 941 exp.to_identifier("seq"), 942 exp.to_identifier("key"), 943 exp.to_identifier("path"), 944 offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"), 945 seq_get(unnest_alias.columns if unnest_alias else [], 0) 946 or exp.to_identifier("value"), 947 exp.to_identifier("this"), 948 ] 949 950 if unnest_alias: 951 unnest_alias.set("columns", columns) 952 else: 953 unnest_alias = exp.TableAlias(this="_u", columns=columns) 954 955 explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))" 956 alias = self.sql(unnest_alias) 957 alias = f" AS {alias}" if alias else "" 958 return f"{explode}{alias}" 959 960 def show_sql(self, expression: exp.Show) -> str: 961 terse = "TERSE " if expression.args.get("terse") else "" 962 history = " HISTORY" if expression.args.get("history") else "" 963 like = self.sql(expression, "like") 964 like = f" LIKE {like}" if like else "" 965 966 scope = self.sql(expression, "scope") 967 scope = f" {scope}" if scope else "" 968 969 scope_kind = self.sql(expression, "scope_kind") 970 if scope_kind: 971 scope_kind = f" IN {scope_kind}" 972 973 starts_with = self.sql(expression, "starts_with") 974 if starts_with: 975 starts_with = f" STARTS WITH {starts_with}" 976 977 limit = self.sql(expression, "limit") 978 979 from_ = self.sql(expression, "from") 980 if from_: 981 from_ = f" FROM {from_}" 982 983 return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}" 984 985 def regexpextract_sql(self, expression: exp.RegexpExtract) -> str: 986 # Other dialects don't support all of the following parameters, so we need to 987 # generate default values as necessary to ensure the transpilation is correct 988 group = expression.args.get("group") 989 parameters = expression.args.get("parameters") or (group and exp.Literal.string("c")) 990 occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1)) 991 position = expression.args.get("position") or (occurrence and exp.Literal.number(1)) 992 993 return self.func( 994 "REGEXP_SUBSTR", 995 expression.this, 996 expression.expression, 997 position, 998 occurrence, 999 parameters, 1000 group, 1001 ) 1002 1003 def except_op(self, expression: exp.Except) -> str: 1004 if not expression.args.get("distinct"): 1005 self.unsupported("EXCEPT with All is not supported in Snowflake") 1006 return super().except_op(expression) 1007 1008 def intersect_op(self, expression: exp.Intersect) -> str: 1009 if not expression.args.get("distinct"): 1010 self.unsupported("INTERSECT with All is not supported in Snowflake") 1011 return super().intersect_op(expression) 1012 1013 def describe_sql(self, expression: exp.Describe) -> str: 1014 # Default to table if kind is unknown 1015 kind_value = expression.args.get("kind") or "TABLE" 1016 kind = f" {kind_value}" if kind_value else "" 1017 this = f" {self.sql(expression, 'this')}" 1018 expressions = self.expressions(expression, flat=True) 1019 expressions = f" {expressions}" if expressions else "" 1020 return f"DESCRIBE{kind}{this}{expressions}" 1021 1022 def generatedasidentitycolumnconstraint_sql( 1023 self, expression: exp.GeneratedAsIdentityColumnConstraint 1024 ) -> str: 1025 start = expression.args.get("start") 1026 start = f" START {start}" if start else "" 1027 increment = expression.args.get("increment") 1028 increment = f" INCREMENT {increment}" if increment else "" 1029 return f"AUTOINCREMENT{start}{increment}" 1030 1031 def 
swaptable_sql(self, expression: exp.SwapTable) -> str: 1032 this = self.sql(expression, "this") 1033 return f"SWAP WITH {this}" 1034 1035 def with_properties(self, properties: exp.Properties) -> str: 1036 return self.properties(properties, wrapped=False, prefix=self.seg(""), sep=" ") 1037 1038 def cluster_sql(self, expression: exp.Cluster) -> str: 1039 return f"CLUSTER BY ({self.expressions(expression, flat=True)})" 1040 1041 def struct_sql(self, expression: exp.Struct) -> str: 1042 keys = [] 1043 values = [] 1044 1045 for i, e in enumerate(expression.expressions): 1046 if isinstance(e, exp.PropertyEQ): 1047 keys.append( 1048 exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this 1049 ) 1050 values.append(e.expression) 1051 else: 1052 keys.append(exp.Literal.string(f"_{i}")) 1053 values.append(e) 1054 1055 return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values))) 1056 1057 def copyparameter_sql(self, expression: exp.CopyParameter) -> str: 1058 option = self.sql(expression, "this").upper() 1059 if option == "FILE_FORMAT": 1060 values = self.expressions(expression, key="expression", flat=True, sep=" ") 1061 return f"{option} = ({values})" 1062 1063 return super().copyparameter_sql(expression) 1064 1065 def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str: 1066 if expression.args.get("weight") or expression.args.get("accuracy"): 1067 self.unsupported( 1068 "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake" 1069 ) 1070 1071 return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
Generator converts a given syntax tree to the corresponding SQL string.
Arguments:
- pretty: Whether to format the produced SQL string. Default: False.
- identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
- normalize: Whether to normalize identifiers to lowercase. Default: False.
- pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
- indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
- normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
- unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default ErrorLevel.WARN.
- max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether to preserve comments in the output SQL code. Default: True
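A short sketch of how the options listed above are usually supplied in practice; they are forwarded to this Generator by the top-level helpers:

import sqlglot

# pretty and identify are generator options; write selects this dialect.
sql = sqlglot.transpile(
    "SELECT IF(x > 0, 'a', 'b') FROM t",
    write="snowflake",
    pretty=True,
    identify=True,
)[0]
print(sql)  # expected to use IFF(...) and quoted identifiers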
887 def datatype_sql(self, expression: exp.DataType) -> str: 888 expressions = expression.expressions 889 if ( 890 expressions 891 and expression.is_type(*exp.DataType.STRUCT_TYPES) 892 and any(isinstance(field_type, exp.DataType) for field_type in expressions) 893 ): 894 # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ] 895 return "OBJECT" 896 897 return super().datatype_sql(expression)
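A minimal sketch of the rule above, building a struct type whose fields are bare data types so that it collapses to OBJECT (names here are illustrative):

from sqlglot import exp
from sqlglot.dialects.snowflake import Snowflake

generator = Snowflake().generator()

# A STRUCT whose field list consists of plain DataType nodes (no key names)
# cannot be rendered as a typed OBJECT, so plain OBJECT is emitted instead.
struct_type = exp.DataType(
    this=exp.DataType.Type.STRUCT,
    expressions=[exp.DataType.build("INT")],
    nested=True,
)
print(generator.datatype_sql(struct_type))  # expected: OBJECT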
908 def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str: 909 milli = expression.args.get("milli") 910 if milli is not None: 911 milli_to_nano = milli.pop() * exp.Literal.number(1000000) 912 expression.set("nano", milli_to_nano) 913 914 return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
916 def trycast_sql(self, expression: exp.TryCast) -> str: 917 value = expression.this 918 919 if value.type is None: 920 from sqlglot.optimizer.annotate_types import annotate_types 921 922 value = annotate_types(value) 923 924 if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN): 925 return super().trycast_sql(expression) 926 927 # TRY_CAST only works for string values in Snowflake 928 return self.cast_sql(expression)
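A sketch of the behavior above; the outputs in the comments are what one would expect, since TRY_CAST only survives when the operand is (or may be) a string:

import sqlglot

# String operand: the type check passes, so TRY_CAST is preserved.
print(sqlglot.transpile("SELECT TRY_CAST('10' AS INT)", write="snowflake")[0])
# expected to resemble: SELECT TRY_CAST('10' AS INT)

# Numeric operand: Snowflake's TRY_CAST would reject it, so a plain CAST is emitted.
print(sqlglot.transpile("SELECT TRY_CAST(10 AS TEXT)", write="snowflake")[0])
# expected to resemble: SELECT CAST(10 AS TEXT)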
936 def unnest_sql(self, expression: exp.Unnest) -> str: 937 unnest_alias = expression.args.get("alias") 938 offset = expression.args.get("offset") 939 940 columns = [ 941 exp.to_identifier("seq"), 942 exp.to_identifier("key"), 943 exp.to_identifier("path"), 944 offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"), 945 seq_get(unnest_alias.columns if unnest_alias else [], 0) 946 or exp.to_identifier("value"), 947 exp.to_identifier("this"), 948 ] 949 950 if unnest_alias: 951 unnest_alias.set("columns", columns) 952 else: 953 unnest_alias = exp.TableAlias(this="_u", columns=columns) 954 955 explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))" 956 alias = self.sql(unnest_alias) 957 alias = f" AS {alias}" if alias else "" 958 return f"{explode}{alias}"
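A sketch of transpiling an UNNEST from another dialect into the TABLE(FLATTEN(...)) shape produced above; the reading dialect and exact output are illustrative:

import sqlglot

print(sqlglot.transpile("SELECT * FROM UNNEST([1, 2, 3])", read="duckdb", write="snowflake")[0])
# expected to resemble:
# SELECT * FROM TABLE(FLATTEN(INPUT => [1, 2, 3])) AS _u(seq, key, path, index, value, this)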
960 def show_sql(self, expression: exp.Show) -> str: 961 terse = "TERSE " if expression.args.get("terse") else "" 962 history = " HISTORY" if expression.args.get("history") else "" 963 like = self.sql(expression, "like") 964 like = f" LIKE {like}" if like else "" 965 966 scope = self.sql(expression, "scope") 967 scope = f" {scope}" if scope else "" 968 969 scope_kind = self.sql(expression, "scope_kind") 970 if scope_kind: 971 scope_kind = f" IN {scope_kind}" 972 973 starts_with = self.sql(expression, "starts_with") 974 if starts_with: 975 starts_with = f" STARTS WITH {starts_with}" 976 977 limit = self.sql(expression, "limit") 978 979 from_ = self.sql(expression, "from") 980 if from_: 981 from_ = f" FROM {from_}" 982 983 return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
985 def regexpextract_sql(self, expression: exp.RegexpExtract) -> str: 986 # Other dialects don't support all of the following parameters, so we need to 987 # generate default values as necessary to ensure the transpilation is correct 988 group = expression.args.get("group") 989 parameters = expression.args.get("parameters") or (group and exp.Literal.string("c")) 990 occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1)) 991 position = expression.args.get("position") or (occurrence and exp.Literal.number(1)) 992 993 return self.func( 994 "REGEXP_SUBSTR", 995 expression.this, 996 expression.expression, 997 position, 998 occurrence, 999 parameters, 1000 group, 1001 )
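A sketch of the default filling above, constructing a RegexpExtract node that only carries a group argument (the column and pattern are made up):

from sqlglot import exp
from sqlglot.dialects.snowflake import Snowflake

generator = Snowflake().generator()

# Only `group` is provided; position, occurrence and parameters are defaulted so
# the generated REGEXP_SUBSTR call keeps every argument in its correct slot.
node = exp.RegexpExtract(
    this=exp.column("col"),
    expression=exp.Literal.string("a(b)"),
    group=exp.Literal.number(1),
)
print(generator.regexpextract_sql(node))
# expected: REGEXP_SUBSTR(col, 'a(b)', 1, 1, 'c', 1)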
1013 def describe_sql(self, expression: exp.Describe) -> str: 1014 # Default to table if kind is unknown 1015 kind_value = expression.args.get("kind") or "TABLE" 1016 kind = f" {kind_value}" if kind_value else "" 1017 this = f" {self.sql(expression, 'this')}" 1018 expressions = self.expressions(expression, flat=True) 1019 expressions = f" {expressions}" if expressions else "" 1020 return f"DESCRIBE{kind}{this}{expressions}"
1022 def generatedasidentitycolumnconstraint_sql( 1023 self, expression: exp.GeneratedAsIdentityColumnConstraint 1024 ) -> str: 1025 start = expression.args.get("start") 1026 start = f" START {start}" if start else "" 1027 increment = expression.args.get("increment") 1028 increment = f" INCREMENT {increment}" if increment else "" 1029 return f"AUTOINCREMENT{start}{increment}"
1041 def struct_sql(self, expression: exp.Struct) -> str: 1042 keys = [] 1043 values = [] 1044 1045 for i, e in enumerate(expression.expressions): 1046 if isinstance(e, exp.PropertyEQ): 1047 keys.append( 1048 exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this 1049 ) 1050 values.append(e.expression) 1051 else: 1052 keys.append(exp.Literal.string(f"_{i}")) 1053 values.append(e) 1054 1055 return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
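A sketch of the OBJECT_CONSTRUCT generation above, using a struct literal read from DuckDB; the exact rendering may vary between sqlglot versions:

import sqlglot

# DuckDB struct literals parse into exp.Struct, which Snowflake renders as
# OBJECT_CONSTRUCT with alternating keys and values.
print(sqlglot.transpile("SELECT {'a': 1, 'b': 2} AS obj", read="duckdb", write="snowflake")[0])
# expected to resemble: SELECT OBJECT_CONSTRUCT('a', 1, 'b', 2) AS obj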
1057 def copyparameter_sql(self, expression: exp.CopyParameter) -> str: 1058 option = self.sql(expression, "this").upper() 1059 if option == "FILE_FORMAT": 1060 values = self.expressions(expression, key="expression", flat=True, sep=" ") 1061 return f"{option} = ({values})" 1062 1063 return super().copyparameter_sql(expression)
1065 def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str: 1066 if expression.args.get("weight") or expression.args.get("accuracy"): 1067 self.unsupported( 1068 "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake" 1069 ) 1070 1071 return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
Inherited Members
- sqlglot.generator.Generator
- Generator
- NULL_ORDERING_SUPPORTED
- IGNORE_NULLS_IN_FUNC
- LOCKING_READS_SUPPORTED
- EXPLICIT_UNION
- WRAP_DERIVED_VALUES
- CREATE_FUNCTION_RETURN_AS
- INTERVAL_ALLOWS_PLURAL_FORM
- LIMIT_FETCH
- RENAME_TABLE_WITH_DB
- GROUPINGS_SEP
- INDEX_ON
- QUERY_HINT_SEP
- IS_BOOL_ALLOWED
- DUPLICATE_KEY_UPDATE_WITH_SET
- LIMIT_IS_TOP
- RETURNING_END
- COLUMN_JOIN_MARKS_SUPPORTED
- EXTRACT_ALLOWS_QUOTES
- TZ_TO_WITH_TIME_ZONE
- NVL2_SUPPORTED
- VALUES_AS_TABLE
- ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
- UNNEST_WITH_ORDINALITY
- SEMI_ANTI_JOIN_WITH_SIDE
- COMPUTED_COLUMN_WITH_TYPE
- TABLESAMPLE_REQUIRES_PARENS
- TABLESAMPLE_SIZE_IS_ROWS
- TABLESAMPLE_KEYWORDS
- TABLESAMPLE_WITH_METHOD
- TABLESAMPLE_SEED_KEYWORD
- DATA_TYPE_SPECIFIERS_ALLOWED
- ENSURE_BOOLS
- CTE_RECURSIVE_KEYWORD_REQUIRED
- SUPPORTS_SINGLE_ARG_CONCAT
- LAST_DAY_SUPPORTS_DATE_PART
- SUPPORTS_TABLE_ALIAS_COLUMNS
- UNPIVOT_ALIASES_ARE_IDENTIFIERS
- SUPPORTS_SELECT_INTO
- SUPPORTS_UNLOGGED_TABLES
- SUPPORTS_CREATE_TABLE_LIKE
- LIKE_PROPERTY_INSIDE_SCHEMA
- MULTI_ARG_DISTINCT
- JSON_TYPE_REQUIRED_FOR_EXTRACTION
- JSON_PATH_BRACKETED_KEY_SUPPORTED
- JSON_PATH_SINGLE_QUOTE_ESCAPE
- CAN_IMPLEMENT_ARRAY_ANY
- SUPPORTS_TO_NUMBER
- OUTER_UNION_MODIFIERS
- COPY_HAS_INTO_KEYWORD
- TIME_PART_SINGULARS
- TOKEN_MAPPING
- NAMED_PLACEHOLDER_TOKEN
- RESERVED_KEYWORDS
- WITH_SEPARATED_COMMENTS
- EXCLUDE_COMMENTS
- UNWRAPPED_INTERVAL_VALUES
- PARAMETERIZABLE_TEXT_TYPES
- EXPRESSIONS_WITHOUT_NESTED_CTES
- SENTINEL_LINE_BREAK
- pretty
- identify
- normalize
- pad
- unsupported_level
- max_unsupported
- leading_comma
- max_text_width
- comments
- dialect
- normalize_functions
- unsupported_messages
- generate
- preprocess
- unsupported
- sep
- seg
- pad_comment
- maybe_comment
- wrap
- no_identify
- normalize_func
- indent
- sql
- uncache_sql
- cache_sql
- characterset_sql
- column_parts
- column_sql
- columnposition_sql
- columndef_sql
- columnconstraint_sql
- computedcolumnconstraint_sql
- autoincrementcolumnconstraint_sql
- compresscolumnconstraint_sql
- generatedasrowcolumnconstraint_sql
- periodforsystemtimeconstraint_sql
- notnullcolumnconstraint_sql
- transformcolumnconstraint_sql
- primarykeycolumnconstraint_sql
- uniquecolumnconstraint_sql
- createable_sql
- create_sql
- sequenceproperties_sql
- clone_sql
- heredoc_sql
- prepend_ctes
- with_sql
- cte_sql
- tablealias_sql
- bitstring_sql
- hexstring_sql
- bytestring_sql
- unicodestring_sql
- rawstring_sql
- datatypeparam_sql
- directory_sql
- delete_sql
- drop_sql
- except_sql
- fetch_sql
- filter_sql
- hint_sql
- indexparameters_sql
- index_sql
- identifier_sql
- inputoutputformat_sql
- national_sql
- partition_sql
- properties_sql
- root_properties
- properties
- locate_properties
- property_name
- property_sql
- likeproperty_sql
- fallbackproperty_sql
- journalproperty_sql
- freespaceproperty_sql
- checksumproperty_sql
- mergeblockratioproperty_sql
- datablocksizeproperty_sql
- blockcompressionproperty_sql
- isolatedloadingproperty_sql
- partitionboundspec_sql
- partitionedofproperty_sql
- lockingproperty_sql
- withdataproperty_sql
- withsystemversioningproperty_sql
- insert_sql
- intersect_sql
- introducer_sql
- kill_sql
- pseudotype_sql
- objectidentifier_sql
- onconflict_sql
- returning_sql
- rowformatdelimitedproperty_sql
- withtablehint_sql
- indextablehint_sql
- historicaldata_sql
- table_parts
- table_sql
- tablesample_sql
- pivot_sql
- version_sql
- tuple_sql
- update_sql
- var_sql
- into_sql
- from_sql
- group_sql
- having_sql
- connect_sql
- prior_sql
- join_sql
- lambda_sql
- lateral_op
- lateral_sql
- limit_sql
- offset_sql
- setitem_sql
- set_sql
- pragma_sql
- lock_sql
- literal_sql
- escape_str
- loaddata_sql
- null_sql
- boolean_sql
- order_sql
- withfill_sql
- distribute_sql
- sort_sql
- ordered_sql
- matchrecognizemeasure_sql
- matchrecognize_sql
- query_modifiers
- queryoption_sql
- offset_limit_modifiers
- after_limit_modifiers
- select_sql
- schema_sql
- schema_columns_sql
- star_sql
- parameter_sql
- sessionparameter_sql
- placeholder_sql
- subquery_sql
- qualify_sql
- set_operations
- union_sql
- union_op
- prewhere_sql
- where_sql
- window_sql
- partition_by_sql
- windowspec_sql
- withingroup_sql
- between_sql
- bracket_offset_expressions
- bracket_sql
- all_sql
- any_sql
- exists_sql
- case_sql
- constraint_sql
- nextvaluefor_sql
- extract_sql
- trim_sql
- convert_concat_args
- concat_sql
- concatws_sql
- check_sql
- foreignkey_sql
- primarykey_sql
- if_sql
- matchagainst_sql
- jsonkeyvalue_sql
- jsonpath_sql
- json_path_part
- formatjson_sql
- jsonobject_sql
- jsonobjectagg_sql
- jsonarray_sql
- jsonarrayagg_sql
- jsoncolumndef_sql
- jsonschema_sql
- jsontable_sql
- openjsoncolumndef_sql
- openjson_sql
- in_sql
- in_unnest_op
- interval_sql
- return_sql
- reference_sql
- anonymous_sql
- paren_sql
- neg_sql
- not_sql
- alias_sql
- pivotalias_sql
- aliases_sql
- atindex_sql
- attimezone_sql
- fromtimezone_sql
- add_sql
- and_sql
- or_sql
- xor_sql
- connector_sql
- bitwiseand_sql
- bitwiseleftshift_sql
- bitwisenot_sql
- bitwiseor_sql
- bitwiserightshift_sql
- bitwisexor_sql
- cast_sql
- currentdate_sql
- currenttimestamp_sql
- collate_sql
- command_sql
- comment_sql
- mergetreettlaction_sql
- mergetreettl_sql
- transaction_sql
- commit_sql
- rollback_sql
- altercolumn_sql
- alterdiststyle_sql
- altersortkey_sql
- renametable_sql
- renamecolumn_sql
- altertable_sql
- add_column_sql
- droppartition_sql
- addconstraint_sql
- distinct_sql
- ignorenulls_sql
- respectnulls_sql
- havingmax_sql
- intdiv_sql
- dpipe_sql
- div_sql
- overlaps_sql
- distance_sql
- dot_sql
- eq_sql
- propertyeq_sql
- escape_sql
- glob_sql
- gt_sql
- gte_sql
- ilike_sql
- ilikeany_sql
- is_sql
- like_sql
- likeany_sql
- similarto_sql
- lt_sql
- lte_sql
- mod_sql
- mul_sql
- neq_sql
- nullsafeeq_sql
- nullsafeneq_sql
- slice_sql
- sub_sql
- try_sql
- use_sql
- binary
- function_fallback_sql
- func
- format_args
- too_wide
- format_time
- expressions
- op_expressions
- naked_property
- tag_sql
- token_sql
- userdefinedfunction_sql
- joinhint_sql
- kwarg_sql
- when_sql
- merge_sql
- tochar_sql
- dictproperty_sql
- dictrange_sql
- dictsubproperty_sql
- oncluster_sql
- clusteredbyproperty_sql
- anyvalue_sql
- querytransform_sql
- indexconstraintoption_sql
- checkcolumnconstraint_sql
- indexcolumnconstraint_sql
- nvl2_sql
- comprehension_sql
- columnprefix_sql
- opclass_sql
- predict_sql
- forin_sql
- refresh_sql
- operator_sql
- toarray_sql
- tsordstotime_sql
- tsordstotimestamp_sql
- tsordstodate_sql
- unixdate_sql
- lastday_sql
- dateadd_sql
- arrayany_sql
- generateseries_sql
- partitionrange_sql
- truncatetable_sql
- convert_sql
- credentials_sql
- copy_sql
- semicolon_sql