Supports BigQuery Standard SQL.
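Before the full source, a minimal usage sketch (the query is illustrative, not taken from this module): the dialect is normally reached through sqlglot's top-level transpile API.

    import sqlglot

    # Read DuckDB syntax, emit BigQuery syntax. TYPE_MAPPING below rewrites
    # VARCHAR to STRING, and the Cast transform drops the precision parameter.
    sql = sqlglot.transpile(
        "SELECT CAST(x AS VARCHAR(10)) FROM t", read="duckdb", write="bigquery"
    )[0]
    print(sql)  # expected output: SELECT CAST(x AS STRING) FROM t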
1"""Supports BigQuery Standard SQL.""" 2 3from __future__ import annotations 4 5import re 6import typing as t 7 8from sqlglot import exp, generator, parser, tokens, transforms 9from sqlglot.dialects.dialect import ( 10 Dialect, 11 datestrtodate_sql, 12 inline_array_sql, 13 max_or_greatest, 14 min_or_least, 15 no_ilike_sql, 16 parse_date_delta_with_interval, 17 rename_func, 18 timestrtotime_sql, 19 ts_or_ds_to_date_sql, 20) 21from sqlglot.helper import seq_get 22from sqlglot.tokens import TokenType 23 24E = t.TypeVar("E", bound=exp.Expression) 25 26 27def _date_add_sql( 28 data_type: str, kind: str 29) -> t.Callable[[generator.Generator, exp.Expression], str]: 30 def func(self, expression): 31 this = self.sql(expression, "this") 32 unit = expression.args.get("unit") 33 unit = exp.var(unit.name.upper() if unit else "DAY") 34 interval = exp.Interval(this=expression.expression, unit=unit) 35 return f"{data_type}_{kind}({this}, {self.sql(interval)})" 36 37 return func 38 39 40def _derived_table_values_to_unnest(self: generator.Generator, expression: exp.Values) -> str: 41 if not isinstance(expression.unnest().parent, exp.From): 42 return self.values_sql(expression) 43 44 alias = expression.args.get("alias") 45 46 structs = [ 47 exp.Struct( 48 expressions=[ 49 exp.alias_(value, column_name) 50 for value, column_name in zip( 51 t.expressions, 52 alias.columns 53 if alias and alias.columns 54 else (f"_c{i}" for i in range(len(t.expressions))), 55 ) 56 ] 57 ) 58 for t in expression.find_all(exp.Tuple) 59 ] 60 61 return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)])) 62 63 64def _returnsproperty_sql(self: generator.Generator, expression: exp.ReturnsProperty) -> str: 65 this = expression.this 66 if isinstance(this, exp.Schema): 67 this = f"{this.this} <{self.expressions(this)}>" 68 else: 69 this = self.sql(this) 70 return f"RETURNS {this}" 71 72 73def _create_sql(self: generator.Generator, expression: exp.Create) -> str: 74 kind = expression.args["kind"] 75 returns = expression.find(exp.ReturnsProperty) 76 if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"): 77 expression = expression.copy() 78 expression.set("kind", "TABLE FUNCTION") 79 if isinstance( 80 expression.expression, 81 ( 82 exp.Subquery, 83 exp.Literal, 84 ), 85 ): 86 expression.set("expression", expression.expression.this) 87 88 return self.create_sql(expression) 89 90 return self.create_sql(expression) 91 92 93def _unqualify_unnest(expression: exp.Expression) -> exp.Expression: 94 """Remove references to unnest table aliases since bigquery doesn't allow them. 95 96 These are added by the optimizer's qualify_column step. 
97 """ 98 if isinstance(expression, exp.Select): 99 for unnest in expression.find_all(exp.Unnest): 100 if isinstance(unnest.parent, (exp.From, exp.Join)) and unnest.alias: 101 for select in expression.selects: 102 for column in select.find_all(exp.Column): 103 if column.table == unnest.alias: 104 column.set("table", None) 105 106 return expression 107 108 109class BigQuery(Dialect): 110 unnest_column_only = True 111 time_mapping = { 112 "%M": "%-M", 113 "%d": "%-d", 114 "%m": "%-m", 115 "%y": "%-y", 116 "%H": "%-H", 117 "%I": "%-I", 118 "%S": "%-S", 119 "%j": "%-j", 120 } 121 122 class Tokenizer(tokens.Tokenizer): 123 QUOTES = [ 124 (prefix + quote, quote) if prefix else quote 125 for quote in ["'", '"', '"""', "'''"] 126 for prefix in ["", "r", "R"] 127 ] 128 COMMENTS = ["--", "#", ("/*", "*/")] 129 IDENTIFIERS = ["`"] 130 STRING_ESCAPES = ["\\"] 131 HEX_STRINGS = [("0x", ""), ("0X", "")] 132 BYTE_STRINGS = [("b'", "'"), ("B'", "'")] 133 134 KEYWORDS = { 135 **tokens.Tokenizer.KEYWORDS, 136 "ANY TYPE": TokenType.VARIANT, 137 "BEGIN": TokenType.COMMAND, 138 "BEGIN TRANSACTION": TokenType.BEGIN, 139 "CURRENT_DATETIME": TokenType.CURRENT_DATETIME, 140 "DECLARE": TokenType.COMMAND, 141 "GEOGRAPHY": TokenType.GEOGRAPHY, 142 "FLOAT64": TokenType.DOUBLE, 143 "INT64": TokenType.BIGINT, 144 "BYTES": TokenType.BINARY, 145 "NOT DETERMINISTIC": TokenType.VOLATILE, 146 "UNKNOWN": TokenType.NULL, 147 } 148 KEYWORDS.pop("DIV") 149 150 class Parser(parser.Parser): 151 PREFIXED_PIVOT_COLUMNS = True 152 153 LOG_BASE_FIRST = False 154 LOG_DEFAULTS_TO_LN = True 155 156 FUNCTIONS = { 157 **parser.Parser.FUNCTIONS, # type: ignore 158 "DATE_TRUNC": lambda args: exp.DateTrunc( 159 unit=exp.Literal.string(str(seq_get(args, 1))), 160 this=seq_get(args, 0), 161 ), 162 "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd), 163 "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd), 164 "DIV": lambda args: exp.IntDiv(this=seq_get(args, 0), expression=seq_get(args, 1)), 165 "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list, 166 "REGEXP_EXTRACT": lambda args: exp.RegexpExtract( 167 this=seq_get(args, 0), 168 expression=seq_get(args, 1), 169 position=seq_get(args, 2), 170 occurrence=seq_get(args, 3), 171 group=exp.Literal.number(1) 172 if re.compile(str(seq_get(args, 1))).groups == 1 173 else None, 174 ), 175 "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd), 176 "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd), 177 "DATE_SUB": parse_date_delta_with_interval(exp.DateSub), 178 "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub), 179 "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub), 180 "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub), 181 "PARSE_TIMESTAMP": lambda args: exp.StrToTime( 182 this=seq_get(args, 1), format=seq_get(args, 0) 183 ), 184 } 185 186 FUNCTION_PARSERS = { 187 **parser.Parser.FUNCTION_PARSERS, # type: ignore 188 "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]), 189 } 190 FUNCTION_PARSERS.pop("TRIM") 191 192 NO_PAREN_FUNCTIONS = { 193 **parser.Parser.NO_PAREN_FUNCTIONS, # type: ignore 194 TokenType.CURRENT_DATETIME: exp.CurrentDatetime, 195 } 196 197 NESTED_TYPE_TOKENS = { 198 *parser.Parser.NESTED_TYPE_TOKENS, # type: ignore 199 TokenType.TABLE, 200 } 201 202 ID_VAR_TOKENS = { 203 *parser.Parser.ID_VAR_TOKENS, # type: ignore 204 TokenType.VALUES, 205 } 206 207 PROPERTY_PARSERS = { 208 **parser.Parser.PROPERTY_PARSERS, # type: ignore 209 "NOT DETERMINISTIC": lambda self: self.expression( 210 
exp.StabilityProperty, this=exp.Literal.string("VOLATILE") 211 ), 212 "OPTIONS": lambda self: self._parse_with_property(), 213 } 214 215 CONSTRAINT_PARSERS = { 216 **parser.Parser.CONSTRAINT_PARSERS, # type: ignore 217 "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()), 218 } 219 220 def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]: 221 this = super()._parse_table_part(schema=schema) 222 223 # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names 224 if isinstance(this, exp.Identifier): 225 table_name = this.name 226 while self._match(TokenType.DASH, advance=False) and self._next: 227 self._advance(2) 228 table_name += f"-{self._prev.text}" 229 230 this = exp.Identifier(this=table_name, quoted=this.args.get("quoted")) 231 232 return this 233 234 def _parse_table_parts(self, schema: bool = False) -> exp.Expression: 235 table = super()._parse_table_parts(schema=schema) 236 if isinstance(table.this, exp.Identifier) and "." in table.name: 237 table = exp.to_table(table.name, dialect="bigquery") 238 return table 239 240 class Generator(generator.Generator): 241 EXPLICIT_UNION = True 242 INTERVAL_ALLOWS_PLURAL_FORM = False 243 JOIN_HINTS = False 244 TABLE_HINTS = False 245 LIMIT_FETCH = "LIMIT" 246 RENAME_TABLE_WITH_DB = False 247 248 TRANSFORMS = { 249 **generator.Generator.TRANSFORMS, # type: ignore 250 exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"), 251 exp.ArraySize: rename_func("ARRAY_LENGTH"), 252 exp.AtTimeZone: lambda self, e: self.func( 253 "TIMESTAMP", self.func("DATETIME", e.this, e.args.get("zone")) 254 ), 255 exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]), 256 exp.DateAdd: _date_add_sql("DATE", "ADD"), 257 exp.DateSub: _date_add_sql("DATE", "SUB"), 258 exp.DatetimeAdd: _date_add_sql("DATETIME", "ADD"), 259 exp.DatetimeSub: _date_add_sql("DATETIME", "SUB"), 260 exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})", 261 exp.DateStrToDate: datestrtodate_sql, 262 exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")), 263 exp.GroupConcat: rename_func("STRING_AGG"), 264 exp.ILike: no_ilike_sql, 265 exp.IntDiv: rename_func("DIV"), 266 exp.Max: max_or_greatest, 267 exp.Min: min_or_least, 268 exp.Select: transforms.preprocess( 269 [_unqualify_unnest, transforms.eliminate_distinct_on] 270 ), 271 exp.StrToTime: lambda self, e: f"PARSE_TIMESTAMP({self.format_time(e)}, {self.sql(e, 'this')})", 272 exp.TimeAdd: _date_add_sql("TIME", "ADD"), 273 exp.TimeSub: _date_add_sql("TIME", "SUB"), 274 exp.TimestampAdd: _date_add_sql("TIMESTAMP", "ADD"), 275 exp.TimestampSub: _date_add_sql("TIMESTAMP", "SUB"), 276 exp.TimeStrToTime: timestrtotime_sql, 277 exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"), 278 exp.TsOrDsAdd: _date_add_sql("DATE", "ADD"), 279 exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}", 280 exp.VariancePop: rename_func("VAR_POP"), 281 exp.Values: _derived_table_values_to_unnest, 282 exp.ReturnsProperty: _returnsproperty_sql, 283 exp.Create: _create_sql, 284 exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression), 285 exp.StabilityProperty: lambda self, e: f"DETERMINISTIC" 286 if e.name == "IMMUTABLE" 287 else "NOT DETERMINISTIC", 288 exp.RegexpLike: rename_func("REGEXP_CONTAINS"), 289 } 290 291 TYPE_MAPPING = { 292 **generator.Generator.TYPE_MAPPING, # type: ignore 293 exp.DataType.Type.BIGDECIMAL: 
"BIGNUMERIC", 294 exp.DataType.Type.BIGINT: "INT64", 295 exp.DataType.Type.BINARY: "BYTES", 296 exp.DataType.Type.BOOLEAN: "BOOL", 297 exp.DataType.Type.CHAR: "STRING", 298 exp.DataType.Type.DECIMAL: "NUMERIC", 299 exp.DataType.Type.DOUBLE: "FLOAT64", 300 exp.DataType.Type.FLOAT: "FLOAT64", 301 exp.DataType.Type.INT: "INT64", 302 exp.DataType.Type.NCHAR: "STRING", 303 exp.DataType.Type.NVARCHAR: "STRING", 304 exp.DataType.Type.SMALLINT: "INT64", 305 exp.DataType.Type.TEXT: "STRING", 306 exp.DataType.Type.TIMESTAMP: "DATETIME", 307 exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP", 308 exp.DataType.Type.TINYINT: "INT64", 309 exp.DataType.Type.VARBINARY: "BYTES", 310 exp.DataType.Type.VARCHAR: "STRING", 311 exp.DataType.Type.VARIANT: "ANY TYPE", 312 } 313 314 PROPERTIES_LOCATION = { 315 **generator.Generator.PROPERTIES_LOCATION, # type: ignore 316 exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA, 317 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED, 318 } 319 320 def array_sql(self, expression: exp.Array) -> str: 321 first_arg = seq_get(expression.expressions, 0) 322 if isinstance(first_arg, exp.Subqueryable): 323 return f"ARRAY{self.wrap(self.sql(first_arg))}" 324 325 return inline_array_sql(self, expression) 326 327 def transaction_sql(self, *_) -> str: 328 return "BEGIN TRANSACTION" 329 330 def commit_sql(self, *_) -> str: 331 return "COMMIT TRANSACTION" 332 333 def rollback_sql(self, *_) -> str: 334 return "ROLLBACK TRANSACTION" 335 336 def in_unnest_op(self, expression: exp.Unnest) -> str: 337 return self.sql(expression) 338 339 def except_op(self, expression: exp.Except) -> str: 340 if not expression.args.get("distinct", False): 341 self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery") 342 return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 343 344 def intersect_op(self, expression: exp.Intersect) -> str: 345 if not expression.args.get("distinct", False): 346 self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery") 347 return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}" 348 349 def with_properties(self, properties: exp.Properties) -> str: 350 return self.properties(properties, prefix=self.seg("OPTIONS"))
BigQuery.Parser inherits from sqlglot.parser.Parser, which consumes a list of tokens produced by sqlglot.tokens.Tokenizer and produces a parsed syntax tree.
Arguments:
- error_level: the desired error level. Default: ErrorLevel.RAISE.
- error_message_context: determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 50.
- index_offset: index offset for arrays, e.g. ARRAY[0] vs ARRAY[1] as the head of a list. Default: 0.
- alias_post_tablesample: whether the table alias comes after TABLESAMPLE. Default: False.
- max_errors: maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3.
- null_ordering: indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small".
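As an illustration of how these options reach the parser (a sketch; the query is made up), sqlglot's entry points forward keyword arguments to the dialect's Parser:

    import sqlglot
    from sqlglot.errors import ErrorLevel

    # parse_one builds a BigQuery.Parser under the hood; parser options such
    # as error_level and max_errors are forwarded as keyword arguments.
    ast = sqlglot.parse_one(
        "SELECT DIV(10, 3)",  # DIV maps to exp.IntDiv via FUNCTIONS above
        read="bigquery",
        error_level=ErrorLevel.WARN,
    )
    print(repr(ast))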
BigQuery.Generator inherits from sqlglot.generator.Generator, which interprets the given syntax tree and produces a SQL string as output.
Arguments:
- time_mapping (dict): the dictionary of custom time mappings in which the key represents a python time format and the value represents the target time format
- time_trie (trie): a trie of the time_mapping keys
- pretty (bool): if set to True the returned string will be formatted. Default: False.
- quote_start (str): specifies which starting character to use to delimit quotes. Default: '.
- quote_end (str): specifies which ending character to use to delimit quotes. Default: '.
- identifier_start (str): specifies which starting character to use to delimit identifiers. Default: ".
- identifier_end (str): specifies which ending character to use to delimit identifiers. Default: ".
- bit_start (str): specifies which starting character to use to delimit bit literals. Default: None.
- bit_end (str): specifies which ending character to use to delimit bit literals. Default: None.
- hex_start (str): specifies which starting character to use to delimit hex literals. Default: None.
- hex_end (str): specifies which ending character to use to delimit hex literals. Default: None.
- byte_start (str): specifies which starting character to use to delimit byte literals. Default: None.
- byte_end (str): specifies which ending character to use to delimit byte literals. Default: None.
- identify (bool | str): 'always': always quote; 'safe': quote identifiers only if they don't contain an uppercase character; True defaults to 'always'.
- normalize (bool): if set to True, all identifiers will be lowercased
- string_escape (str): specifies a string escape character. Default: '.
- identifier_escape (str): specifies an identifier escape character. Default: ".
- pad (int): determines padding in a formatted string. Default: 2.
- indent (int): determines the size of indentation in a formatted string. Default: 4.
- unnest_column_only (bool): if True, unnest table aliases are considered only as column aliases
- normalize_functions (str): how to normalize function names: "upper", "lower", or None. Default: "upper"
- alias_post_tablesample (bool): whether the table alias comes after TABLESAMPLE. Default: False
- unsupported_level (ErrorLevel): determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
- null_ordering (str): Indicates the default null ordering method to use if not explicitly set. Options are "nulls_are_small", "nulls_are_large", "nulls_are_last". Default: "nulls_are_small"
- max_unsupported (int): Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3
- leading_comma (bool): whether the comma is leading or trailing in SELECT statements. Default: False
- max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
- comments: Whether or not to preserve comments in the output SQL code. Default: True
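Similarly, a sketch of driving the generator (illustrative query; assuming sqlglot's Expression.sql entry point): generator options such as pretty are forwarded as keyword arguments.

    import sqlglot

    ast = sqlglot.parse_one("SELECT a, b FROM t WHERE LOWER(a) LIKE '%x%'")
    # Expression.sql builds a BigQuery.Generator; an exp.ILike node would be
    # routed through no_ilike_sql above, since BigQuery has no ILIKE operator.
    print(ast.sql(dialect="bigquery", pretty=True))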