Edit on GitHub

sqlglot.dialects.bigquery — BigQuery dialect implementation for SQLGlot (source listing)

  1from __future__ import annotations
  2
  3import logging
  4import re
  5import typing as t
  6
  7from sqlglot import exp, generator, parser, tokens, transforms
  8from sqlglot._typing import E
  9from sqlglot.dialects.dialect import (
 10    Dialect,
 11    binary_from_function,
 12    date_add_interval_sql,
 13    datestrtodate_sql,
 14    format_time_lambda,
 15    inline_array_sql,
 16    json_keyvalue_comma_sql,
 17    max_or_greatest,
 18    min_or_least,
 19    no_ilike_sql,
 20    parse_date_delta_with_interval,
 21    regexp_replace_sql,
 22    rename_func,
 23    timestrtotime_sql,
 24    ts_or_ds_to_date_sql,
 25)
 26from sqlglot.helper import seq_get, split_num_words
 27from sqlglot.tokens import TokenType
 28
# Shared sqlglot logger; used below to warn when CTE column names can't be pushed down.
logger = logging.getLogger("sqlglot")
 30
 31
 32def _derived_table_values_to_unnest(self: BigQuery.Generator, expression: exp.Values) -> str:
 33    if not expression.find_ancestor(exp.From, exp.Join):
 34        return self.values_sql(expression)
 35
 36    alias = expression.args.get("alias")
 37
 38    structs = [
 39        exp.Struct(
 40            expressions=[
 41                exp.alias_(value, column_name)
 42                for value, column_name in zip(
 43                    t.expressions,
 44                    alias.columns
 45                    if alias and alias.columns
 46                    else (f"_c{i}" for i in range(len(t.expressions))),
 47                )
 48            ]
 49        )
 50        for t in expression.find_all(exp.Tuple)
 51    ]
 52
 53    return self.unnest_sql(exp.Unnest(expressions=[exp.Array(expressions=structs)]))
 54
 55
 56def _returnsproperty_sql(self: BigQuery.Generator, expression: exp.ReturnsProperty) -> str:
 57    this = expression.this
 58    if isinstance(this, exp.Schema):
 59        this = f"{this.this} <{self.expressions(this)}>"
 60    else:
 61        this = self.sql(this)
 62    return f"RETURNS {this}"
 63
 64
 65def _create_sql(self: BigQuery.Generator, expression: exp.Create) -> str:
 66    kind = expression.args["kind"]
 67    returns = expression.find(exp.ReturnsProperty)
 68
 69    if kind.upper() == "FUNCTION" and returns and returns.args.get("is_table"):
 70        expression = expression.copy()
 71        expression.set("kind", "TABLE FUNCTION")
 72
 73        if isinstance(expression.expression, (exp.Subquery, exp.Literal)):
 74            expression.set("expression", expression.expression.this)
 75
 76        return self.create_sql(expression)
 77
 78    return self.create_sql(expression)
 79
 80
 81def _unqualify_unnest(expression: exp.Expression) -> exp.Expression:
 82    """Remove references to unnest table aliases since bigquery doesn't allow them.
 83
 84    These are added by the optimizer's qualify_column step.
 85    """
 86    from sqlglot.optimizer.scope import find_all_in_scope
 87
 88    if isinstance(expression, exp.Select):
 89        unnest_aliases = {
 90            unnest.alias
 91            for unnest in find_all_in_scope(expression, exp.Unnest)
 92            if isinstance(unnest.parent, (exp.From, exp.Join))
 93        }
 94        if unnest_aliases:
 95            for column in expression.find_all(exp.Column):
 96                if column.table in unnest_aliases:
 97                    column.set("table", None)
 98                elif column.db in unnest_aliases:
 99                    column.set("db", None)
100
101    return expression
102
103
# https://issuetracker.google.com/issues/162294746
# workaround for bigquery bug when grouping by an expression and then ordering
# WITH x AS (SELECT 1 y)
# SELECT y + 1 z
# FROM x
# GROUP BY x + 1
# ORDER by z
def _alias_ordered_group(expression: exp.Expression) -> exp.Expression:
    """Replace GROUP BY expressions with their select-list aliases when an ORDER BY
    is also present, sidestepping the BigQuery bug linked above."""
    if not isinstance(expression, exp.Select):
        return expression

    group = expression.args.get("group")
    order = expression.args.get("order")
    if not (group and order):
        return expression

    # Map each aliased projection's underlying expression to its alias identifier.
    aliases = {
        select.this: select.args["alias"]
        for select in expression.selects
        if isinstance(select, exp.Alias)
    }

    for grouped in group.expressions:
        alias = aliases.get(grouped)
        if alias:
            grouped.replace(exp.column(alias))

    return expression
130
131
def _pushdown_cte_column_names(expression: exp.Expression) -> exp.Expression:
    """BigQuery doesn't allow column names when defining a CTE, so we try to push them down."""
    if not (isinstance(expression, exp.CTE) and expression.alias_column_names):
        return expression

    cte_query = expression.this

    if cte_query.is_star:
        logger.warning(
            "Can't push down CTE column names for star queries. Run the query through"
            " the optimizer or use 'qualify' to expand the star projections first."
        )
        return expression

    column_names = expression.alias_column_names
    expression.args["alias"].set("columns", None)

    for name, select in zip(column_names, cte_query.selects):
        to_replace = select

        # Inner aliases are shadowed by the CTE column names, so replace the
        # whole aliased projection but re-alias only its inner expression.
        if isinstance(select, exp.Alias):
            select = select.this

        to_replace.replace(exp.alias_(select, name))

    return expression
157
158
def _parse_timestamp(args: t.List) -> exp.StrToTime:
    """Build StrToTime from PARSE_TIMESTAMP(format, value[, zone]) arguments."""
    build = format_time_lambda(exp.StrToTime, "bigquery")
    expression = build([seq_get(args, 1), seq_get(args, 0)])
    expression.set("zone", seq_get(args, 2))
    return expression
163
164
def _parse_date(args: t.List) -> exp.Date | exp.DateFromParts:
    """DATE(y, m, d) parses to DateFromParts; any other arity is a plain Date."""
    if len(args) == 3:
        return exp.DateFromParts.from_arg_list(args)
    return exp.Date.from_arg_list(args)
168
169
def _parse_to_hex(args: t.List) -> exp.Hex | exp.MD5:
    # TO_HEX(MD5(..)) is common in BigQuery, so it's parsed into MD5 to simplify its transpilation
    arg = seq_get(args, 0)
    if isinstance(arg, exp.MD5Digest):
        return exp.MD5(this=arg.this)
    return exp.Hex(this=arg)
174
175
class BigQuery(Dialect):
    """BigQuery SQL dialect.

    Bundles the dialect-wide settings plus the nested Tokenizer, Parser and
    Generator overrides: backtick identifiers, BigQuery function/type spellings,
    UNNEST-as-single-column semantics, and the identifier case-sensitivity
    heuristic (CTEs are case-insensitive, qualified table names are not).
    """

    # UNNEST(arr) exposes a single column rather than a table of columns.
    UNNEST_COLUMN_ONLY = True
    SUPPORTS_USER_DEFINED_TYPES = False

    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None

    # bigquery udfs are case sensitive
    NORMALIZE_FUNCTIONS = False

    TIME_MAPPING = {
        "%D": "%m/%d/%y",
    }

    FORMAT_MAPPING = {
        "DD": "%d",
        "MM": "%m",
        "MON": "%b",
        "MONTH": "%B",
        "YYYY": "%Y",
        "YY": "%y",
        "HH": "%I",
        "HH12": "%I",
        "HH24": "%H",
        "MI": "%M",
        "SS": "%S",
        "SSSSS": "%f",
        "TZH": "%z",
    }

    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}

    @classmethod
    def normalize_identifier(cls, expression: E) -> E:
        """Lowercase identifiers that are not (heuristically) table references or UDF names."""
        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
        # The following check is essentially a heuristic to detect tables based on whether or
        # not they're qualified.
        if isinstance(expression, exp.Identifier):
            parent = expression.parent

            # Climb past dotted paths (a.b.c) to the node that owns the whole reference.
            while isinstance(parent, exp.Dot):
                parent = parent.parent

            if (
                not isinstance(parent, exp.UserDefinedFunction)
                and not (isinstance(parent, exp.Table) and parent.db)
                and not expression.meta.get("is_table")
            ):
                expression.set("this", expression.this.lower())

        return expression

    class Tokenizer(tokens.Tokenizer):
        # Both single- and triple-quoted strings; `#` line comments; backtick identifiers.
        QUOTES = ["'", '"', '"""', "'''"]
        COMMENTS = ["--", "#", ("/*", "*/")]
        IDENTIFIERS = ["`"]
        STRING_ESCAPES = ["\\"]

        HEX_STRINGS = [("0x", ""), ("0X", "")]

        # b'...' / B"..." byte strings and r'...' / R"..." raw strings, for every quote style.
        BYTE_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
        ]

        RAW_STRINGS = [
            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
        ]

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "ANY TYPE": TokenType.VARIANT,
            "BEGIN": TokenType.COMMAND,
            "BEGIN TRANSACTION": TokenType.BEGIN,
            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
            "BYTES": TokenType.BINARY,
            "DECLARE": TokenType.COMMAND,
            "FLOAT64": TokenType.DOUBLE,
            "INT64": TokenType.BIGINT,
            "RECORD": TokenType.STRUCT,
            "TIMESTAMP": TokenType.TIMESTAMPTZ,
            "NOT DETERMINISTIC": TokenType.VOLATILE,
            "UNKNOWN": TokenType.NULL,
            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
        }
        # DIV is a function in BigQuery, not an operator keyword.
        KEYWORDS.pop("DIV")

    class Parser(parser.Parser):
        PREFIXED_PIVOT_COLUMNS = True

        # LOG(base, value) argument order; bare LOG(x) means natural log.
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # A single capturing group means BigQuery returns that group (group 1).
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # BigQuery's TRIM takes plain arguments, so the special TRIM parser is dropped.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one table-name part, gluing dashed (`my-project`) and
            number-prefixed parts back into a single identifier."""
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Consume DASH + token pairs so `a-b-c` becomes one identifier.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A var token immediately adjacent (no gap) continues the name, e.g. `123abc`.
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Split a quoted `project.dataset.table` identifier into catalog/db/this parts."""
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                # More than three parts: fold the extras into a dotted expression.
                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Parse JSON_OBJECT, normalizing the two-array signature into key/value pairs."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

    class Generator(generator.Generator):
        EXPLICIT_UNION = True
        INTERVAL_ALLOWS_PLURAL_FORM = False
        JOIN_HINTS = False
        QUERY_HINTS = False
        TABLE_HINTS = False
        LIMIT_FETCH = "LIMIT"
        RENAME_TABLE_WITH_DB = False
        ESCAPE_LINE_BREAK = True
        NVL2_SUPPORTED = False

        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArraySize: rename_func("ARRAY_LENGTH"),
            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
            exp.Create: _create_sql,
            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
            exp.DateFromParts: rename_func("DATE"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
            exp.GroupConcat: rename_func("STRING_AGG"),
            exp.Hex: rename_func("TO_HEX"),
            exp.ILike: no_ilike_sql,
            exp.IntDiv: rename_func("DIV"),
            exp.JSONFormat: rename_func("TO_JSON_STRING"),
            exp.JSONKeyValue: json_keyvalue_comma_sql,
            exp.Max: max_or_greatest,
            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
            exp.MD5Digest: rename_func("MD5"),
            exp.Min: min_or_least,
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.RegexpExtract: lambda self, e: self.func(
                "REGEXP_EXTRACT",
                e.this,
                e.expression,
                e.args.get("position"),
                e.args.get("occurrence"),
            ),
            exp.RegexpReplace: regexp_replace_sql,
            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
            exp.ReturnsProperty: _returnsproperty_sql,
            exp.Select: transforms.preprocess(
                [
                    transforms.explode_to_unnest,
                    _unqualify_unnest,
                    transforms.eliminate_distinct_on,
                    _alias_ordered_group,
                ]
            ),
            exp.SHA2: lambda self, e: self.func(
                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
            ),
            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
            if e.name == "IMMUTABLE"
            else "NOT DETERMINISTIC",
            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
            exp.StrToTime: lambda self, e: self.func(
                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
            ),
            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
            exp.Unhex: rename_func("FROM_HEX"),
            exp.Values: _derived_table_values_to_unnest,
            exp.VariancePop: rename_func("VAR_POP"),
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
            exp.DataType.Type.BIGINT: "INT64",
            exp.DataType.Type.BINARY: "BYTES",
            exp.DataType.Type.BOOLEAN: "BOOL",
            exp.DataType.Type.CHAR: "STRING",
            exp.DataType.Type.DECIMAL: "NUMERIC",
            exp.DataType.Type.DOUBLE: "FLOAT64",
            exp.DataType.Type.FLOAT: "FLOAT64",
            exp.DataType.Type.INT: "INT64",
            exp.DataType.Type.NCHAR: "STRING",
            exp.DataType.Type.NVARCHAR: "STRING",
            exp.DataType.Type.SMALLINT: "INT64",
            exp.DataType.Type.TEXT: "STRING",
            exp.DataType.Type.TIMESTAMP: "DATETIME",
            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
            exp.DataType.Type.TINYINT: "INT64",
            exp.DataType.Type.VARBINARY: "BYTES",
            exp.DataType.Type.VARCHAR: "STRING",
            exp.DataType.Type.VARIANT: "ANY TYPE",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
        RESERVED_KEYWORDS = {
            *generator.Generator.RESERVED_KEYWORDS,
            "all",
            "and",
            "any",
            "array",
            "as",
            "asc",
            "assert_rows_modified",
            "at",
            "between",
            "by",
            "case",
            "cast",
            "collate",
            "contains",
            "create",
            "cross",
            "cube",
            "current",
            "default",
            "define",
            "desc",
            "distinct",
            "else",
            "end",
            "enum",
            "escape",
            "except",
            "exclude",
            "exists",
            "extract",
            "false",
            "fetch",
            "following",
            "for",
            "from",
            "full",
            "group",
            "grouping",
            "groups",
            "hash",
            "having",
            "if",
            "ignore",
            "in",
            "inner",
            "intersect",
            "interval",
            "into",
            "is",
            "join",
            "lateral",
            "left",
            "like",
            "limit",
            "lookup",
            "merge",
            "natural",
            "new",
            "no",
            "not",
            "null",
            "nulls",
            "of",
            "on",
            "or",
            "order",
            "outer",
            "over",
            "partition",
            "preceding",
            "proto",
            "qualify",
            "range",
            "recursive",
            "respect",
            "right",
            "rollup",
            "rows",
            "select",
            "set",
            "some",
            "struct",
            "tablesample",
            "then",
            "to",
            "treat",
            "true",
            "unbounded",
            "union",
            "unnest",
            "using",
            "when",
            "where",
            "window",
            "with",
            "within",
        }

        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
            """Render AT TIME ZONE via TIMESTAMP(DATETIME(...)) except inside CAST(.. AS STRING)."""
            parent = expression.parent

            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
                return self.func(
                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
                )

            return super().attimezone_sql(expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """TRY_CAST is spelled SAFE_CAST in BigQuery."""
            return self.cast_sql(expression, safe_prefix="SAFE_")

        def cte_sql(self, expression: exp.CTE) -> str:
            """Warn on CTE column lists that survived the pushdown preprocessing."""
            if expression.alias_column_names:
                self.unsupported("Column names in CTE definition are not supported.")
            return super().cte_sql(expression)

        def array_sql(self, expression: exp.Array) -> str:
            """ARRAY over a subquery uses ARRAY(SELECT ...); otherwise inline [..] syntax."""
            first_arg = seq_get(expression.expressions, 0)
            if isinstance(first_arg, exp.Subqueryable):
                return f"ARRAY{self.wrap(self.sql(first_arg))}"

            return inline_array_sql(self, expression)

        def transaction_sql(self, *_) -> str:
            return "BEGIN TRANSACTION"

        def commit_sql(self, *_) -> str:
            return "COMMIT TRANSACTION"

        def rollback_sql(self, *_) -> str:
            return "ROLLBACK TRANSACTION"

        def in_unnest_op(self, expression: exp.Unnest) -> str:
            # No parentheses around UNNEST on the right side of IN.
            return self.sql(expression)

        def except_op(self, expression: exp.Except) -> str:
            """BigQuery requires an explicit DISTINCT/ALL modifier on EXCEPT."""
            if not expression.args.get("distinct", False):
                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def intersect_op(self, expression: exp.Intersect) -> str:
            """BigQuery requires an explicit DISTINCT/ALL modifier on INTERSECT."""
            if not expression.args.get("distinct", False):
                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

        def with_properties(self, properties: exp.Properties) -> str:
            """Table/column properties are rendered as an OPTIONS(...) clause."""
            return self.properties(properties, prefix=self.seg("OPTIONS"))

        def version_sql(self, expression: exp.Version) -> str:
            """FOR SYSTEM_TIME AS OF: BigQuery spells the TIMESTAMP version kind SYSTEM_TIME."""
            if expression.name == "TIMESTAMP":
                expression = expression.copy()
                expression.set("this", "SYSTEM_TIME")
            return super().version_sql(expression)
logger = <Logger sqlglot (WARNING)>
class BigQuery(sqlglot.dialects.dialect.Dialect):
177class BigQuery(Dialect):
178    UNNEST_COLUMN_ONLY = True
179    SUPPORTS_USER_DEFINED_TYPES = False
180
181    # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#case_sensitivity
182    RESOLVES_IDENTIFIERS_AS_UPPERCASE = None
183
184    # bigquery udfs are case sensitive
185    NORMALIZE_FUNCTIONS = False
186
187    TIME_MAPPING = {
188        "%D": "%m/%d/%y",
189    }
190
191    FORMAT_MAPPING = {
192        "DD": "%d",
193        "MM": "%m",
194        "MON": "%b",
195        "MONTH": "%B",
196        "YYYY": "%Y",
197        "YY": "%y",
198        "HH": "%I",
199        "HH12": "%I",
200        "HH24": "%H",
201        "MI": "%M",
202        "SS": "%S",
203        "SSSSS": "%f",
204        "TZH": "%z",
205    }
206
207    # The _PARTITIONTIME and _PARTITIONDATE pseudo-columns are not returned by a SELECT * statement
208    # https://cloud.google.com/bigquery/docs/querying-partitioned-tables#query_an_ingestion-time_partitioned_table
209    PSEUDOCOLUMNS = {"_PARTITIONTIME", "_PARTITIONDATE"}
210
211    @classmethod
212    def normalize_identifier(cls, expression: E) -> E:
213        # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
214        # The following check is essentially a heuristic to detect tables based on whether or
215        # not they're qualified.
216        if isinstance(expression, exp.Identifier):
217            parent = expression.parent
218
219            while isinstance(parent, exp.Dot):
220                parent = parent.parent
221
222            if (
223                not isinstance(parent, exp.UserDefinedFunction)
224                and not (isinstance(parent, exp.Table) and parent.db)
225                and not expression.meta.get("is_table")
226            ):
227                expression.set("this", expression.this.lower())
228
229        return expression
230
231    class Tokenizer(tokens.Tokenizer):
232        QUOTES = ["'", '"', '"""', "'''"]
233        COMMENTS = ["--", "#", ("/*", "*/")]
234        IDENTIFIERS = ["`"]
235        STRING_ESCAPES = ["\\"]
236
237        HEX_STRINGS = [("0x", ""), ("0X", "")]
238
239        BYTE_STRINGS = [
240            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
241        ]
242
243        RAW_STRINGS = [
244            (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
245        ]
246
247        KEYWORDS = {
248            **tokens.Tokenizer.KEYWORDS,
249            "ANY TYPE": TokenType.VARIANT,
250            "BEGIN": TokenType.COMMAND,
251            "BEGIN TRANSACTION": TokenType.BEGIN,
252            "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
253            "BYTES": TokenType.BINARY,
254            "DECLARE": TokenType.COMMAND,
255            "FLOAT64": TokenType.DOUBLE,
256            "INT64": TokenType.BIGINT,
257            "RECORD": TokenType.STRUCT,
258            "TIMESTAMP": TokenType.TIMESTAMPTZ,
259            "NOT DETERMINISTIC": TokenType.VOLATILE,
260            "UNKNOWN": TokenType.NULL,
261            "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
262        }
263        KEYWORDS.pop("DIV")
264
class Parser(parser.Parser):
    """Parser for BigQuery's GoogleSQL dialect.

    Maps BigQuery-specific function names and syntax quirks (dash-separated
    table names, OPTIONS properties, signature variants of JSON_OBJECT) onto
    sqlglot's canonical expression tree.
    """

    # Pivoted column names are prefixed with the pivot alias in BigQuery.
    PREFIXED_PIVOT_COLUMNS = True

    # LOG(value, base): the base is the *second* argument in BigQuery,
    # and a single-argument LOG(x) means the natural logarithm.
    LOG_BASE_FIRST = False
    LOG_DEFAULTS_TO_LN = True

    FUNCTIONS = {
        **parser.Parser.FUNCTIONS,
        "DATE": _parse_date,
        "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
        "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
        "DATE_TRUNC": lambda args: exp.DateTrunc(
            # BigQuery's unit is a bare keyword; store it as a string literal.
            unit=exp.Literal.string(str(seq_get(args, 1))),
            this=seq_get(args, 0),
        ),
        "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
        "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
        "DIV": binary_from_function(exp.IntDiv),
        "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
        "MD5": exp.MD5Digest.from_arg_list,
        "TO_HEX": _parse_to_hex,
        # PARSE_DATE(fmt, value) -> StrToDate(value, fmt): swap the arguments
        # into sqlglot's canonical (value, format) order.
        "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
            [seq_get(args, 1), seq_get(args, 0)]
        ),
        "PARSE_TIMESTAMP": _parse_timestamp,
        "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
        "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
            this=seq_get(args, 0),
            expression=seq_get(args, 1),
            position=seq_get(args, 2),
            occurrence=seq_get(args, 3),
            # BigQuery returns the text matched by the capturing group when
            # the pattern contains exactly one group; record that as group=1.
            group=exp.Literal.number(1)
            if re.compile(str(seq_get(args, 1))).groups == 1
            else None,
        ),
        # SHA256/SHA512 are spelled as distinct functions; canonicalize to
        # SHA2 with an explicit bit length.
        "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
        "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
        "SPLIT": lambda args: exp.Split(
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
            this=seq_get(args, 0),
            # The delimiter defaults to "," when omitted.
            expression=seq_get(args, 1) or exp.Literal.string(","),
        ),
        "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
        "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
        "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
        "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
        "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
    }

    FUNCTION_PARSERS = {
        **parser.Parser.FUNCTION_PARSERS,
        # ARRAY(<subquery>) takes a statement, not an expression list.
        "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
    }
    # BigQuery's TRIM uses plain argument syntax, not TRIM(<chars> FROM <expr>).
    FUNCTION_PARSERS.pop("TRIM")

    NO_PAREN_FUNCTIONS = {
        **parser.Parser.NO_PAREN_FUNCTIONS,
        # CURRENT_DATETIME may be called without parentheses.
        TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
    }

    NESTED_TYPE_TOKENS = {
        *parser.Parser.NESTED_TYPE_TOKENS,
        # e.g. RETURNS TABLE<...> in function definitions.
        TokenType.TABLE,
    }

    ID_VAR_TOKENS = {
        *parser.Parser.ID_VAR_TOKENS,
        # VALUES is a valid identifier in BigQuery.
        TokenType.VALUES,
    }

    PROPERTY_PARSERS = {
        **parser.Parser.PROPERTY_PARSERS,
        "NOT DETERMINISTIC": lambda self: self.expression(
            exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
        ),
        # OPTIONS(...) property lists reuse the generic WITH-property parser.
        "OPTIONS": lambda self: self._parse_with_property(),
    }

    CONSTRAINT_PARSERS = {
        **parser.Parser.CONSTRAINT_PARSERS,
        # Column-level OPTIONS(...) constraints.
        "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
    }

    def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
        """Parse one component of a table name, handling BigQuery quirks.

        Table-name parts may contain dashes (e.g. a `my-project` project id)
        and may begin with digits, so a part can arrive as an identifier, a
        number, or a dash-separated run of tokens that must be stitched back
        together into a single identifier.
        """
        this = super()._parse_table_part(schema=schema) or self._parse_number()

        # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
        if isinstance(this, exp.Identifier):
            table_name = this.name
            # Consume DASH + token pairs, rebuilding "a-b-c" style names.
            while self._match(TokenType.DASH, advance=False) and self._next:
                self._advance(2)
                table_name += f"-{self._prev.text}"

            this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
        elif isinstance(this, exp.Literal):
            # A leading number (e.g. `123abc`) tokenizes as a literal; glue a
            # directly-adjacent trailing token back on (no gap between them).
            table_name = this.name

            if (
                self._curr
                and self._prev.end == self._curr.start - 1
                and self._parse_var(any_token=True)
            ):
                table_name += self._prev.text

            this = exp.Identifier(this=table_name, quoted=True)

        return this

    def _parse_table_parts(self, schema: bool = False) -> exp.Table:
        """Parse a full table reference, splitting dotted single identifiers.

        BigQuery allows `project.dataset.table` inside one pair of backticks,
        so an identifier whose name contains dots is re-split into catalog,
        db, and table parts.
        """
        table = super()._parse_table_parts(schema=schema)
        if isinstance(table.this, exp.Identifier) and "." in table.name:
            catalog, db, this, *rest = (
                t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                for x in split_num_words(table.name, ".", 3)
            )

            # More than three parts: fold the remainder into a Dot chain.
            if rest and this:
                this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

            table = exp.Table(this=this, db=db, catalog=catalog)

        return table

    def _parse_json_object(self) -> exp.JSONObject:
        """Parse JSON_OBJECT, canonicalizing BigQuery's array-pair signature."""
        json_object = super()._parse_json_object()
        array_kv_pair = seq_get(json_object.expressions, 0)

        # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
        # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
        if (
            array_kv_pair
            and isinstance(array_kv_pair.this, exp.Array)
            and isinstance(array_kv_pair.expression, exp.Array)
        ):
            keys = array_kv_pair.this.expressions
            values = array_kv_pair.expression.expressions

            json_object.set(
                "expressions",
                [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
            )

        return json_object
408
class Generator(generator.Generator):
    """SQL generator for BigQuery's GoogleSQL dialect.

    Renders sqlglot expression trees back into BigQuery syntax: function
    renames, INTERVAL-based date arithmetic, type-name mapping, and the
    dialect's reserved-keyword list used for identifier quoting.
    """

    # Set operations must always carry DISTINCT or ALL.
    EXPLICIT_UNION = True
    INTERVAL_ALLOWS_PLURAL_FORM = False
    JOIN_HINTS = False
    QUERY_HINTS = False
    TABLE_HINTS = False
    LIMIT_FETCH = "LIMIT"
    RENAME_TABLE_WITH_DB = False
    ESCAPE_LINE_BREAK = True
    NVL2_SUPPORTED = False

    TRANSFORMS = {
        **generator.Generator.TRANSFORMS,
        exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
        exp.ArraySize: rename_func("ARRAY_LENGTH"),
        # BigQuery CAST targets can't carry precision/scale parameters.
        exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
        exp.Create: _create_sql,
        exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
        exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
        exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
        exp.DateFromParts: rename_func("DATE"),
        exp.DateStrToDate: datestrtodate_sql,
        exp.DateSub: date_add_interval_sql("DATE", "SUB"),
        exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
        exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
        exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
        exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
        exp.GroupConcat: rename_func("STRING_AGG"),
        exp.Hex: rename_func("TO_HEX"),
        # No ILIKE in BigQuery; lowered into LIKE by the helper.
        exp.ILike: no_ilike_sql,
        exp.IntDiv: rename_func("DIV"),
        exp.JSONFormat: rename_func("TO_JSON_STRING"),
        exp.JSONKeyValue: json_keyvalue_comma_sql,
        exp.Max: max_or_greatest,
        # MD5 digest is rendered back as hex text: TO_HEX(MD5(x)).
        exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
        exp.MD5Digest: rename_func("MD5"),
        exp.Min: min_or_least,
        exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
        exp.RegexpExtract: lambda self, e: self.func(
            "REGEXP_EXTRACT",
            e.this,
            e.expression,
            e.args.get("position"),
            e.args.get("occurrence"),
        ),
        exp.RegexpReplace: regexp_replace_sql,
        exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
        exp.ReturnsProperty: _returnsproperty_sql,
        exp.Select: transforms.preprocess(
            [
                transforms.explode_to_unnest,
                _unqualify_unnest,
                transforms.eliminate_distinct_on,
                _alias_ordered_group,
            ]
        ),
        # SHA2(x, length) becomes SHA256(x) or SHA512(x).
        exp.SHA2: lambda self, e: self.func(
            f"SHA256" if e.text("length") == "256" else "SHA512", e.this
        ),
        exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
        if e.name == "IMMUTABLE"
        else "NOT DETERMINISTIC",
        # StrToDate holds (value, format); PARSE_DATE wants (format, value).
        exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
        exp.StrToTime: lambda self, e: self.func(
            "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
        ),
        exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
        exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
        exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
        exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
        exp.TimeStrToTime: timestrtotime_sql,
        exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
        exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
        exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
        exp.Unhex: rename_func("FROM_HEX"),
        exp.Values: _derived_table_values_to_unnest,
        exp.VariancePop: rename_func("VAR_POP"),
    }

    # BigQuery spellings for sqlglot's generic type names.
    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
        exp.DataType.Type.BIGINT: "INT64",
        exp.DataType.Type.BINARY: "BYTES",
        exp.DataType.Type.BOOLEAN: "BOOL",
        exp.DataType.Type.CHAR: "STRING",
        exp.DataType.Type.DECIMAL: "NUMERIC",
        exp.DataType.Type.DOUBLE: "FLOAT64",
        exp.DataType.Type.FLOAT: "FLOAT64",
        exp.DataType.Type.INT: "INT64",
        exp.DataType.Type.NCHAR: "STRING",
        exp.DataType.Type.NVARCHAR: "STRING",
        exp.DataType.Type.SMALLINT: "INT64",
        exp.DataType.Type.TEXT: "STRING",
        # Naive TIMESTAMP maps to DATETIME; zone-aware types to TIMESTAMP.
        exp.DataType.Type.TIMESTAMP: "DATETIME",
        exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
        exp.DataType.Type.TINYINT: "INT64",
        exp.DataType.Type.VARBINARY: "BYTES",
        exp.DataType.Type.VARCHAR: "STRING",
        exp.DataType.Type.VARIANT: "ANY TYPE",
    }

    PROPERTIES_LOCATION = {
        **generator.Generator.PROPERTIES_LOCATION,
        exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
        exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
    }

    # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
    RESERVED_KEYWORDS = {
        *generator.Generator.RESERVED_KEYWORDS,
        "all",
        "and",
        "any",
        "array",
        "as",
        "asc",
        "assert_rows_modified",
        "at",
        "between",
        "by",
        "case",
        "cast",
        "collate",
        "contains",
        "create",
        "cross",
        "cube",
        "current",
        "default",
        "define",
        "desc",
        "distinct",
        "else",
        "end",
        "enum",
        "escape",
        "except",
        "exclude",
        "exists",
        "extract",
        "false",
        "fetch",
        "following",
        "for",
        "from",
        "full",
        "group",
        "grouping",
        "groups",
        "hash",
        "having",
        "if",
        "ignore",
        "in",
        "inner",
        "intersect",
        "interval",
        "into",
        "is",
        "join",
        "lateral",
        "left",
        "like",
        "limit",
        "lookup",
        "merge",
        "natural",
        "new",
        "no",
        "not",
        "null",
        "nulls",
        "of",
        "on",
        "or",
        "order",
        "outer",
        "over",
        "partition",
        "preceding",
        "proto",
        "qualify",
        "range",
        "recursive",
        "respect",
        "right",
        "rollup",
        "rows",
        "select",
        "set",
        "some",
        "struct",
        "tablesample",
        "then",
        "to",
        "treat",
        "true",
        "unbounded",
        "union",
        "unnest",
        "using",
        "when",
        "where",
        "window",
        "with",
        "within",
    }

    def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
        """Render AT TIME ZONE, using TIMESTAMP(DATETIME(x, zone)) where needed."""
        parent = expression.parent

        # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
        # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
        if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
            return self.func(
                "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
            )

        return super().attimezone_sql(expression)

    def trycast_sql(self, expression: exp.TryCast) -> str:
        """Render TRY_CAST as BigQuery's SAFE_CAST."""
        return self.cast_sql(expression, safe_prefix="SAFE_")

    def cte_sql(self, expression: exp.CTE) -> str:
        """Render a CTE, warning that explicit column lists aren't supported."""
        if expression.alias_column_names:
            self.unsupported("Column names in CTE definition are not supported.")
        return super().cte_sql(expression)

    def array_sql(self, expression: exp.Array) -> str:
        """Render ARRAY(<subquery>) without inner parens, else an inline array."""
        first_arg = seq_get(expression.expressions, 0)
        if isinstance(first_arg, exp.Subqueryable):
            return f"ARRAY{self.wrap(self.sql(first_arg))}"

        return inline_array_sql(self, expression)

    def transaction_sql(self, *_) -> str:
        """Render a transaction start as BEGIN TRANSACTION."""
        return "BEGIN TRANSACTION"

    def commit_sql(self, *_) -> str:
        """Render a commit as COMMIT TRANSACTION."""
        return "COMMIT TRANSACTION"

    def rollback_sql(self, *_) -> str:
        """Render a rollback as ROLLBACK TRANSACTION."""
        return "ROLLBACK TRANSACTION"

    def in_unnest_op(self, expression: exp.Unnest) -> str:
        """Render the UNNEST operand of IN without extra wrapping."""
        return self.sql(expression)

    def except_op(self, expression: exp.Except) -> str:
        """Render EXCEPT; BigQuery only supports the DISTINCT form."""
        if not expression.args.get("distinct", False):
            self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
        return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def intersect_op(self, expression: exp.Intersect) -> str:
        """Render INTERSECT; BigQuery only supports the DISTINCT form."""
        if not expression.args.get("distinct", False):
            self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
        return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"

    def with_properties(self, properties: exp.Properties) -> str:
        """Render property lists under BigQuery's OPTIONS(...) clause."""
        return self.properties(properties, prefix=self.seg("OPTIONS"))

    def version_sql(self, expression: exp.Version) -> str:
        """Render time-travel clauses, mapping TIMESTAMP to FOR SYSTEM_TIME."""
        if expression.name == "TIMESTAMP":
            expression = expression.copy()
            expression.set("this", "SYSTEM_TIME")
        return super().version_sql(expression)
UNNEST_COLUMN_ONLY = True
SUPPORTS_USER_DEFINED_TYPES = False
RESOLVES_IDENTIFIERS_AS_UPPERCASE: Optional[bool] = None
NORMALIZE_FUNCTIONS: bool | str = False
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
PSEUDOCOLUMNS: Set[str] = {'_PARTITIONDATE', '_PARTITIONTIME'}
@classmethod
def normalize_identifier(cls, expression: ~E) -> ~E:
@classmethod
def normalize_identifier(cls, expression: E) -> E:
    """Lowercase unquoted identifiers that BigQuery treats case-insensitively.

    Identifiers that look like table references — parts of a qualified
    (db-bearing) table, UDF names, or nodes flagged with ``is_table`` meta —
    are left untouched, since table names are case-sensitive by default.
    """
    # In BigQuery, CTEs aren't case-sensitive, but table names are (by default, at least).
    # The following check is essentially a heuristic to detect tables based on whether or
    # not they're qualified.
    if isinstance(expression, exp.Identifier):
        parent = expression.parent

        # Walk out of any Dot chain to find the real enclosing node.
        while isinstance(parent, exp.Dot):
            parent = parent.parent

        if (
            not isinstance(parent, exp.UserDefinedFunction)
            and not (isinstance(parent, exp.Table) and parent.db)
            and not expression.meta.get("is_table")
        ):
            expression.set("this", expression.this.lower())

    return expression

Normalizes an unquoted identifier to either lower or upper case, thus essentially making it case-insensitive. If a dialect treats all identifiers as case-insensitive, they will be normalized regardless of whether they are quoted or not.

tokenizer_class = <class 'BigQuery.Tokenizer'>
parser_class = <class 'BigQuery.Parser'>
generator_class = <class 'BigQuery.Generator'>
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
BIT_START = None
BIT_END = None
HEX_START = '0x'
HEX_END = ''
BYTE_START = "b'"
BYTE_END = "'"
class BigQuery.Tokenizer(sqlglot.tokens.Tokenizer):
class Tokenizer(tokens.Tokenizer):
    """Tokenizer for BigQuery's GoogleSQL dialect."""

    # Single-, double-, and triple-quoted string forms.
    QUOTES = ["'", '"', '"""', "'''"]
    COMMENTS = ["--", "#", ("/*", "*/")]
    # Identifiers are quoted with backticks.
    IDENTIFIERS = ["`"]
    STRING_ESCAPES = ["\\"]

    HEX_STRINGS = [("0x", ""), ("0X", "")]

    # Bytes literals: b'...' / B"..." across every quote style above.
    BYTE_STRINGS = [
        (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("b", "B")
    ]

    # Raw strings: r'...' / R"..." across every quote style above.
    RAW_STRINGS = [
        (prefix + q, q) for q in t.cast(t.List[str], QUOTES) for prefix in ("r", "R")
    ]

    KEYWORDS = {
        **tokens.Tokenizer.KEYWORDS,
        "ANY TYPE": TokenType.VARIANT,
        # Bare BEGIN starts a script block; BEGIN TRANSACTION starts a txn.
        "BEGIN": TokenType.COMMAND,
        "BEGIN TRANSACTION": TokenType.BEGIN,
        "CURRENT_DATETIME": TokenType.CURRENT_DATETIME,
        "BYTES": TokenType.BINARY,
        "DECLARE": TokenType.COMMAND,
        "FLOAT64": TokenType.DOUBLE,
        "INT64": TokenType.BIGINT,
        "RECORD": TokenType.STRUCT,
        # BigQuery's TIMESTAMP is zone-aware, hence TIMESTAMPTZ internally.
        "TIMESTAMP": TokenType.TIMESTAMPTZ,
        "NOT DETERMINISTIC": TokenType.VOLATILE,
        "UNKNOWN": TokenType.NULL,
        "FOR SYSTEM_TIME": TokenType.TIMESTAMP_SNAPSHOT,
    }
    # DIV is an ordinary function in BigQuery, not an operator keyword.
    KEYWORDS.pop("DIV")
QUOTES = ["'", '"', '"""', "'''"]
COMMENTS = ['--', '#', ('/*', '*/')]
IDENTIFIERS = ['`']
STRING_ESCAPES = ['\\']
HEX_STRINGS = [('0x', ''), ('0X', '')]
BYTE_STRINGS = [("b'", "'"), ("B'", "'"), ('b"', '"'), ('B"', '"'), ('b"""', '"""'), ('B"""', '"""'), ("b'''", "'''"), ("B'''", "'''")]
RAW_STRINGS = [("r'", "'"), ("R'", "'"), ('r"', '"'), ('R"', '"'), ('r"""', '"""'), ('R"""', '"""'), ("r'''", "'''"), ("R'''", "'''")]
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '/*+': <TokenType.HINT: 'HINT'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.COMMAND: 'COMMAND'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': 
<TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 
'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.NULL: 'NULL'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.BIGINT: 'BIGINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': 
<TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 
'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'COPY': <TokenType.COMMAND: 'COMMAND'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'TRUNCATE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'ANY TYPE': <TokenType.VARIANT: 'VARIANT'>, 'BEGIN TRANSACTION': <TokenType.BEGIN: 'BEGIN'>, 'CURRENT_DATETIME': <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, 'BYTES': <TokenType.BINARY: 'BINARY'>, 'DECLARE': <TokenType.COMMAND: 'COMMAND'>, 'FLOAT64': <TokenType.DOUBLE: 'DOUBLE'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'RECORD': <TokenType.STRUCT: 'STRUCT'>, 'NOT DETERMINISTIC': <TokenType.VOLATILE: 'VOLATILE'>, 'FOR SYSTEM_TIME': <TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>}
class BigQuery.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Parser for the Google BigQuery SQL dialect.

        Extends the generic :class:`sqlglot.parser.Parser` with BigQuery-specific
        function-name mappings, token sets, property/constraint parsers, and
        table-name parsing rules (dashes and leading digits in table parts).
        """

        # PIVOT output columns are named with the alias as a prefix.
        PREFIXED_PIVOT_COLUMNS = True

        # LOG(value, base): the base is the *second* argument, and a
        # single-argument LOG(x) parses as LN(x).
        LOG_BASE_FIRST = False
        LOG_DEFAULTS_TO_LN = True

        # BigQuery function names -> sqlglot expression builders, layered on
        # top of the generic parser's FUNCTIONS table.
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "DATE": _parse_date,
            "DATE_ADD": parse_date_delta_with_interval(exp.DateAdd),
            "DATE_SUB": parse_date_delta_with_interval(exp.DateSub),
            # The unit (second arg) is captured as a plain string literal.
            "DATE_TRUNC": lambda args: exp.DateTrunc(
                unit=exp.Literal.string(str(seq_get(args, 1))),
                this=seq_get(args, 0),
            ),
            "DATETIME_ADD": parse_date_delta_with_interval(exp.DatetimeAdd),
            "DATETIME_SUB": parse_date_delta_with_interval(exp.DatetimeSub),
            "DIV": binary_from_function(exp.IntDiv),
            "GENERATE_ARRAY": exp.GenerateSeries.from_arg_list,
            "MD5": exp.MD5Digest.from_arg_list,
            "TO_HEX": _parse_to_hex,
            # PARSE_DATE(format, value) -> StrToDate(value, format): the two
            # arguments are swapped before the format string is translated.
            "PARSE_DATE": lambda args: format_time_lambda(exp.StrToDate, "bigquery")(
                [seq_get(args, 1), seq_get(args, 0)]
            ),
            "PARSE_TIMESTAMP": _parse_timestamp,
            "REGEXP_CONTAINS": exp.RegexpLike.from_arg_list,
            "REGEXP_EXTRACT": lambda args: exp.RegexpExtract(
                this=seq_get(args, 0),
                expression=seq_get(args, 1),
                position=seq_get(args, 2),
                occurrence=seq_get(args, 3),
                # If the pattern contains exactly one capturing group, record
                # group 1 explicitly; otherwise leave `group` unset.
                # NOTE(review): str() renders the pattern expression as SQL
                # (quotes included) before compiling it — presumably safe for
                # literal patterns; confirm for non-literal arguments.
                group=exp.Literal.number(1)
                if re.compile(str(seq_get(args, 1))).groups == 1
                else None,
            ),
            # SHA256/SHA512 are canonicalized to SHA2 with an explicit length.
            "SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
            "SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
            "SPLIT": lambda args: exp.Split(
                # https://cloud.google.com/bigquery/docs/reference/standard-sql/string_functions#split
                this=seq_get(args, 0),
                # The delimiter defaults to "," when omitted.
                expression=seq_get(args, 1) or exp.Literal.string(","),
            ),
            "TIME_ADD": parse_date_delta_with_interval(exp.TimeAdd),
            "TIME_SUB": parse_date_delta_with_interval(exp.TimeSub),
            "TIMESTAMP_ADD": parse_date_delta_with_interval(exp.TimestampAdd),
            "TIMESTAMP_SUB": parse_date_delta_with_interval(exp.TimestampSub),
            "TO_JSON_STRING": exp.JSONFormat.from_arg_list,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            # ARRAY(...) may wrap a full statement (e.g. a subquery).
            "ARRAY": lambda self: self.expression(exp.Array, expressions=[self._parse_statement()]),
        }
        # Parse TRIM as a regular function rather than via the generic
        # parser's specialized TRIM handler.
        FUNCTION_PARSERS.pop("TRIM")

        NO_PAREN_FUNCTIONS = {
            **parser.Parser.NO_PAREN_FUNCTIONS,
            # CURRENT_DATETIME is callable without parentheses.
            TokenType.CURRENT_DATETIME: exp.CurrentDatetime,
        }

        NESTED_TYPE_TOKENS = {
            *parser.Parser.NESTED_TYPE_TOKENS,
            # TABLE<...> is treated as a nested (parameterized) type.
            TokenType.TABLE,
        }

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            # VALUES may appear as an identifier.
            TokenType.VALUES,
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            # NOT DETERMINISTIC maps onto the VOLATILE stability property.
            "NOT DETERMINISTIC": lambda self: self.expression(
                exp.StabilityProperty, this=exp.Literal.string("VOLATILE")
            ),
            # OPTIONS(...) is parsed like a WITH-style property list.
            "OPTIONS": lambda self: self._parse_with_property(),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            # Column-level OPTIONS(...) becomes a Properties node.
            "OPTIONS": lambda self: exp.Properties(expressions=self._parse_with_property()),
        }

        def _parse_table_part(self, schema: bool = False) -> t.Optional[exp.Expression]:
            """Parse one dot-separated part of a table name.

            BigQuery table parts may contain dashes (``my-project``) and may
            start with a digit, so this also accepts number tokens and stitches
            dash-separated tokens back into a single identifier.
            """
            this = super()._parse_table_part(schema=schema) or self._parse_number()

            # https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#table_names
            if isinstance(this, exp.Identifier):
                table_name = this.name
                # Glue `ident - ident - ...` back together into one name.
                while self._match(TokenType.DASH, advance=False) and self._next:
                    self._advance(2)
                    table_name += f"-{self._prev.text}"

                this = exp.Identifier(this=table_name, quoted=this.args.get("quoted"))
            elif isinstance(this, exp.Literal):
                table_name = this.name

                # A number immediately followed (no whitespace gap) by a word
                # is a digit-prefixed identifier like `123abc`.
                if (
                    self._curr
                    and self._prev.end == self._curr.start - 1
                    and self._parse_var(any_token=True)
                ):
                    table_name += self._prev.text

                this = exp.Identifier(this=table_name, quoted=True)

            return this

        def _parse_table_parts(self, schema: bool = False) -> exp.Table:
            """Parse a (possibly qualified) table name.

            A single identifier containing dots (e.g. the quoted form
            ``"project.dataset.table"``) is split into catalog/db/this parts;
            any parts beyond the first three are folded into a Dot chain.
            """
            table = super()._parse_table_parts(schema=schema)
            if isinstance(table.this, exp.Identifier) and "." in table.name:
                catalog, db, this, *rest = (
                    t.cast(t.Optional[exp.Expression], exp.to_identifier(x))
                    for x in split_num_words(table.name, ".", 3)
                )

                if rest and this:
                    this = exp.Dot.build(t.cast(t.List[exp.Expression], [this, *rest]))

                table = exp.Table(this=this, db=db, catalog=catalog)

            return table

        def _parse_json_object(self) -> exp.JSONObject:
            """Parse JSON_OBJECT, normalizing BigQuery's array-pair form."""
            json_object = super()._parse_json_object()
            array_kv_pair = seq_get(json_object.expressions, 0)

            # Converts BQ's "signature 2" of JSON_OBJECT into SQLGlot's canonical representation
            # https://cloud.google.com/bigquery/docs/reference/standard-sql/json_functions#json_object_signature2
            if (
                array_kv_pair
                and isinstance(array_kv_pair.this, exp.Array)
                and isinstance(array_kv_pair.expression, exp.Array)
            ):
                keys = array_kv_pair.this.expressions
                values = array_kv_pair.expression.expressions

                # Re-pair JSON_OBJECT([k1, k2], [v1, v2]) as (k1, v1), (k2, v2).
                json_object.set(
                    "expressions",
                    [exp.JSONKeyValue(this=k, expression=v) for k, v in zip(keys, values)],
                )

            return json_object

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: Determines the amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
PREFIXED_PIVOT_COLUMNS = True
LOG_BASE_FIRST = False
LOG_DEFAULTS_TO_LN = True
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Array'>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayJoin'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'COALESCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Concat'>>, 'CONCAT_WS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConcatWs'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function _parse_date>, 'DATE_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATEDIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 
'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function BigQuery.Parser.<lambda>>, 'DATETIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 
'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hex'>>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtract'>>, 'JSON_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONExtractScalar'>>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DATE_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastDateOfMonth'>>, 'LEAST': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Length'>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log'>>, 'LOG10': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log10'>>, 'LOG2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Log2'>>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <function BigQuery.Parser.<lambda>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.RegexpReplace'>>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeConcat'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SET_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SetAgg'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SortArray'>>, 'SPLIT': <function BigQuery.Parser.<lambda>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': 
<bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_SUB': <function parse_date_delta_with_interval.<locals>.func>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UPPER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'UCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function parse_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'LIKE': <function parse_like>, 'DIV': <function binary_from_function.<locals>.<lambda>>, 'GENERATE_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'TO_HEX': <function _parse_to_hex>, 'PARSE_DATE': <function BigQuery.Parser.<lambda>>, 'PARSE_TIMESTAMP': <function _parse_timestamp>, 'REGEXP_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SHA256': <function BigQuery.Parser.<lambda>>, 'SHA512': <function BigQuery.Parser.<lambda>>, 'TO_JSON_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>}
FUNCTION_PARSERS = {'ANY_VALUE': <function Parser.<lambda>>, 'CAST': <function Parser.<lambda>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'ARRAY': <function BigQuery.Parser.<lambda>>}
NO_PAREN_FUNCTIONS = {<TokenType.CURRENT_DATE: 'CURRENT_DATE'>: <class 'sqlglot.expressions.CurrentDate'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>: <class 'sqlglot.expressions.CurrentDatetime'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>: <class 'sqlglot.expressions.CurrentTime'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>: <class 'sqlglot.expressions.CurrentTimestamp'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>: <class 'sqlglot.expressions.CurrentUser'>}
NESTED_TYPE_TOKENS = {<TokenType.NESTED: 'NESTED'>, <TokenType.MAP: 'MAP'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.TABLE: 'TABLE'>}
ID_VAR_TOKENS = {<TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.SEMI: 'SEMI'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.INT128: 'INT128'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.SHOW: 'SHOW'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.TABLE: 'TABLE'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.TOP: 'TOP'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.MAP: 'MAP'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.INT: 'INT'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.BINARY: 'BINARY'>, <TokenType.TIME: 'TIME'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.DELETE: 'DELETE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.APPLY: 'APPLY'>, <TokenType.ANTI: 'ANTI'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.BIT: 'BIT'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.NEXT: 'NEXT'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATERANGE: 'DATERANGE'>, 
<TokenType.PRAGMA: 'PRAGMA'>, <TokenType.ANY: 'ANY'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.YEAR: 'YEAR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.DESC: 'DESC'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.CASE: 'CASE'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.CHAR: 'CHAR'>, <TokenType.FULL: 'FULL'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.MONEY: 'MONEY'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.FIRST: 'FIRST'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.VIEW: 'VIEW'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.INET: 'INET'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.CACHE: 'CACHE'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.XML: 'XML'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.UINT: 'UINT'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ALL: 'ALL'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.ROW: 'ROW'>, <TokenType.UINT256: 'UINT256'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.KEEP: 'KEEP'>, <TokenType.ASC: 'ASC'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 
<TokenType.SETTINGS: 'SETTINGS'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.NULL: 'NULL'>, <TokenType.DATE: 'DATE'>, <TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.TEXT: 'TEXT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.LEFT: 'LEFT'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.INT256: 'INT256'>, <TokenType.JSONB: 'JSONB'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.IS: 'IS'>, <TokenType.MERGE: 'MERGE'>, <TokenType.FALSE: 'FALSE'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.SET: 'SET'>, <TokenType.SOME: 'SOME'>, <TokenType.ENUM: 'ENUM'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UINT128: 'UINT128'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.VALUES: 'VALUES'>, <TokenType.UUID: 'UUID'>, <TokenType.FILTER: 'FILTER'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.END: 'END'>, <TokenType.TRUE: 'TRUE'>, <TokenType.INDEX: 'INDEX'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.JSON: 'JSON'>, <TokenType.VAR: 'VAR'>, <TokenType.DIV: 'DIV'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.COLUMN: 'COLUMN'>}
PROPERTY_PARSERS = {'ALGORITHM': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': 
<function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'NOT DETERMINISTIC': <function BigQuery.Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>, 'OPTIONS': <function BigQuery.Parser.<lambda>>}
TOKENIZER_CLASS: Type[sqlglot.tokens.Tokenizer] = <class 'BigQuery.Tokenizer'>
UNNEST_COLUMN_ONLY: bool = True
SUPPORTS_USER_DEFINED_TYPES = False
NORMALIZE_FUNCTIONS = False
SHOW_TRIE: Dict = {}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
FORMAT_MAPPING: Dict[str, str] = {'DD': '%d', 'MM': '%m', 'MON': '%b', 'MONTH': '%B', 'YYYY': '%Y', 'YY': '%y', 'HH': '%I', 'HH12': '%I', 'HH24': '%H', 'MI': '%M', 'SS': '%S', 'SSSSS': '%f', 'TZH': '%z'}
FORMAT_TRIE: Dict = {'D': {'D': {0: True}}, 'M': {'M': {0: True}, 'O': {'N': {0: True, 'T': {'H': {0: True}}}}, 'I': {0: True}}, 'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'H': {'H': {0: True, '1': {'2': {0: True}}, '2': {'4': {0: True}}}}, 'S': {'S': {0: True, 'S': {'S': {'S': {0: True}}}}}, 'T': {'Z': {'H': {0: True}}}}
TIME_MAPPING: Dict[str, str] = {'%D': '%m/%d/%y'}
TIME_TRIE: Dict = {'%': {'D': {0: True}}}
class BigQuery.Generator(sqlglot.generator.Generator):
409    class Generator(generator.Generator):
410        EXPLICIT_UNION = True
411        INTERVAL_ALLOWS_PLURAL_FORM = False
412        JOIN_HINTS = False
413        QUERY_HINTS = False
414        TABLE_HINTS = False
415        LIMIT_FETCH = "LIMIT"
416        RENAME_TABLE_WITH_DB = False
417        ESCAPE_LINE_BREAK = True
418        NVL2_SUPPORTED = False
419
420        TRANSFORMS = {
421            **generator.Generator.TRANSFORMS,
422            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
423            exp.ArraySize: rename_func("ARRAY_LENGTH"),
424            exp.Cast: transforms.preprocess([transforms.remove_precision_parameterized_types]),
425            exp.Create: _create_sql,
426            exp.CTE: transforms.preprocess([_pushdown_cte_column_names]),
427            exp.DateAdd: date_add_interval_sql("DATE", "ADD"),
428            exp.DateDiff: lambda self, e: f"DATE_DIFF({self.sql(e, 'this')}, {self.sql(e, 'expression')}, {self.sql(e.args.get('unit', 'DAY'))})",
429            exp.DateFromParts: rename_func("DATE"),
430            exp.DateStrToDate: datestrtodate_sql,
431            exp.DateSub: date_add_interval_sql("DATE", "SUB"),
432            exp.DatetimeAdd: date_add_interval_sql("DATETIME", "ADD"),
433            exp.DatetimeSub: date_add_interval_sql("DATETIME", "SUB"),
434            exp.DateTrunc: lambda self, e: self.func("DATE_TRUNC", e.this, e.text("unit")),
435            exp.GenerateSeries: rename_func("GENERATE_ARRAY"),
436            exp.GroupConcat: rename_func("STRING_AGG"),
437            exp.Hex: rename_func("TO_HEX"),
438            exp.ILike: no_ilike_sql,
439            exp.IntDiv: rename_func("DIV"),
440            exp.JSONFormat: rename_func("TO_JSON_STRING"),
441            exp.JSONKeyValue: json_keyvalue_comma_sql,
442            exp.Max: max_or_greatest,
443            exp.MD5: lambda self, e: self.func("TO_HEX", self.func("MD5", e.this)),
444            exp.MD5Digest: rename_func("MD5"),
445            exp.Min: min_or_least,
446            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
447            exp.RegexpExtract: lambda self, e: self.func(
448                "REGEXP_EXTRACT",
449                e.this,
450                e.expression,
451                e.args.get("position"),
452                e.args.get("occurrence"),
453            ),
454            exp.RegexpReplace: regexp_replace_sql,
455            exp.RegexpLike: rename_func("REGEXP_CONTAINS"),
456            exp.ReturnsProperty: _returnsproperty_sql,
457            exp.Select: transforms.preprocess(
458                [
459                    transforms.explode_to_unnest,
460                    _unqualify_unnest,
461                    transforms.eliminate_distinct_on,
462                    _alias_ordered_group,
463                ]
464            ),
465            exp.SHA2: lambda self, e: self.func(
466                f"SHA256" if e.text("length") == "256" else "SHA512", e.this
467            ),
468            exp.StabilityProperty: lambda self, e: f"DETERMINISTIC"
469            if e.name == "IMMUTABLE"
470            else "NOT DETERMINISTIC",
471            exp.StrToDate: lambda self, e: f"PARSE_DATE({self.format_time(e)}, {self.sql(e, 'this')})",
472            exp.StrToTime: lambda self, e: self.func(
473                "PARSE_TIMESTAMP", self.format_time(e), e.this, e.args.get("zone")
474            ),
475            exp.TimeAdd: date_add_interval_sql("TIME", "ADD"),
476            exp.TimeSub: date_add_interval_sql("TIME", "SUB"),
477            exp.TimestampAdd: date_add_interval_sql("TIMESTAMP", "ADD"),
478            exp.TimestampSub: date_add_interval_sql("TIMESTAMP", "SUB"),
479            exp.TimeStrToTime: timestrtotime_sql,
480            exp.Trim: lambda self, e: self.func(f"TRIM", e.this, e.expression),
481            exp.TsOrDsAdd: date_add_interval_sql("DATE", "ADD"),
482            exp.TsOrDsToDate: ts_or_ds_to_date_sql("bigquery"),
483            exp.Unhex: rename_func("FROM_HEX"),
484            exp.Values: _derived_table_values_to_unnest,
485            exp.VariancePop: rename_func("VAR_POP"),
486        }
487
488        TYPE_MAPPING = {
489            **generator.Generator.TYPE_MAPPING,
490            exp.DataType.Type.BIGDECIMAL: "BIGNUMERIC",
491            exp.DataType.Type.BIGINT: "INT64",
492            exp.DataType.Type.BINARY: "BYTES",
493            exp.DataType.Type.BOOLEAN: "BOOL",
494            exp.DataType.Type.CHAR: "STRING",
495            exp.DataType.Type.DECIMAL: "NUMERIC",
496            exp.DataType.Type.DOUBLE: "FLOAT64",
497            exp.DataType.Type.FLOAT: "FLOAT64",
498            exp.DataType.Type.INT: "INT64",
499            exp.DataType.Type.NCHAR: "STRING",
500            exp.DataType.Type.NVARCHAR: "STRING",
501            exp.DataType.Type.SMALLINT: "INT64",
502            exp.DataType.Type.TEXT: "STRING",
503            exp.DataType.Type.TIMESTAMP: "DATETIME",
504            exp.DataType.Type.TIMESTAMPTZ: "TIMESTAMP",
505            exp.DataType.Type.TIMESTAMPLTZ: "TIMESTAMP",
506            exp.DataType.Type.TINYINT: "INT64",
507            exp.DataType.Type.VARBINARY: "BYTES",
508            exp.DataType.Type.VARCHAR: "STRING",
509            exp.DataType.Type.VARIANT: "ANY TYPE",
510        }
511
512        PROPERTIES_LOCATION = {
513            **generator.Generator.PROPERTIES_LOCATION,
514            exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
515            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
516        }
517
518        # from: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#reserved_keywords
519        RESERVED_KEYWORDS = {
520            *generator.Generator.RESERVED_KEYWORDS,
521            "all",
522            "and",
523            "any",
524            "array",
525            "as",
526            "asc",
527            "assert_rows_modified",
528            "at",
529            "between",
530            "by",
531            "case",
532            "cast",
533            "collate",
534            "contains",
535            "create",
536            "cross",
537            "cube",
538            "current",
539            "default",
540            "define",
541            "desc",
542            "distinct",
543            "else",
544            "end",
545            "enum",
546            "escape",
547            "except",
548            "exclude",
549            "exists",
550            "extract",
551            "false",
552            "fetch",
553            "following",
554            "for",
555            "from",
556            "full",
557            "group",
558            "grouping",
559            "groups",
560            "hash",
561            "having",
562            "if",
563            "ignore",
564            "in",
565            "inner",
566            "intersect",
567            "interval",
568            "into",
569            "is",
570            "join",
571            "lateral",
572            "left",
573            "like",
574            "limit",
575            "lookup",
576            "merge",
577            "natural",
578            "new",
579            "no",
580            "not",
581            "null",
582            "nulls",
583            "of",
584            "on",
585            "or",
586            "order",
587            "outer",
588            "over",
589            "partition",
590            "preceding",
591            "proto",
592            "qualify",
593            "range",
594            "recursive",
595            "respect",
596            "right",
597            "rollup",
598            "rows",
599            "select",
600            "set",
601            "some",
602            "struct",
603            "tablesample",
604            "then",
605            "to",
606            "treat",
607            "true",
608            "unbounded",
609            "union",
610            "unnest",
611            "using",
612            "when",
613            "where",
614            "window",
615            "with",
616            "within",
617        }
618
619        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
620            parent = expression.parent
621
622            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
623            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
624            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
625                return self.func(
626                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
627                )
628
629            return super().attimezone_sql(expression)
630
631        def trycast_sql(self, expression: exp.TryCast) -> str:
632            return self.cast_sql(expression, safe_prefix="SAFE_")
633
634        def cte_sql(self, expression: exp.CTE) -> str:
635            if expression.alias_column_names:
636                self.unsupported("Column names in CTE definition are not supported.")
637            return super().cte_sql(expression)
638
639        def array_sql(self, expression: exp.Array) -> str:
640            first_arg = seq_get(expression.expressions, 0)
641            if isinstance(first_arg, exp.Subqueryable):
642                return f"ARRAY{self.wrap(self.sql(first_arg))}"
643
644            return inline_array_sql(self, expression)
645
646        def transaction_sql(self, *_) -> str:
647            return "BEGIN TRANSACTION"
648
649        def commit_sql(self, *_) -> str:
650            return "COMMIT TRANSACTION"
651
652        def rollback_sql(self, *_) -> str:
653            return "ROLLBACK TRANSACTION"
654
655        def in_unnest_op(self, expression: exp.Unnest) -> str:
656            return self.sql(expression)
657
658        def except_op(self, expression: exp.Except) -> str:
659            if not expression.args.get("distinct", False):
660                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
661            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
662
663        def intersect_op(self, expression: exp.Intersect) -> str:
664            if not expression.args.get("distinct", False):
665                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
666            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
667
668        def with_properties(self, properties: exp.Properties) -> str:
669            return self.properties(properties, prefix=self.seg("OPTIONS"))
670
671        def version_sql(self, expression: exp.Version) -> str:
672            if expression.name == "TIMESTAMP":
673                expression = expression.copy()
674                expression.set("this", "SYSTEM_TIME")
675            return super().version_sql(expression)

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether or not to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case-insensitive.
  • normalize: Whether or not to normalize identifiers to lowercase. Default: False.
  • pad: Determines the pad size in a formatted string. Default: 2.
  • indent: Determines the indentation size in a formatted string. Default: 2.
  • normalize_functions: Whether or not to normalize all function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Determines whether or not the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False.
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80.
  • comments: Whether or not to preserve comments in the output SQL code. Default: True.
EXPLICIT_UNION = True
INTERVAL_ALLOWS_PLURAL_FORM = False
JOIN_HINTS = False
QUERY_HINTS = False
TABLE_HINTS = False
LIMIT_FETCH = 'LIMIT'
RENAME_TABLE_WITH_DB = False
ESCAPE_LINE_BREAK = True
NVL2_SUPPORTED = False
TRANSFORMS = {<class 'sqlglot.expressions.DateAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CheckColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function _returnsproperty_sql>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArraySize'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Cast'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Create'>: <function _create_sql>, <class 'sqlglot.expressions.CTE'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateDiff'>: <function BigQuery.Generator.<lambda>>, <class 
'sqlglot.expressions.DateFromParts'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DateSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DatetimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.DateTrunc'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Hex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ILike'>: <function no_ilike_sql>, <class 'sqlglot.expressions.IntDiv'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONFormat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.JSONKeyValue'>: <function json_keyvalue_comma_sql>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.MD5'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.MD5Digest'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpExtract'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.RegexpReplace'>: <function regexp_replace_sql>, <class 'sqlglot.expressions.RegexpLike'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA2'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToDate'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function BigQuery.Generator.<lambda>>, 
<class 'sqlglot.expressions.TimeAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampAdd'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimestampSub'>: <function date_add_interval_sql.<locals>.func>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.Trim'>: <function BigQuery.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function ts_or_ds_to_date_sql.<locals>._ts_or_ds_to_date_sql>, <class 'sqlglot.expressions.Unhex'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Values'>: <function _derived_table_values_to_unnest>, <class 'sqlglot.expressions.VariancePop'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'STRING', <Type.NVARCHAR: 'NVARCHAR'>: 'STRING', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.BIGDECIMAL: 'BIGDECIMAL'>: 'BIGNUMERIC', <Type.BIGINT: 'BIGINT'>: 'INT64', <Type.BINARY: 'BINARY'>: 'BYTES', <Type.BOOLEAN: 'BOOLEAN'>: 'BOOL', <Type.CHAR: 'CHAR'>: 'STRING', <Type.DECIMAL: 'DECIMAL'>: 'NUMERIC', <Type.DOUBLE: 'DOUBLE'>: 'FLOAT64', <Type.FLOAT: 'FLOAT'>: 'FLOAT64', <Type.INT: 'INT'>: 'INT64', <Type.SMALLINT: 'SMALLINT'>: 'INT64', <Type.TEXT: 'TEXT'>: 'STRING', <Type.TIMESTAMP: 'TIMESTAMP'>: 'DATETIME', <Type.TIMESTAMPTZ: 'TIMESTAMPTZ'>: 'TIMESTAMP', <Type.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>: 'TIMESTAMP', <Type.TINYINT: 'TINYINT'>: 'INT64', <Type.VARBINARY: 'VARBINARY'>: 'BYTES', <Type.VARCHAR: 'VARCHAR'>: 'STRING', <Type.VARIANT: 'VARIANT'>: 'ANY TYPE'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 
'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, 
<class 'sqlglot.expressions.SetProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>}
RESERVED_KEYWORDS = {'following', 'of', 'interval', 'join', 'tablesample', 'natural', 'nulls', 'as', 'array', 'assert_rows_modified', 'order', 'outer', 'default', 'not', 'some', 'case', 'within', 'group', 'limit', 'current', 'like', 'cube', 'exclude', 'rollup', 'any', 'proto', 'escape', 'false', 'over', 'for', 'all', 'where', 'recursive', 'and', 'select', 'cast', 'else', 'on', 'merge', 'unnest', 'range', 'set', 'exists', 'at', 'intersect', 'grouping', 'lateral', 'when', 'struct', 'desc', 'by', 'cross', 'define', 'no', 'into', 'respect', 'full', 'extract', 'rows', 'qualify', 'between', 'hash', 'collate', 'inner', 'new', 'with', 'if', 'enum', 'asc', 'left', 'union', 'lookup', 'end', 'is', 'except', 'contains', 'ignore', 'partition', 'having', 'from', 'treat', 'fetch', 'or', 'right', 'then', 'unbounded', 'using', 'null', 'create', 'preceding', 'groups', 'window', 'distinct', 'true', 'to', 'in'}
def attimezone_sql(self, expression: sqlglot.expressions.AtTimeZone) -> str:
619        def attimezone_sql(self, expression: exp.AtTimeZone) -> str:
620            parent = expression.parent
621
622            # BigQuery allows CAST(.. AS {STRING|TIMESTAMP} [FORMAT <fmt> [AT TIME ZONE <tz>]]).
623            # Only the TIMESTAMP one should use the below conversion, when AT TIME ZONE is included.
624            if not isinstance(parent, exp.Cast) or not parent.to.is_type("text"):
625                return self.func(
626                    "TIMESTAMP", self.func("DATETIME", expression.this, expression.args.get("zone"))
627                )
628
629            return super().attimezone_sql(expression)
def trycast_sql(self, expression: sqlglot.expressions.TryCast) -> str:
631        def trycast_sql(self, expression: exp.TryCast) -> str:
632            return self.cast_sql(expression, safe_prefix="SAFE_")
def cte_sql(self, expression: sqlglot.expressions.CTE) -> str:
634        def cte_sql(self, expression: exp.CTE) -> str:
635            if expression.alias_column_names:
636                self.unsupported("Column names in CTE definition are not supported.")
637            return super().cte_sql(expression)
def array_sql(self, expression: sqlglot.expressions.Array) -> str:
639        def array_sql(self, expression: exp.Array) -> str:
640            first_arg = seq_get(expression.expressions, 0)
641            if isinstance(first_arg, exp.Subqueryable):
642                return f"ARRAY{self.wrap(self.sql(first_arg))}"
643
644            return inline_array_sql(self, expression)
def transaction_sql(self, *_) -> str:
646        def transaction_sql(self, *_) -> str:
647            return "BEGIN TRANSACTION"
def commit_sql(self, *_) -> str:
649        def commit_sql(self, *_) -> str:
650            return "COMMIT TRANSACTION"
def rollback_sql(self, *_) -> str:
652        def rollback_sql(self, *_) -> str:
653            return "ROLLBACK TRANSACTION"
def in_unnest_op(self, expression: sqlglot.expressions.Unnest) -> str:
655        def in_unnest_op(self, expression: exp.Unnest) -> str:
656            return self.sql(expression)
def except_op(self, expression: sqlglot.expressions.Except) -> str:
658        def except_op(self, expression: exp.Except) -> str:
659            if not expression.args.get("distinct", False):
660                self.unsupported("EXCEPT without DISTINCT is not supported in BigQuery")
661            return f"EXCEPT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def intersect_op(self, expression: sqlglot.expressions.Intersect) -> str:
663        def intersect_op(self, expression: exp.Intersect) -> str:
664            if not expression.args.get("distinct", False):
665                self.unsupported("INTERSECT without DISTINCT is not supported in BigQuery")
666            return f"INTERSECT{' DISTINCT' if expression.args.get('distinct') else ' ALL'}"
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
668        def with_properties(self, properties: exp.Properties) -> str:
669            return self.properties(properties, prefix=self.seg("OPTIONS"))
def version_sql(self, expression: sqlglot.expressions.Version) -> str:
671        def version_sql(self, expression: exp.Version) -> str:
672            if expression.name == "TIMESTAMP":
673                expression = expression.copy()
674                expression.set("this", "SYSTEM_TIME")
675            return super().version_sql(expression)
INVERSE_TIME_MAPPING: Dict[str, str] = {'%m/%d/%y': '%D'}
INVERSE_TIME_TRIE: Dict = {'%': {'m': {'/': {'%': {'d': {'/': {'%': {'y': {0: True}}}}}}}}}
UNNEST_COLUMN_ONLY = True
NORMALIZE_FUNCTIONS: bool | str = False
@classmethod
def can_identify(text: str, identify: str | bool = 'safe') -> bool:
260    @classmethod
261    def can_identify(cls, text: str, identify: str | bool = "safe") -> bool:
262        """Checks if text can be identified given an identify option.
263
264        Args:
265            text: The text to check.
266            identify:
267                "always" or `True`: Always returns true.
268                "safe": True if the identifier is case-insensitive.
269
270        Returns:
271            Whether or not the given text can be identified.
272        """
273        if identify is True or identify == "always":
274            return True
275
276        if identify == "safe":
277            return not cls.case_sensitive(text)
278
279        return False

Checks if text can be identified given an identify option.

Arguments:
  • text: The text to check.
  • identify: "always" or `True`: always returns True. "safe": returns True only if the identifier is case-insensitive.

Returns:
  Whether or not the given text can be identified.

QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '`'
IDENTIFIER_END = '`'
TOKENIZER_CLASS = <class 'BigQuery.Tokenizer'>
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = '0x'
HEX_END: Optional[str] = ''
BYTE_START: Optional[str] = "b'"
BYTE_END: Optional[str] = "'"
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
LOCKING_READS_SUPPORTED
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
MATCHED_BY_SOURCE
SINGLE_STRING_INTERVAL
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SIZE_IS_PERCENT
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
COLUMN_JOIN_MARKS_SUPPORTED
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
SELECT_KINDS
VALUES_AS_TABLE
ALTER_TABLE_ADD_COLUMN_KEYWORD
STAR_MAPPING
TIME_PART_SINGULARS
TOKEN_MAPPING
STRUCT_DELIMITER
PARAMETER_TOKEN
WITH_SEPARATED_COMMENTS
UNWRAPPED_INTERVAL_VALUES
SENTINEL_LINE_BREAK
INDEX_OFFSET
ALIAS_POST_TABLESAMPLE
IDENTIFIERS_CAN_START_WITH_DIGIT
STRICT_STRING_CONCAT
NULL_ORDERING
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
normalize_functions
unsupported_messages
generate
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasidentitycolumnconstraint_sql
notnullcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
clone_sql
describe_sql
prepend_ctes
with_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
rawstring_sql
datatypeparam_sql
datatype_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
index_sql
identifier_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
lockingproperty_sql
withdataproperty_sql
insert_sql
intersect_sql
introducer_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
table_sql
tablesample_sql
pivot_sql
tuple_sql
update_sql
values_sql
var_sql
into_sql
from_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
cluster_sql
distribute_sql
sort_sql
ordered_sql
matchrecognize_sql
query_modifiers
offset_limit_modifiers
after_having_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
union_sql
union_op
unnest_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_sql
safebracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
safeconcat_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
formatjson_sql
jsonobject_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
aliases_sql
add_sql
and_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
altercolumn_sql
renametable_sql
altertable_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
intdiv_sql
dpipe_sql
safedpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
or_sql
slice_sql
sub_sql
use_sql
binary
function_fallback_sql
func
format_args
text_width
format_time
expressions
op_expressions
naked_property
set_operation
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql