Edit on GitHub

sqlglot.dialects.snowflake — source listing of the Snowflake dialect implementation

   1from __future__ import annotations
   2
   3import typing as t
   4
   5from sqlglot import exp, generator, parser, tokens, transforms
   6from sqlglot.dialects.dialect import (
   7    Dialect,
   8    NormalizationStrategy,
   9    binary_from_function,
  10    build_default_decimal_type,
  11    build_timestamp_from_parts,
  12    date_delta_sql,
  13    date_trunc_to_time,
  14    datestrtodate_sql,
  15    build_formatted_time,
  16    if_sql,
  17    inline_array_sql,
  18    max_or_greatest,
  19    min_or_least,
  20    rename_func,
  21    timestamptrunc_sql,
  22    timestrtotime_sql,
  23    var_map_sql,
  24    map_date_part,
  25)
  26from sqlglot.helper import flatten, is_float, is_int, seq_get
  27from sqlglot.tokens import TokenType
  28
  29if t.TYPE_CHECKING:
  30    from sqlglot._typing import E
  31
  32
  33# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
# from https://docs.snowflake.com/en/sql-reference/functions/to_timestamp.html
def _build_datetime(
    name: str, kind: exp.DataType.Type, safe: bool = False
) -> t.Callable[[t.List], exp.Func]:
    """Return a builder for Snowflake TO_DATE / TO_TIME / TO_TIMESTAMP-style calls.

    Args:
        name: Snowflake function name, used for the opaque ``Anonymous`` fallback.
        kind: target data type the function converts to.
        safe: True for the TRY_* variants; propagated onto ``TsOrDsToDate``.
    """

    def _builder(args: t.List) -> exp.Func:
        value = seq_get(args, 0)
        # A literal whose text is an integer is treated as an epoch value, not a date string
        int_value = value is not None and is_int(value.name)

        if isinstance(value, exp.Literal):
            # Converts calls like `TO_TIME('01:02:03')` into casts
            if len(args) == 1 and value.is_string and not int_value:
                return exp.cast(value, kind)

            # Handles `TO_TIMESTAMP(str, fmt)` and `TO_TIMESTAMP(num, scale)` as special
            # cases so we can transpile them, since they're relatively common
            if kind == exp.DataType.Type.TIMESTAMP:
                if int_value:
                    return exp.UnixToTime(this=value, scale=seq_get(args, 1))
                if not is_float(value.this):
                    return build_formatted_time(exp.StrToTime, "snowflake")(args)

        if kind == exp.DataType.Type.DATE and not int_value:
            formatted_exp = build_formatted_time(exp.TsOrDsToDate, "snowflake")(args)
            formatted_exp.set("safe", safe)
            return formatted_exp

        # No special case applied: keep the call opaque under its original name
        return exp.Anonymous(this=name, expressions=args)

    return _builder
  62
  63
  64def _build_object_construct(args: t.List) -> t.Union[exp.StarMap, exp.Struct]:
  65    expression = parser.build_var_map(args)
  66
  67    if isinstance(expression, exp.StarMap):
  68        return expression
  69
  70    return exp.Struct(
  71        expressions=[
  72            exp.PropertyEQ(this=k, expression=v) for k, v in zip(expression.keys, expression.values)
  73        ]
  74    )
  75
  76
  77def _build_datediff(args: t.List) -> exp.DateDiff:
  78    return exp.DateDiff(
  79        this=seq_get(args, 2), expression=seq_get(args, 1), unit=map_date_part(seq_get(args, 0))
  80    )
  81
  82
  83def _build_date_time_add(expr_type: t.Type[E]) -> t.Callable[[t.List], E]:
  84    def _builder(args: t.List) -> E:
  85        return expr_type(
  86            this=seq_get(args, 2),
  87            expression=seq_get(args, 1),
  88            unit=map_date_part(seq_get(args, 0)),
  89        )
  90
  91    return _builder
  92
  93
  94# https://docs.snowflake.com/en/sql-reference/functions/div0
  95def _build_if_from_div0(args: t.List) -> exp.If:
  96    cond = exp.EQ(this=seq_get(args, 1), expression=exp.Literal.number(0)).and_(
  97        exp.Is(this=seq_get(args, 0), expression=exp.null()).not_()
  98    )
  99    true = exp.Literal.number(0)
 100    false = exp.Div(this=seq_get(args, 0), expression=seq_get(args, 1))
 101    return exp.If(this=cond, true=true, false=false)
 102
 103
 104# https://docs.snowflake.com/en/sql-reference/functions/zeroifnull
 105def _build_if_from_zeroifnull(args: t.List) -> exp.If:
 106    cond = exp.Is(this=seq_get(args, 0), expression=exp.Null())
 107    return exp.If(this=cond, true=exp.Literal.number(0), false=seq_get(args, 0))
 108
 109
# https://docs.snowflake.com/en/sql-reference/functions/nullifzero
def _build_if_from_nullifzero(args: t.List) -> exp.If:
    """Transpile NULLIFZERO(x) into IF(x = 0, NULL, x)."""
    cond = exp.EQ(this=seq_get(args, 0), expression=exp.Literal.number(0))
    return exp.If(this=cond, true=exp.Null(), false=seq_get(args, 0))
 114
 115
 116def _regexpilike_sql(self: Snowflake.Generator, expression: exp.RegexpILike) -> str:
 117    flag = expression.text("flag")
 118
 119    if "i" not in flag:
 120        flag += "i"
 121
 122    return self.func(
 123        "REGEXP_LIKE", expression.this, expression.expression, exp.Literal.string(flag)
 124    )
 125
 126
 127def _build_regexp_replace(args: t.List) -> exp.RegexpReplace:
 128    regexp_replace = exp.RegexpReplace.from_arg_list(args)
 129
 130    if not regexp_replace.args.get("replacement"):
 131        regexp_replace.set("replacement", exp.Literal.string(""))
 132
 133    return regexp_replace
 134
 135
def _show_parser(*args: t.Any, **kwargs: t.Any) -> t.Callable[[Snowflake.Parser], exp.Show]:
    """Return a SHOW_PARSERS callback that delegates to ``_parse_show_snowflake``
    with the given arguments pre-bound (typically just the object kind string)."""

    def _parse(self: Snowflake.Parser) -> exp.Show:
        return self._parse_show_snowflake(*args, **kwargs)

    return _parse
 141
 142
 143def _date_trunc_to_time(args: t.List) -> exp.DateTrunc | exp.TimestampTrunc:
 144    trunc = date_trunc_to_time(args)
 145    trunc.set("unit", map_date_part(trunc.args["unit"]))
 146    return trunc
 147
 148
def _unqualify_unpivot_columns(expression: exp.Expression) -> exp.Expression:
    """
    Snowflake doesn't allow columns referenced in UNPIVOT to be qualified,
    so we need to unqualify them.

    Example:
        >>> from sqlglot import parse_one
        >>> expr = parse_one("SELECT * FROM m_sales UNPIVOT(sales FOR month IN (m_sales.jan, feb, mar, april))")
        >>> print(_unqualify_unpivot_columns(expr).sql(dialect="snowflake"))
        SELECT * FROM m_sales UNPIVOT(sales FOR month IN (jan, feb, mar, april))
    """
    # Only UNPIVOT nodes are touched; regular PIVOTs keep their qualifications
    if isinstance(expression, exp.Pivot) and expression.unpivot:
        expression = transforms.unqualify_columns(expression)

    return expression
 164
 165
def _flatten_structured_types_unless_iceberg(expression: exp.Expression) -> exp.Expression:
    """Strip type parameters from nested types in CREATE column definitions.

    Snowflake's native tables don't keep parametrized nested types, so e.g. an
    ARRAY<INT> column is flattened to plain ARRAY — unless the table is declared
    with an Iceberg property, in which case the structure is preserved.
    """
    assert isinstance(expression, exp.Create)

    def _flatten_structured_type(expression: exp.DataType) -> exp.DataType:
        # Dropping "expressions" removes the inner type parameters in place
        if expression.this in exp.DataType.NESTED_TYPES:
            expression.set("expressions", None)
        return expression

    props = expression.args.get("properties")
    if isinstance(expression.this, exp.Schema) and not (props and props.find(exp.IcebergProperty)):
        for schema_expression in expression.this.expressions:
            if isinstance(schema_expression, exp.ColumnDef):
                column_type = schema_expression.kind
                if isinstance(column_type, exp.DataType):
                    column_type.transform(_flatten_structured_type, copy=False)

    return expression
 183
 184
def _unnest_generate_date_array(expression: exp.Expression) -> exp.Expression:
    """Rewrite UNNEST(GENERATE_DATE_ARRAY(start, end, step)) in FROM/JOIN position.

    Snowflake has no GENERATE_DATE_ARRAY, so the unnest is replaced by a subquery
    that projects DATEADD(unit, <sequence value>, start) over an
    ARRAY_GENERATE_RANGE-backed integer sequence. Only steps of exactly
    ``INTERVAL 1 <unit>`` are rewritten; any other shape is left untouched.
    """
    if isinstance(expression, exp.Select):
        for unnest in expression.find_all(exp.Unnest):
            if (
                isinstance(unnest.parent, (exp.From, exp.Join))
                and len(unnest.expressions) == 1
                and isinstance(unnest.expressions[0], exp.GenerateDateArray)
            ):
                generate_date_array = unnest.expressions[0]
                start = generate_date_array.args.get("start")
                end = generate_date_array.args.get("end")
                step = generate_date_array.args.get("step")

                # Bail out unless the step is an interval with magnitude 1
                if not start or not end or not isinstance(step, exp.Interval) or step.name != "1":
                    continue

                unit = step.args.get("unit")

                unnest_alias = unnest.args.get("alias")
                if unnest_alias:
                    # Copy so mutating the original unnest doesn't corrupt the alias
                    unnest_alias = unnest_alias.copy()
                    sequence_value_name = seq_get(unnest_alias.columns, 0) or "value"
                else:
                    sequence_value_name = "value"

                # We'll add the next sequence value to the starting date and project the result
                date_add = _build_date_time_add(exp.DateAdd)(
                    [unit, exp.cast(sequence_value_name, "int"), exp.cast(start, "date")]
                ).as_(sequence_value_name)

                # We use DATEDIFF to compute the number of sequence values needed
                number_sequence = Snowflake.Parser.FUNCTIONS["ARRAY_GENERATE_RANGE"](
                    [exp.Literal.number(0), _build_datediff([unit, start, end]) + 1]
                )

                unnest.set("expressions", [number_sequence])
                unnest.replace(exp.select(date_add).from_(unnest.copy()).subquery(unnest_alias))

    return expression
 224
 225
 226class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Maps Snowflake format tokens to strftime-style directives.
    # NOTE(review): the lowercase variants "dd" -> "%-d" and "dy" -> "%w" differ
    # from their uppercase counterparts ("DD" -> "%d", "DY" -> "%a") — confirm
    # this asymmetry is intentional.
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 265
 266    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 267        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 268        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 269        if (
 270            isinstance(expression, exp.Identifier)
 271            and isinstance(expression.parent, exp.Table)
 272            and expression.name.lower() == "dual"
 273        ):
 274            return expression  # type: ignore
 275
 276        return super().quote_identifier(expression, identify=identify)
 277
 278    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        # https://docs.snowflake.com/en/sql-reference/constructs/sample
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # `col:path` performs VARIANT extraction in Snowflake
        COLON_IS_VARIANT_EXTRACT = True

        # MATCH_CONDITION may be used as an identifier...
        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # ...but not as a table alias, while WINDOW may alias a table
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)
 290
        # Maps Snowflake function names to builders for canonical sqlglot expressions
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Argument order is swapped relative to exp.ArrayContains
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes in Snowflake, hence binary=True
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is PERCENTILE_CONT(0.5)
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }
 356
        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a regular function in Snowflake, not special syntax
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is not treated as a timestamp-like token here
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <csv of identifiers>
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }
 397
        # SHOW [TERSE] <kind> — both forms delegate to the same handler
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single tokens allowed inside a staged-file path (e.g. @stage/%table/dir)
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Implicit output columns of FLATTEN when no explicit alias columns are given
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose `IN <scope>` refers to a schema rather than a table
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier, not a table reference
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Cast-annotated lambda parameters are unwrapped to bare identifiers
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 451
        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a parsed range expression, special-casing NOT IN (subquery)."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a SnowFlake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)
 471
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints.

            Reachable via the "WITH", "MASKING", "PROJECTION" and "TAG" keys of
            CONSTRAINT_PARSERS; when entered on a non-WITH keyword, the cursor is
            retreated so the keyword itself can be re-matched below.
            """
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Qualified policy names parse as Columns; normalize to Dot form
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 497
 498        def _parse_create(self) -> exp.Create | exp.Command:
 499            expression = super()._parse_create()
 500            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 501                # Replace the Table node with the enclosed Identifier
 502                expression.this.replace(expression.this.this)
 503
 504            return expression
 505
 506        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 507        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 508        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 509            this = self._parse_var() or self._parse_type()
 510
 511            if not this:
 512                return None
 513
 514            self._match(TokenType.COMMA)
 515            expression = self._parse_bitwise()
 516            this = map_date_part(this)
 517            name = this.name.upper()
 518
 519            if name.startswith("EPOCH"):
 520                if name == "EPOCH_MILLISECOND":
 521                    scale = 10**3
 522                elif name == "EPOCH_MICROSECOND":
 523                    scale = 10**6
 524                elif name == "EPOCH_NANOSECOND":
 525                    scale = 10**9
 526                else:
 527                    scale = None
 528
 529                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 530                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 531
 532                if scale:
 533                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 534
 535                return to_unix
 536
 537            return self.expression(exp.Extract, this=this, expression=expression)
 538
 539        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 540            if is_map:
 541                # Keys are strings in Snowflake's objects, see also:
 542                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 543                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 544                return self._parse_slice(self._parse_string())
 545
 546            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 547
 548        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 549            lateral = super()._parse_lateral()
 550            if not lateral:
 551                return lateral
 552
 553            if isinstance(lateral.this, exp.Explode):
 554                table_alias = lateral.args.get("alias")
 555                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 556                if table_alias and not table_alias.args.get("columns"):
 557                    table_alias.set("columns", columns)
 558                elif not table_alias:
 559                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 560
 561            return lateral
 562
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including Snowflake staged-file locations.

            A string literal or an @-prefixed stage path is treated as a staged
            file, optionally followed by a parenthesized FILE_FORMAT / PATTERN
            clause; anything else falls back to the default table parsing.
            """
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 596
 597        def _parse_id_var(
 598            self,
 599            any_token: bool = True,
 600            tokens: t.Optional[t.Collection[TokenType]] = None,
 601        ) -> t.Optional[exp.Expression]:
 602            if self._match_text_seq("IDENTIFIER", "("):
 603                identifier = (
 604                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 605                    or self._parse_string()
 606                )
 607                self._match_r_paren()
 608                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 609
 610            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 611
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement for the given object kind `this`."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit scope keyword: infer it from the SHOW kind
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 649
 650        def _parse_alter_table_swap(self) -> exp.SwapTable:
 651            self._match_text_seq("WITH")
 652            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 653
 654        def _parse_location_property(self) -> exp.LocationProperty:
 655            self._match(TokenType.EQ)
 656            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 657
 658        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 659            # Parse either a subquery or a staged file
 660            return (
 661                self._parse_select(table=True, parse_subquery_alias=False)
 662                if self._match(TokenType.L_PAREN, advance=False)
 663                else self._parse_table_parts()
 664            )
 665
        def _parse_location_path(self) -> exp.Var:
            """Consume a raw stage path (e.g. @stage/dir/file) into a single Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))
 678
 679        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 680            this = super()._parse_lambda_arg()
 681
 682            if not this:
 683                return this
 684
 685            typ = self._parse_types()
 686
 687            if typ:
 688                return self.expression(exp.Cast, this=this, to=typ)
 689
 690            return this
 691
    class Tokenizer(tokens.Tokenizer):
        # Both backslash and a doubled quote escape inside string literals
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            # Stage file commands are kept opaque
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # `/*+` is not tokenized as a hint opener in Snowflake
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW output is parsed rather than treated as an opaque command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 734
    class Generator(generator.Generator):
        """Generates Snowflake SQL from sqlglot expression trees."""

        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True

        # Maps sqlglot expression nodes to their Snowflake SQL renderings
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Expression types that cannot appear inside a VALUES-as-table clause
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }

        def with_properties(self, properties: exp.Properties) -> str:
            """Render properties space-separated and unwrapped."""
            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")

        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
            """Render VALUES, avoiding the table form when it contains unsupported nodes."""
            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
                values_as_table = False

            return super().values_sql(expression, values_as_table=values_as_table)

        def datatype_sql(self, expression: exp.DataType) -> str:
            """Render a data type, collapsing parameterized struct types to OBJECT."""
            expressions = expression.expressions
            if (
                expressions
                and expression.is_type(*exp.DataType.STRUCT_TYPES)
                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
            ):
                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
                return "OBJECT"

            return super().datatype_sql(expression)

        def tonumber_sql(self, expression: exp.ToNumber) -> str:
            """Render TO_NUMBER with its optional format, precision and scale."""
            return self.func(
                "TO_NUMBER",
                expression.this,
                expression.args.get("format"),
                expression.args.get("precision"),
                expression.args.get("scale"),
            )

        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
            """Render TIMESTAMP_FROM_PARTS, folding milliseconds into the nanosecond arg."""
            milli = expression.args.get("milli")
            if milli is not None:
                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
                expression.set("nano", milli_to_nano)

            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)

        def trycast_sql(self, expression: exp.TryCast) -> str:
            """Render TRY_CAST, falling back to CAST for non-string inputs."""
            value = expression.this

            if value.type is None:
                from sqlglot.optimizer.annotate_types import annotate_types

                value = annotate_types(value)

            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
                return super().trycast_sql(expression)

            # TRY_CAST only works for string values in Snowflake
            return self.cast_sql(expression)

        def log_sql(self, expression: exp.Log) -> str:
            """Render LOG; a single argument means natural log, i.e. LN."""
            if not expression.expression:
                return self.func("LN", expression.this)

            return super().log_sql(expression)

        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)) form."""
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            # FLATTEN exposes fixed output columns; map alias/offset onto them
            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"

        def show_sql(self, expression: exp.Show) -> str:
            """Render a SHOW statement with its optional modifiers."""
            terse = "TERSE " if expression.args.get("terse") else ""
            history = " HISTORY" if expression.args.get("history") else ""
            like = self.sql(expression, "like")
            like = f" LIKE {like}" if like else ""

            scope = self.sql(expression, "scope")
            scope = f" {scope}" if scope else ""

            scope_kind = self.sql(expression, "scope_kind")
            if scope_kind:
                scope_kind = f" IN {scope_kind}"

            starts_with = self.sql(expression, "starts_with")
            if starts_with:
                starts_with = f" STARTS WITH {starts_with}"

            limit = self.sql(expression, "limit")

            from_ = self.sql(expression, "from")
            if from_:
                from_ = f" FROM {from_}"

            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"

        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
            """Render REGEXP_SUBSTR, defaulting trailing arguments as needed."""
            # Other dialects don't support all of the following parameters, so we need to
            # generate default values as necessary to ensure the transpilation is correct
            group = expression.args.get("group")
            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))

            return self.func(
                "REGEXP_SUBSTR",
                expression.this,
                expression.expression,
                position,
                occurrence,
                parameters,
                group,
            )

        def except_op(self, expression: exp.Except) -> str:
            """Render EXCEPT; Snowflake only supports the DISTINCT form."""
            if not expression.args.get("distinct"):
                self.unsupported("EXCEPT with All is not supported in Snowflake")
            return super().except_op(expression)

        def intersect_op(self, expression: exp.Intersect) -> str:
            """Render INTERSECT; Snowflake only supports the DISTINCT form."""
            if not expression.args.get("distinct"):
                self.unsupported("INTERSECT with All is not supported in Snowflake")
            return super().intersect_op(expression)

        def describe_sql(self, expression: exp.Describe) -> str:
            """Render DESCRIBE, defaulting the object kind to TABLE."""
            # Default to table if kind is unknown
            kind_value = expression.args.get("kind") or "TABLE"
            kind = f" {kind_value}" if kind_value else ""
            this = f" {self.sql(expression, 'this')}"
            expressions = self.expressions(expression, flat=True)
            expressions = f" {expressions}" if expressions else ""
            return f"DESCRIBE{kind}{this}{expressions}"

        def generatedasidentitycolumnconstraint_sql(
            self, expression: exp.GeneratedAsIdentityColumnConstraint
        ) -> str:
            """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
            start = expression.args.get("start")
            start = f" START {start}" if start else ""
            increment = expression.args.get("increment")
            increment = f" INCREMENT {increment}" if increment else ""
            return f"AUTOINCREMENT{start}{increment}"

        def swaptable_sql(self, expression: exp.SwapTable) -> str:
            """Render ALTER TABLE's SWAP WITH clause."""
            this = self.sql(expression, "this")
            return f"SWAP WITH {this}"

        def cluster_sql(self, expression: exp.Cluster) -> str:
            """Render CLUSTER BY with its key expressions wrapped in parens."""
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"

        def struct_sql(self, expression: exp.Struct) -> str:
            """Render a struct literal as OBJECT_CONSTRUCT(key, value, ...)."""
            keys = []
            values = []

            for i, e in enumerate(expression.expressions):
                if isinstance(e, exp.PropertyEQ):
                    keys.append(
                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
                    )
                    values.append(e.expression)
                else:
                    # Unnamed fields get positional keys: "_0", "_1", ...
                    keys.append(exp.Literal.string(f"_{i}"))
                    values.append(e)

            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))

        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
            """Render APPROX_PERCENTILE, dropping unsupported weight/accuracy args."""
            if expression.args.get("weight") or expression.args.get("accuracy"):
                self.unsupported(
                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
                )

            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))

        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render ALTER ... SET with stage file format, copy options and tags."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
# class Snowflake(sqlglot.dialects.dialect.Dialect):  -- documentation-render section header (artifact), not code
 227class Snowflake(Dialect):
    # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
    NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
    NULL_ORDERING = "nulls_are_large"
    TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
    SUPPORTS_USER_DEFINED_TYPES = False
    SUPPORTS_SEMI_ANTI_JOIN = False
    PREFER_CTE_ALIAS_COLUMN = True
    TABLESAMPLE_SIZE_IS_PERCENT = True
    COPY_PARAMS_ARE_CSV = False

    # Maps Snowflake time-format tokens to strftime-style directives
    TIME_MAPPING = {
        "YYYY": "%Y",
        "yyyy": "%Y",
        "YY": "%y",
        "yy": "%y",
        "MMMM": "%B",
        "mmmm": "%B",
        "MON": "%b",
        "mon": "%b",
        "MM": "%m",
        "mm": "%m",
        "DD": "%d",
        "dd": "%-d",
        "DY": "%a",
        "dy": "%w",
        "HH24": "%H",
        "hh24": "%H",
        "HH12": "%I",
        "hh12": "%I",
        "MI": "%M",
        "mi": "%M",
        "SS": "%S",
        "ss": "%S",
        "FF": "%f",
        "ff": "%f",
        "FF6": "%f",
        "ff6": "%f",
    }
 266
 267    def quote_identifier(self, expression: E, identify: bool = True) -> E:
 268        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
 269        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
 270        if (
 271            isinstance(expression, exp.Identifier)
 272            and isinstance(expression.parent, exp.Table)
 273            and expression.name.lower() == "dual"
 274        ):
 275            return expression  # type: ignore
 276
 277        return super().quote_identifier(expression, identify=identify)
 278
 279    class Parser(parser.Parser):
        IDENTIFY_PIVOT_STRINGS = True
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        # Snowflake function names mapped to sqlglot expression builders
        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # TRIM is parsed as a plain function call in Snowflake
        FUNCTION_PARSERS.pop("TRIM")

        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW subcommands supported by _parse_show
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Output columns produced by the FLATTEN table function
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE targets whose name is a bare identifier rather than a table
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Drop the casts that _parse_lambda_arg wrapped around typed args
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }
 452
 453        def _negate_range(
 454            self, this: t.Optional[exp.Expression] = None
 455        ) -> t.Optional[exp.Expression]:
 456            if not this:
 457                return this
 458
 459            query = this.args.get("query")
 460            if isinstance(this, exp.In) and isinstance(query, exp.Query):
 461                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
 462                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
 463                # which can produce different results (most likely a SnowFlake bug).
 464                #
 465                # https://docs.snowflake.com/en/sql-reference/functions/in
 466                # Context: https://github.com/tobymao/sqlglot/issues/3890
 467                return self.expression(
 468                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
 469                )
 470
 471            return self.expression(exp.Not, this=this)
 472
        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse a [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraint.

            Returns None when the upcoming tokens don't match any of these forms.
            """
            # WITH is optional; if we got here without it, step back so the
            # keyword that triggered this parser can be re-matched below
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None
 498
 499        def _parse_create(self) -> exp.Create | exp.Command:
 500            expression = super()._parse_create()
 501            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
 502                # Replace the Table node with the enclosed Identifier
 503                expression.this.replace(expression.this.this)
 504
 505            return expression
 506
 507        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
 508        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
 509        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
 510            this = self._parse_var() or self._parse_type()
 511
 512            if not this:
 513                return None
 514
 515            self._match(TokenType.COMMA)
 516            expression = self._parse_bitwise()
 517            this = map_date_part(this)
 518            name = this.name.upper()
 519
 520            if name.startswith("EPOCH"):
 521                if name == "EPOCH_MILLISECOND":
 522                    scale = 10**3
 523                elif name == "EPOCH_MICROSECOND":
 524                    scale = 10**6
 525                elif name == "EPOCH_NANOSECOND":
 526                    scale = 10**9
 527                else:
 528                    scale = None
 529
 530                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
 531                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)
 532
 533                if scale:
 534                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))
 535
 536                return to_unix
 537
 538            return self.expression(exp.Extract, this=this, expression=expression)
 539
 540        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
 541            if is_map:
 542                # Keys are strings in Snowflake's objects, see also:
 543                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
 544                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
 545                return self._parse_slice(self._parse_string())
 546
 547            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))
 548
 549        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
 550            lateral = super()._parse_lateral()
 551            if not lateral:
 552                return lateral
 553
 554            if isinstance(lateral.this, exp.Explode):
 555                table_alias = lateral.args.get("alias")
 556                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
 557                if table_alias and not table_alias.args.get("columns"):
 558                    table_alias.set("columns", columns)
 559                elif not table_alias:
 560                    exp.alias_(lateral, "_flattened", table=columns, copy=False)
 561
 562            return lateral
 563
        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, additionally supporting Snowflake staged files.

            https://docs.snowflake.com/en/user-guide/querying-stage
            """
            # A string literal or an @-prefixed path denotes a staged file location
            # rather than a regular table name
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Staged files may carry wrapped options:
                # (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT can be either a string or a (possibly qualified) name
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                # Not a staged file: fall back to regular table-name parsing
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table
 597
 598        def _parse_id_var(
 599            self,
 600            any_token: bool = True,
 601            tokens: t.Optional[t.Collection[TokenType]] = None,
 602        ) -> t.Optional[exp.Expression]:
 603            if self._match_text_seq("IDENTIFIER", "("):
 604                identifier = (
 605                    super()._parse_id_var(any_token=any_token, tokens=tokens)
 606                    or self._parse_string()
 607                )
 608                self._match_r_paren()
 609                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])
 610
 611            return super()._parse_id_var(any_token=any_token, tokens=tokens)
 612
        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the remainder of a Snowflake SHOW <this> statement."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    # Explicit kind (e.g. IN DATABASE foo) — the kind is the matched keyword
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # No explicit kind: infer SCHEMA vs TABLE from what's being shown
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )
 650
 651        def _parse_alter_table_swap(self) -> exp.SwapTable:
 652            self._match_text_seq("WITH")
 653            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))
 654
 655        def _parse_location_property(self) -> exp.LocationProperty:
 656            self._match(TokenType.EQ)
 657            return self.expression(exp.LocationProperty, this=self._parse_location_path())
 658
 659        def _parse_file_location(self) -> t.Optional[exp.Expression]:
 660            # Parse either a subquery or a staged file
 661            return (
 662                self._parse_select(table=True, parse_subquery_alias=False)
 663                if self._match(TokenType.L_PAREN, advance=False)
 664                else self._parse_table_parts()
 665            )
 666
 667        def _parse_location_path(self) -> exp.Var:
 668            parts = [self._advance_any(ignore_reserved=True)]
 669
 670            # We avoid consuming a comma token because external tables like @foo and @bar
 671            # can be joined in a query with a comma separator, as well as closing paren
 672            # in case of subqueries
 673            while self._is_connected() and not self._match_set(
 674                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
 675            ):
 676                parts.append(self._advance_any(ignore_reserved=True))
 677
 678            return exp.var("".join(part.text for part in parts if part))
 679
 680        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
 681            this = super()._parse_lambda_arg()
 682
 683            if not this:
 684                return this
 685
 686            typ = self._parse_types()
 687
 688            if typ:
 689                return self.expression(exp.Cast, this=this, to=typ)
 690
 691            return this
 692
    class Tokenizer(tokens.Tokenizer):
        """Snowflake-specific tokenization rules."""

        # A quote can be escaped either with a backslash or by doubling it
        STRING_ESCAPES = ["\\", "'"]
        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
        # $$ ... $$ delimits raw (dollar-quoted) strings
        RAW_STRINGS = ["$$"]
        COMMENTS = ["--", "//", ("/*", "*/")]
        NESTED_COMMENTS = False

        KEYWORDS = {
            **tokens.Tokenizer.KEYWORDS,
            "BYTEINT": TokenType.INT,
            "CHAR VARYING": TokenType.VARCHAR,
            "CHARACTER VARYING": TokenType.VARCHAR,
            "EXCLUDE": TokenType.EXCEPT,
            "ILIKE ANY": TokenType.ILIKE_ANY,
            "LIKE ANY": TokenType.LIKE_ANY,
            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
            "MINUS": TokenType.EXCEPT,
            "NCHAR VARYING": TokenType.VARCHAR,
            "PUT": TokenType.COMMAND,
            "REMOVE": TokenType.COMMAND,
            "RM": TokenType.COMMAND,
            "SAMPLE": TokenType.TABLE_SAMPLE,
            "SQL_DOUBLE": TokenType.DOUBLE,
            "SQL_VARCHAR": TokenType.VARCHAR,
            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
            "TAG": TokenType.TAG,
            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
            "TOP": TokenType.TOP,
            "WAREHOUSE": TokenType.WAREHOUSE,
            "STREAMLIT": TokenType.STREAMLIT,
        }
        # Don't tokenize "/*+" as a hint-comment opener
        KEYWORDS.pop("/*+")

        SINGLE_TOKENS = {
            **tokens.Tokenizer.SINGLE_TOKENS,
            "$": TokenType.PARAMETER,
        }

        VAR_SINGLE_TOKENS = {"$"}

        # SHOW is fully parsed (see the Parser) instead of treated as a raw command
        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
 735
    class Generator(generator.Generator):
        """SQL generation rules for the Snowflake dialect."""

        # Placeholders/parameters are prefixed with $
        PARAMETER_TOKEN = "$"
        MATCHED_BY_SOURCE = False
        SINGLE_STRING_INTERVAL = True
        JOIN_HINTS = False
        TABLE_HINTS = False
        QUERY_HINTS = False
        AGGREGATE_FILTER_SUPPORTED = False
        SUPPORTS_TABLE_COPY = False
        COLLATE_IS_FUNC = True
        LIMIT_ONLY_LITERALS = True
        JSON_KEY_VALUE_PAIR_SEP = ","
        INSERT_OVERWRITE = " OVERWRITE INTO"
        STRUCT_DELIMITER = ("(", ")")
        COPY_PARAMS_ARE_WRAPPED = False
        COPY_PARAMS_EQ_REQUIRED = True
        # Snowflake spells SELECT * EXCEPT (...) as EXCLUDE
        STAR_EXCEPT = "EXCLUDE"
        SUPPORTS_EXPLODING_PROJECTIONS = False
        ARRAY_CONCAT_IS_VAR_LEN = False
        SUPPORTS_CONVERT_TIMEZONE = True

        # Per-node rendering overrides for Snowflake SQL
        TRANSFORMS = {
            **generator.Generator.TRANSFORMS,
            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
            exp.ArgMax: rename_func("MAX_BY"),
            exp.ArgMin: rename_func("MIN_BY"),
            exp.Array: inline_array_sql,
            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
            # Snowflake's ARRAY_CONTAINS takes (value, array) — note the swap
            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
            exp.AtTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
            ),
            exp.BitwiseXor: rename_func("BITXOR"),
            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
            exp.DateAdd: date_delta_sql("DATEADD"),
            exp.DateDiff: date_delta_sql("DATEDIFF"),
            exp.DateStrToDate: datestrtodate_sql,
            exp.DayOfMonth: rename_func("DAYOFMONTH"),
            exp.DayOfWeek: rename_func("DAYOFWEEK"),
            exp.DayOfYear: rename_func("DAYOFYEAR"),
            exp.Explode: rename_func("FLATTEN"),
            exp.Extract: rename_func("DATE_PART"),
            exp.FromTimeZone: lambda self, e: self.func(
                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
            ),
            # ARRAY_GENERATE_RANGE's end is exclusive, so add 1 to the inclusive end
            exp.GenerateSeries: lambda self, e: self.func(
                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
            ),
            exp.GroupConcat: rename_func("LISTAGG"),
            exp.If: if_sql(name="IFF", false_value="NULL"),
            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
            exp.JSONExtractScalar: lambda self, e: self.func(
                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
            ),
            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
            exp.JSONPathRoot: lambda *_: "",
            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
            exp.LogicalOr: rename_func("BOOLOR_AGG"),
            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.Max: max_or_greatest,
            exp.Min: min_or_least,
            exp.ParseJSON: lambda self, e: self.func(
                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
            ),
            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
            exp.PercentileCont: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.PercentileDisc: transforms.preprocess(
                [transforms.add_within_group_for_percentiles]
            ),
            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
            exp.RegexpILike: _regexpilike_sql,
            exp.Rand: rename_func("RANDOM"),
            exp.Select: transforms.preprocess(
                [
                    transforms.eliminate_distinct_on,
                    transforms.explode_to_unnest(),
                    transforms.eliminate_semi_and_anti_joins,
                    _unnest_generate_date_array,
                ]
            ),
            exp.SHA: rename_func("SHA1"),
            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
            exp.StartsWith: rename_func("STARTSWITH"),
            exp.StrPosition: lambda self, e: self.func(
                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
            ),
            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
            exp.Stuff: rename_func("INSERT"),
            exp.TimeAdd: date_delta_sql("TIMEADD"),
            exp.TimestampDiff: lambda self, e: self.func(
                "TIMESTAMPDIFF", e.unit, e.expression, e.this
            ),
            exp.TimestampTrunc: timestamptrunc_sql(),
            exp.TimeStrToTime: timestrtotime_sql,
            exp.TimeToStr: lambda self, e: self.func(
                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
            ),
            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
            exp.ToArray: rename_func("TO_ARRAY"),
            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
            exp.TsOrDsToDate: lambda self, e: self.func(
                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
            ),
            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
            exp.Xor: rename_func("BOOLXOR"),
        }

        SUPPORTED_JSON_PATH_PARTS = {
            exp.JSONPathKey,
            exp.JSONPathRoot,
            exp.JSONPathSubscript,
        }

        TYPE_MAPPING = {
            **generator.Generator.TYPE_MAPPING,
            exp.DataType.Type.NESTED: "OBJECT",
            exp.DataType.Type.STRUCT: "OBJECT",
        }

        PROPERTIES_LOCATION = {
            **generator.Generator.PROPERTIES_LOCATION,
            exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
            exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
        }

        # Rows containing any of these can't be rendered as a VALUES table
        # (see values_sql below in the original file)
        UNSUPPORTED_VALUES_EXPRESSIONS = {
            exp.Map,
            exp.StarMap,
            exp.Struct,
            exp.VarMap,
        }
 874
 875        def with_properties(self, properties: exp.Properties) -> str:
 876            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 877
 878        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 879            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 880                values_as_table = False
 881
 882            return super().values_sql(expression, values_as_table=values_as_table)
 883
 884        def datatype_sql(self, expression: exp.DataType) -> str:
 885            expressions = expression.expressions
 886            if (
 887                expressions
 888                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 889                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 890            ):
 891                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 892                return "OBJECT"
 893
 894            return super().datatype_sql(expression)
 895
 896        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 897            return self.func(
 898                "TO_NUMBER",
 899                expression.this,
 900                expression.args.get("format"),
 901                expression.args.get("precision"),
 902                expression.args.get("scale"),
 903            )
 904
 905        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 906            milli = expression.args.get("milli")
 907            if milli is not None:
 908                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 909                expression.set("nano", milli_to_nano)
 910
 911            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 912
 913        def trycast_sql(self, expression: exp.TryCast) -> str:
 914            value = expression.this
 915
 916            if value.type is None:
 917                from sqlglot.optimizer.annotate_types import annotate_types
 918
 919                value = annotate_types(value)
 920
 921            if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
 922                return super().trycast_sql(expression)
 923
 924            # TRY_CAST only works for string values in Snowflake
 925            return self.cast_sql(expression)
 926
 927        def log_sql(self, expression: exp.Log) -> str:
 928            if not expression.expression:
 929                return self.func("LN", expression.this)
 930
 931            return super().log_sql(expression)
 932
        def unnest_sql(self, expression: exp.Unnest) -> str:
            """Render UNNEST as Snowflake's TABLE(FLATTEN(INPUT => ...)).

            A full six-column alias list is built so FLATTEN's output columns
            line up with the names the surrounding query expects.
            """
            unnest_alias = expression.args.get("alias")
            offset = expression.args.get("offset")

            columns = [
                exp.to_identifier("seq"),
                exp.to_identifier("key"),
                exp.to_identifier("path"),
                # An explicit offset alias maps onto the fourth (index) column;
                # pop it so it isn't also rendered as a separate OFFSET clause
                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
                # The first user-supplied alias column (if any) names the value column
                seq_get(unnest_alias.columns if unnest_alias else [], 0)
                or exp.to_identifier("value"),
                exp.to_identifier("this"),
            ]

            if unnest_alias:
                unnest_alias.set("columns", columns)
            else:
                unnest_alias = exp.TableAlias(this="_u", columns=columns)

            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
            alias = self.sql(unnest_alias)
            alias = f" AS {alias}" if alias else ""
            return f"{explode}{alias}"
 956
 957        def show_sql(self, expression: exp.Show) -> str:
 958            terse = "TERSE " if expression.args.get("terse") else ""
 959            history = " HISTORY" if expression.args.get("history") else ""
 960            like = self.sql(expression, "like")
 961            like = f" LIKE {like}" if like else ""
 962
 963            scope = self.sql(expression, "scope")
 964            scope = f" {scope}" if scope else ""
 965
 966            scope_kind = self.sql(expression, "scope_kind")
 967            if scope_kind:
 968                scope_kind = f" IN {scope_kind}"
 969
 970            starts_with = self.sql(expression, "starts_with")
 971            if starts_with:
 972                starts_with = f" STARTS WITH {starts_with}"
 973
 974            limit = self.sql(expression, "limit")
 975
 976            from_ = self.sql(expression, "from")
 977            if from_:
 978                from_ = f" FROM {from_}"
 979
 980            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 981
 982        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 983            # Other dialects don't support all of the following parameters, so we need to
 984            # generate default values as necessary to ensure the transpilation is correct
 985            group = expression.args.get("group")
 986            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 987            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 988            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 989
 990            return self.func(
 991                "REGEXP_SUBSTR",
 992                expression.this,
 993                expression.expression,
 994                position,
 995                occurrence,
 996                parameters,
 997                group,
 998            )
 999
1000        def except_op(self, expression: exp.Except) -> str:
1001            if not expression.args.get("distinct"):
1002                self.unsupported("EXCEPT with All is not supported in Snowflake")
1003            return super().except_op(expression)
1004
1005        def intersect_op(self, expression: exp.Intersect) -> str:
1006            if not expression.args.get("distinct"):
1007                self.unsupported("INTERSECT with All is not supported in Snowflake")
1008            return super().intersect_op(expression)
1009
1010        def describe_sql(self, expression: exp.Describe) -> str:
1011            # Default to table if kind is unknown
1012            kind_value = expression.args.get("kind") or "TABLE"
1013            kind = f" {kind_value}" if kind_value else ""
1014            this = f" {self.sql(expression, 'this')}"
1015            expressions = self.expressions(expression, flat=True)
1016            expressions = f" {expressions}" if expressions else ""
1017            return f"DESCRIBE{kind}{this}{expressions}"
1018
1019        def generatedasidentitycolumnconstraint_sql(
1020            self, expression: exp.GeneratedAsIdentityColumnConstraint
1021        ) -> str:
1022            start = expression.args.get("start")
1023            start = f" START {start}" if start else ""
1024            increment = expression.args.get("increment")
1025            increment = f" INCREMENT {increment}" if increment else ""
1026            return f"AUTOINCREMENT{start}{increment}"
1027
1028        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1029            this = self.sql(expression, "this")
1030            return f"SWAP WITH {this}"
1031
1032        def cluster_sql(self, expression: exp.Cluster) -> str:
1033            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1034
1035        def struct_sql(self, expression: exp.Struct) -> str:
1036            keys = []
1037            values = []
1038
1039            for i, e in enumerate(expression.expressions):
1040                if isinstance(e, exp.PropertyEQ):
1041                    keys.append(
1042                        exp.Literal.string(e.name) if isinstance(e.this, exp.Identifier) else e.this
1043                    )
1044                    values.append(e.expression)
1045                else:
1046                    keys.append(exp.Literal.string(f"_{i}"))
1047                    values.append(e)
1048
1049            return self.func("OBJECT_CONSTRUCT", *flatten(zip(keys, values)))
1050
1051        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1052            if expression.args.get("weight") or expression.args.get("accuracy"):
1053                self.unsupported(
1054                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1055                )
1056
1057            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1058
        def alterset_sql(self, expression: exp.AlterSet) -> str:
            """Render an ALTER ... SET clause, including Snowflake's
            STAGE_FILE_FORMAT, STAGE_COPY_OPTIONS and TAG options."""
            exprs = self.expressions(expression, flat=True)
            exprs = f" {exprs}" if exprs else ""
            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
            tag = self.expressions(expression, key="tag", flat=True)
            tag = f" TAG {tag}" if tag else ""

            return f"SET{exprs}{file_format}{copy_options}{tag}"
NORMALIZATION_STRATEGY = <NormalizationStrategy.UPPERCASE: 'UPPERCASE'>

Specifies the strategy according to which identifiers should be normalized.

NULL_ORDERING = 'nulls_are_large'

Default NULL ordering method to use if not explicitly set. Possible values: "nulls_are_small", "nulls_are_large", "nulls_are_last"

TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
SUPPORTS_USER_DEFINED_TYPES = False

Whether user-defined data types are supported.

SUPPORTS_SEMI_ANTI_JOIN = False

Whether SEMI or ANTI joins are supported.

PREFER_CTE_ALIAS_COLUMN = True

Some dialects, such as Snowflake, allow you to reference a CTE column alias in the HAVING clause of the CTE. This flag will cause the CTE alias columns to override any projection aliases in the subquery.

For example, WITH y(c) AS ( SELECT SUM(a) FROM (SELECT 1 a) AS x HAVING c > 0 ) SELECT c FROM y;

will be rewritten as

WITH y(c) AS (
    SELECT SUM(a) AS c FROM (SELECT 1 AS a) AS x HAVING c > 0
) SELECT c FROM y;
TABLESAMPLE_SIZE_IS_PERCENT = True

Whether a size in the table sample clause represents percentage.

COPY_PARAMS_ARE_CSV = False

Whether COPY statement parameters are separated by comma or whitespace.

TIME_MAPPING: Dict[str, str] = {'YYYY': '%Y', 'yyyy': '%Y', 'YY': '%y', 'yy': '%y', 'MMMM': '%B', 'mmmm': '%B', 'MON': '%b', 'mon': '%b', 'MM': '%m', 'mm': '%m', 'DD': '%d', 'dd': '%-d', 'DY': '%a', 'dy': '%w', 'HH24': '%H', 'hh24': '%H', 'HH12': '%I', 'hh12': '%I', 'MI': '%M', 'mi': '%M', 'SS': '%S', 'ss': '%S', 'FF': '%f', 'ff': '%f', 'FF6': '%f', 'ff6': '%f'}

Associates this dialect's time formats with their equivalent Python strftime formats.

def quote_identifier(self, expression: ~E, identify: bool = True) -> ~E:
267    def quote_identifier(self, expression: E, identify: bool = True) -> E:
268        # This disables quoting DUAL in SELECT ... FROM DUAL, because Snowflake treats an
269        # unquoted DUAL keyword in a special way and does not map it to a user-defined table
270        if (
271            isinstance(expression, exp.Identifier)
272            and isinstance(expression.parent, exp.Table)
273            and expression.name.lower() == "dual"
274        ):
275            return expression  # type: ignore
276
277        return super().quote_identifier(expression, identify=identify)

Adds quotes to a given identifier.

Arguments:
  • expression: The expression of interest. If it's not an Identifier, this method is a no-op.
  • identify: If set to False, the quotes will only be added if the identifier is deemed "unsafe", with respect to its characters and this dialect's normalization strategy.
SUPPORTS_COLUMN_JOIN_MARKS = False

Whether the old-style outer join (+) syntax is supported.

UNESCAPED_SEQUENCES: Dict[str, str] = {'\\a': '\x07', '\\b': '\x08', '\\f': '\x0c', '\\n': '\n', '\\r': '\r', '\\t': '\t', '\\v': '\x0b', '\\\\': '\\'}

Mapping of an escaped sequence (e.g. the two-character string `\\n`) to its unescaped version (e.g. the literal newline character).

tokenizer_class = <class 'Snowflake.Tokenizer'>
jsonpath_tokenizer_class = <class 'sqlglot.tokens.JSONPathTokenizer'>
parser_class = <class 'Snowflake.Parser'>
generator_class = <class 'Snowflake.Generator'>
TIME_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
FORMAT_TRIE: Dict = {'Y': {'Y': {'Y': {'Y': {0: True}}, 0: True}}, 'y': {'y': {'y': {'y': {0: True}}, 0: True}}, 'M': {'M': {'M': {'M': {0: True}}, 0: True}, 'O': {'N': {0: True}}, 'I': {0: True}}, 'm': {'m': {'m': {'m': {0: True}}, 0: True}, 'o': {'n': {0: True}}, 'i': {0: True}}, 'D': {'D': {0: True}, 'Y': {0: True}}, 'd': {'d': {0: True}, 'y': {0: True}}, 'H': {'H': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'h': {'h': {'2': {'4': {0: True}}, '1': {'2': {0: True}}}}, 'S': {'S': {0: True}}, 's': {'s': {0: True}}, 'F': {'F': {0: True, '6': {0: True}}}, 'f': {'f': {0: True, '6': {0: True}}}}
INVERSE_TIME_MAPPING: Dict[str, str] = {'%Y': 'yyyy', '%y': 'yy', '%B': 'mmmm', '%b': 'mon', '%m': 'mm', '%d': 'DD', '%-d': 'dd', '%a': 'DY', '%w': 'dy', '%H': 'hh24', '%I': 'hh12', '%M': 'mi', '%S': 'ss', '%f': 'ff6'}
INVERSE_TIME_TRIE: Dict = {'%': {'Y': {0: True}, 'y': {0: True}, 'B': {0: True}, 'b': {0: True}, 'm': {0: True}, 'd': {0: True}, '-': {'d': {0: True}}, 'a': {0: True}, 'w': {0: True}, 'H': {0: True}, 'I': {0: True}, 'M': {0: True}, 'S': {0: True}, 'f': {0: True}}}
INVERSE_FORMAT_MAPPING: Dict[str, str] = {}
INVERSE_FORMAT_TRIE: Dict = {}
INVERSE_CREATABLE_KIND_MAPPING: dict[str, str] = {}
ESCAPED_SEQUENCES: Dict[str, str] = {'\x07': '\\a', '\x08': '\\b', '\x0c': '\\f', '\n': '\\n', '\r': '\\r', '\t': '\\t', '\x0b': '\\v', '\\': '\\\\'}
QUOTE_START = "'"
QUOTE_END = "'"
IDENTIFIER_START = '"'
IDENTIFIER_END = '"'
BIT_START: Optional[str] = None
BIT_END: Optional[str] = None
HEX_START: Optional[str] = "x'"
HEX_END: Optional[str] = "'"
BYTE_START: Optional[str] = None
BYTE_END: Optional[str] = None
UNICODE_START: Optional[str] = None
UNICODE_END: Optional[str] = None
class Snowflake.Parser(sqlglot.parser.Parser):
    class Parser(parser.Parser):
        """Snowflake-dialect parser.

        Extends the base :class:`sqlglot.parser.Parser` with Snowflake-specific
        function builders, SHOW/ALTER statement handling, staged-file table
        references (``@stage/path``), and column-constraint policies
        (MASKING / PROJECTION / TAG).
        """

        # Pivot aliases may be given as strings in Snowflake.
        IDENTIFY_PIVOT_STRINGS = True
        # TABLESAMPLE defaults to BERNOULLI sampling in Snowflake.
        DEFAULT_SAMPLING_METHOD = "BERNOULLI"
        # ``col:path`` is VARIANT extraction, not a plain colon operator.
        COLON_IS_VARIANT_EXTRACT = True

        ID_VAR_TOKENS = {
            *parser.Parser.ID_VAR_TOKENS,
            TokenType.MATCH_CONDITION,
        }

        # WINDOW may be used as a table alias; MATCH_CONDITION may not.
        TABLE_ALIAS_TOKENS = parser.Parser.TABLE_ALIAS_TOKENS | {TokenType.WINDOW}
        TABLE_ALIAS_TOKENS.discard(TokenType.MATCH_CONDITION)

        FUNCTIONS = {
            **parser.Parser.FUNCTIONS,
            "APPROX_PERCENTILE": exp.ApproxQuantile.from_arg_list,
            "ARRAYAGG": exp.ArrayAgg.from_arg_list,
            "ARRAY_CONSTRUCT": lambda args: exp.Array(expressions=args),
            # Snowflake's argument order is (array, value); exp.ArrayContains
            # expects (value, array), hence the swap.
            "ARRAY_CONTAINS": lambda args: exp.ArrayContains(
                this=seq_get(args, 1), expression=seq_get(args, 0)
            ),
            "ARRAY_GENERATE_RANGE": lambda args: exp.GenerateSeries(
                # ARRAY_GENERATE_RANGE has an exclusive end; we normalize it to be inclusive
                start=seq_get(args, 0),
                end=exp.Sub(this=seq_get(args, 1), expression=exp.Literal.number(1)),
                step=seq_get(args, 2),
            ),
            "BITXOR": binary_from_function(exp.BitwiseXor),
            "BIT_XOR": binary_from_function(exp.BitwiseXor),
            "BOOLXOR": binary_from_function(exp.Xor),
            "DATE": _build_datetime("DATE", exp.DataType.Type.DATE),
            "DATE_TRUNC": _date_trunc_to_time,
            "DATEADD": _build_date_time_add(exp.DateAdd),
            "DATEDIFF": _build_datediff,
            "DIV0": _build_if_from_div0,
            "FLATTEN": exp.Explode.from_arg_list,
            "GET_PATH": lambda args, dialect: exp.JSONExtract(
                this=seq_get(args, 0), expression=dialect.to_json_path(seq_get(args, 1))
            ),
            "IFF": exp.If.from_arg_list,
            "LAST_DAY": lambda args: exp.LastDay(
                this=seq_get(args, 0), unit=map_date_part(seq_get(args, 1))
            ),
            # LEN/LENGTH count bytes in Snowflake, hence binary=True.
            "LEN": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LENGTH": lambda args: exp.Length(this=seq_get(args, 0), binary=True),
            "LISTAGG": exp.GroupConcat.from_arg_list,
            # MEDIAN(x) is equivalent to PERCENTILE_CONT(0.5).
            "MEDIAN": lambda args: exp.PercentileCont(
                this=seq_get(args, 0), expression=exp.Literal.number(0.5)
            ),
            "NULLIFZERO": _build_if_from_nullifzero,
            "OBJECT_CONSTRUCT": _build_object_construct,
            "REGEXP_REPLACE": _build_regexp_replace,
            "REGEXP_SUBSTR": exp.RegexpExtract.from_arg_list,
            "RLIKE": exp.RegexpLike.from_arg_list,
            "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
            "TIMEADD": _build_date_time_add(exp.TimeAdd),
            "TIMEDIFF": _build_datediff,
            "TIMESTAMPADD": _build_date_time_add(exp.DateAdd),
            "TIMESTAMPDIFF": _build_datediff,
            "TIMESTAMPFROMPARTS": build_timestamp_from_parts,
            "TIMESTAMP_FROM_PARTS": build_timestamp_from_parts,
            "TRY_PARSE_JSON": lambda args: exp.ParseJSON(this=seq_get(args, 0), safe=True),
            "TRY_TO_DATE": _build_datetime("TRY_TO_DATE", exp.DataType.Type.DATE, safe=True),
            "TO_DATE": _build_datetime("TO_DATE", exp.DataType.Type.DATE),
            "TO_NUMBER": lambda args: exp.ToNumber(
                this=seq_get(args, 0),
                format=seq_get(args, 1),
                precision=seq_get(args, 2),
                scale=seq_get(args, 3),
            ),
            "TO_TIME": _build_datetime("TO_TIME", exp.DataType.Type.TIME),
            "TO_TIMESTAMP": _build_datetime("TO_TIMESTAMP", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_LTZ": _build_datetime("TO_TIMESTAMP_LTZ", exp.DataType.Type.TIMESTAMPLTZ),
            "TO_TIMESTAMP_NTZ": _build_datetime("TO_TIMESTAMP_NTZ", exp.DataType.Type.TIMESTAMP),
            "TO_TIMESTAMP_TZ": _build_datetime("TO_TIMESTAMP_TZ", exp.DataType.Type.TIMESTAMPTZ),
            "TO_VARCHAR": exp.ToChar.from_arg_list,
            "ZEROIFNULL": _build_if_from_zeroifnull,
        }

        FUNCTION_PARSERS = {
            **parser.Parser.FUNCTION_PARSERS,
            "DATE_PART": lambda self: self._parse_date_part(),
            "OBJECT_CONSTRUCT_KEEP_NULL": lambda self: self._parse_json_object(),
        }
        # Snowflake's TRIM takes plain function arguments, so the special
        # base-parser handling for TRIM is not wanted here.
        FUNCTION_PARSERS.pop("TRIM")

        # TIME is handled separately (see TO_TIME above), so it's excluded here.
        TIMESTAMPS = parser.Parser.TIMESTAMPS - {TokenType.TIME}

        RANGE_PARSERS = {
            **parser.Parser.RANGE_PARSERS,
            TokenType.LIKE_ANY: parser.binary_range_parser(exp.LikeAny),
            TokenType.ILIKE_ANY: parser.binary_range_parser(exp.ILikeAny),
        }

        ALTER_PARSERS = {
            **parser.Parser.ALTER_PARSERS,
            # ALTER ... UNSET [TAG] <id>, ... — represented as an exp.Set with unset=True.
            "UNSET": lambda self: self.expression(
                exp.Set,
                tag=self._match_text_seq("TAG"),
                expressions=self._parse_csv(self._parse_id_var),
                unset=True,
            ),
            "SWAP": lambda self: self._parse_alter_table_swap(),
        }

        STATEMENT_PARSERS = {
            **parser.Parser.STATEMENT_PARSERS,
            TokenType.SHOW: lambda self: self._parse_show(),
        }

        PROPERTY_PARSERS = {
            **parser.Parser.PROPERTY_PARSERS,
            "LOCATION": lambda self: self._parse_location_property(),
        }

        TYPE_CONVERTERS = {
            # https://docs.snowflake.com/en/sql-reference/data-types-numeric#number
            exp.DataType.Type.DECIMAL: build_default_decimal_type(precision=38, scale=0),
        }

        # SHOW subcommands; each "TERSE X" variant maps to the same parser as "X".
        SHOW_PARSERS = {
            "SCHEMAS": _show_parser("SCHEMAS"),
            "TERSE SCHEMAS": _show_parser("SCHEMAS"),
            "OBJECTS": _show_parser("OBJECTS"),
            "TERSE OBJECTS": _show_parser("OBJECTS"),
            "TABLES": _show_parser("TABLES"),
            "TERSE TABLES": _show_parser("TABLES"),
            "VIEWS": _show_parser("VIEWS"),
            "TERSE VIEWS": _show_parser("VIEWS"),
            "PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "TERSE PRIMARY KEYS": _show_parser("PRIMARY KEYS"),
            "IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "TERSE IMPORTED KEYS": _show_parser("IMPORTED KEYS"),
            "UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "TERSE UNIQUE KEYS": _show_parser("UNIQUE KEYS"),
            "SEQUENCES": _show_parser("SEQUENCES"),
            "TERSE SEQUENCES": _show_parser("SEQUENCES"),
            "COLUMNS": _show_parser("COLUMNS"),
            "USERS": _show_parser("USERS"),
            "TERSE USERS": _show_parser("USERS"),
        }

        CONSTRAINT_PARSERS = {
            **parser.Parser.CONSTRAINT_PARSERS,
            "WITH": lambda self: self._parse_with_constraint(),
            "MASKING": lambda self: self._parse_with_constraint(),
            "PROJECTION": lambda self: self._parse_with_constraint(),
            "TAG": lambda self: self._parse_with_constraint(),
        }

        # Single tokens allowed inside a staged-file path, e.g. @stage/a.b%c.
        STAGED_FILE_SINGLE_TOKENS = {
            TokenType.DOT,
            TokenType.MOD,
            TokenType.SLASH,
        }

        # Default column names produced by LATERAL FLATTEN(...).
        FLATTEN_COLUMNS = ["SEQ", "KEY", "PATH", "INDEX", "VALUE", "THIS"]

        # SHOW kinds whose IN scope refers to a schema rather than a table.
        SCHEMA_KINDS = {"OBJECTS", "TABLES", "VIEWS", "SEQUENCES", "UNIQUE KEYS", "IMPORTED KEYS"}

        # CREATE kinds whose target is a bare identifier, not a table reference.
        NON_TABLE_CREATABLES = {"STORAGE INTEGRATION", "TAG", "WAREHOUSE", "STREAMLIT"}

        LAMBDAS = {
            **parser.Parser.LAMBDAS,
            TokenType.ARROW: lambda self, expressions: self.expression(
                exp.Lambda,
                this=self._replace_lambda(
                    self._parse_assignment(),
                    expressions,
                ),
                # Lambda args may carry type annotations, parsed as casts by
                # _parse_lambda_arg; strip the cast to recover the bare arg name.
                expressions=[e.this if isinstance(e, exp.Cast) else e for e in expressions],
            ),
        }

        def _negate_range(
            self, this: t.Optional[exp.Expression] = None
        ) -> t.Optional[exp.Expression]:
            """Negate a range expression, special-casing NOT IN (subquery)."""
            if not this:
                return this

            query = this.args.get("query")
            if isinstance(this, exp.In) and isinstance(query, exp.Query):
                # Snowflake treats `value NOT IN (subquery)` as `VALUE <> ALL (subquery)`, so
                # we do this conversion here to avoid parsing it into `NOT value IN (subquery)`
                # which can produce different results (most likely a Snowflake bug).
                #
                # https://docs.snowflake.com/en/sql-reference/functions/in
                # Context: https://github.com/tobymao/sqlglot/issues/3890
                return self.expression(
                    exp.NEQ, this=this.this, expression=exp.All(this=query.unnest())
                )

            return self.expression(exp.Not, this=this)

        def _parse_with_constraint(self) -> t.Optional[exp.Expression]:
            """Parse [WITH] MASKING POLICY / PROJECTION POLICY / TAG column constraints."""
            # The WITH keyword is optional; if we entered on a different keyword
            # (MASKING/PROJECTION/TAG), step back so the sequence matches below.
            if self._prev.token_type != TokenType.WITH:
                self._retreat(self._index - 1)

            if self._match_text_seq("MASKING", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.MaskingPolicyColumnConstraint,
                    # Policies may be qualified (db.schema.policy); normalize to a Dot chain.
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                    expressions=self._match(TokenType.USING)
                    and self._parse_wrapped_csv(self._parse_id_var),
                )
            if self._match_text_seq("PROJECTION", "POLICY"):
                policy = self._parse_column()
                return self.expression(
                    exp.ProjectionPolicyColumnConstraint,
                    this=policy.to_dot() if isinstance(policy, exp.Column) else policy,
                )
            if self._match(TokenType.TAG):
                return self.expression(
                    exp.TagColumnConstraint,
                    expressions=self._parse_wrapped_csv(self._parse_property),
                )

            return None

        def _parse_create(self) -> exp.Create | exp.Command:
            """Parse CREATE, unwrapping the target for non-table creatables."""
            expression = super()._parse_create()
            if isinstance(expression, exp.Create) and expression.kind in self.NON_TABLE_CREATABLES:
                # Replace the Table node with the enclosed Identifier
                expression.this.replace(expression.this.this)

            return expression

        # https://docs.snowflake.com/en/sql-reference/functions/date_part.html
        # https://docs.snowflake.com/en/sql-reference/functions-date-time.html#label-supported-date-time-parts
        def _parse_date_part(self: Snowflake.Parser) -> t.Optional[exp.Expression]:
            """Parse DATE_PART(part, expr), mapping EPOCH parts to unix-time math."""
            this = self._parse_var() or self._parse_type()

            if not this:
                return None

            self._match(TokenType.COMMA)
            expression = self._parse_bitwise()
            this = map_date_part(this)
            name = this.name.upper()

            if name.startswith("EPOCH"):
                # EPOCH_* parts become CAST(expr AS TIMESTAMP) -> unix seconds,
                # scaled to the requested sub-second resolution.
                if name == "EPOCH_MILLISECOND":
                    scale = 10**3
                elif name == "EPOCH_MICROSECOND":
                    scale = 10**6
                elif name == "EPOCH_NANOSECOND":
                    scale = 10**9
                else:
                    scale = None

                ts = self.expression(exp.Cast, this=expression, to=exp.DataType.build("TIMESTAMP"))
                to_unix: exp.Expression = self.expression(exp.TimeToUnix, this=ts)

                if scale:
                    to_unix = exp.Mul(this=to_unix, expression=exp.Literal.number(scale))

                return to_unix

            return self.expression(exp.Extract, this=this, expression=expression)

        def _parse_bracket_key_value(self, is_map: bool = False) -> t.Optional[exp.Expression]:
            """Parse a key/value entry inside bracketed object/map syntax."""
            if is_map:
                # Keys are strings in Snowflake's objects, see also:
                # - https://docs.snowflake.com/en/sql-reference/data-types-semistructured
                # - https://docs.snowflake.com/en/sql-reference/functions/object_construct
                return self._parse_slice(self._parse_string())

            return self._parse_slice(self._parse_alias(self._parse_assignment(), explicit=True))

        def _parse_lateral(self) -> t.Optional[exp.Lateral]:
            """Parse LATERAL, attaching FLATTEN's implicit output columns to the alias."""
            lateral = super()._parse_lateral()
            if not lateral:
                return lateral

            if isinstance(lateral.this, exp.Explode):
                table_alias = lateral.args.get("alias")
                columns = [exp.to_identifier(col) for col in self.FLATTEN_COLUMNS]
                if table_alias and not table_alias.args.get("columns"):
                    table_alias.set("columns", columns)
                elif not table_alias:
                    # No alias at all: synthesize one so the FLATTEN columns are addressable.
                    exp.alias_(lateral, "_flattened", table=columns, copy=False)

            return lateral

        def _parse_table_parts(
            self, schema: bool = False, is_db_reference: bool = False, wildcard: bool = False
        ) -> exp.Table:
            """Parse a table reference, including staged-file locations and string tables."""
            # https://docs.snowflake.com/en/user-guide/querying-stage
            if self._match(TokenType.STRING, advance=False):
                table = self._parse_string()
            elif self._match_text_seq("@", advance=False):
                table = self._parse_location_path()
            else:
                table = None

            if table:
                file_format = None
                pattern = None

                # Optional parenthesized options: (FILE_FORMAT => ..., PATTERN => ...)
                wrapped = self._match(TokenType.L_PAREN)
                while self._curr and wrapped and not self._match(TokenType.R_PAREN):
                    if self._match_text_seq("FILE_FORMAT", "=>"):
                        # FILE_FORMAT may be a string literal or a named format object.
                        file_format = self._parse_string() or super()._parse_table_parts(
                            is_db_reference=is_db_reference
                        )
                    elif self._match_text_seq("PATTERN", "=>"):
                        pattern = self._parse_string()
                    else:
                        break

                    self._match(TokenType.COMMA)

                table = self.expression(exp.Table, this=table, format=file_format, pattern=pattern)
            else:
                table = super()._parse_table_parts(schema=schema, is_db_reference=is_db_reference)

            return table

        def _parse_id_var(
            self,
            any_token: bool = True,
            tokens: t.Optional[t.Collection[TokenType]] = None,
        ) -> t.Optional[exp.Expression]:
            """Parse an identifier, supporting Snowflake's IDENTIFIER(...) wrapper."""
            if self._match_text_seq("IDENTIFIER", "("):
                identifier = (
                    super()._parse_id_var(any_token=any_token, tokens=tokens)
                    or self._parse_string()
                )
                self._match_r_paren()
                return self.expression(exp.Anonymous, this="IDENTIFIER", expressions=[identifier])

            return super()._parse_id_var(any_token=any_token, tokens=tokens)

        def _parse_show_snowflake(self, this: str) -> exp.Show:
            """Parse the tail of a SHOW statement (LIKE / IN scope / LIMIT / FROM)."""
            scope = None
            scope_kind = None

            # will identify SHOW TERSE SCHEMAS but not SHOW TERSE PRIMARY KEYS
            # which is syntactically valid but has no effect on the output
            terse = self._tokens[self._index - 2].text.upper() == "TERSE"

            history = self._match_text_seq("HISTORY")

            like = self._parse_string() if self._match(TokenType.LIKE) else None

            if self._match(TokenType.IN):
                if self._match_text_seq("ACCOUNT"):
                    scope_kind = "ACCOUNT"
                elif self._match_set(self.DB_CREATABLES):
                    scope_kind = self._prev.text.upper()
                    if self._curr:
                        scope = self._parse_table_parts()
                elif self._curr:
                    # Bare `IN <name>`: infer schema vs table scope from the SHOW kind.
                    scope_kind = "SCHEMA" if this in self.SCHEMA_KINDS else "TABLE"
                    scope = self._parse_table_parts()

            return self.expression(
                exp.Show,
                **{
                    "terse": terse,
                    "this": this,
                    "history": history,
                    "like": like,
                    "scope": scope,
                    "scope_kind": scope_kind,
                    "starts_with": self._match_text_seq("STARTS", "WITH") and self._parse_string(),
                    "limit": self._parse_limit(),
                    "from": self._parse_string() if self._match(TokenType.FROM) else None,
                },
            )

        def _parse_alter_table_swap(self) -> exp.SwapTable:
            """Parse ALTER TABLE ... SWAP WITH <table>."""
            self._match_text_seq("WITH")
            return self.expression(exp.SwapTable, this=self._parse_table(schema=True))

        def _parse_location_property(self) -> exp.LocationProperty:
            """Parse LOCATION = @stage/path as a property."""
            self._match(TokenType.EQ)
            return self.expression(exp.LocationProperty, this=self._parse_location_path())

        def _parse_file_location(self) -> t.Optional[exp.Expression]:
            """Parse a COPY file location: a parenthesized subquery or a staged file."""
            # Parse either a subquery or a staged file
            return (
                self._parse_select(table=True, parse_subquery_alias=False)
                if self._match(TokenType.L_PAREN, advance=False)
                else self._parse_table_parts()
            )

        def _parse_location_path(self) -> exp.Var:
            """Consume a staged-file path (e.g. @stage/dir/file.csv) into a Var."""
            parts = [self._advance_any(ignore_reserved=True)]

            # We avoid consuming a comma token because external tables like @foo and @bar
            # can be joined in a query with a comma separator, as well as closing paren
            # in case of subqueries
            while self._is_connected() and not self._match_set(
                (TokenType.COMMA, TokenType.L_PAREN, TokenType.R_PAREN), advance=False
            ):
                parts.append(self._advance_any(ignore_reserved=True))

            return exp.var("".join(part.text for part in parts if part))

        def _parse_lambda_arg(self) -> t.Optional[exp.Expression]:
            """Parse a lambda argument, wrapping a typed arg (name TYPE) in a Cast."""
            this = super()._parse_lambda_arg()

            if not this:
                return this

            typ = self._parse_types()

            if typ:
                return self.expression(exp.Cast, this=this, to=typ)

            return this

Parser consumes a list of tokens produced by the Tokenizer and produces a parsed syntax tree.

Arguments:
  • error_level: The desired error level. Default: ErrorLevel.IMMEDIATE
  • error_message_context: The amount of context to capture from a query string when displaying the error message (in number of characters). Default: 100
  • max_errors: Maximum number of error messages to include in a raised ParseError. This is only relevant if error_level is ErrorLevel.RAISE. Default: 3
IDENTIFY_PIVOT_STRINGS = True
DEFAULT_SAMPLING_METHOD = 'BERNOULLI'
COLON_IS_VARIANT_EXTRACT = True
ID_VAR_TOKENS = {<TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SET: 'SET'>, <TokenType.ROW: 'ROW'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.NAME: 'NAME'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.KILL: 'KILL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.INET: 'INET'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.ASOF: 'ASOF'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.UINT: 'UINT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.DESC: 'DESC'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.NULL: 'NULL'>, <TokenType.TOP: 'TOP'>, <TokenType.CASE: 'CASE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UUID: 'UUID'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.XML: 'XML'>, <TokenType.BIT: 'BIT'>, <TokenType.VAR: 'VAR'>, <TokenType.TIME: 'TIME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.PARTITION: 'PARTITION'>, 
<TokenType.SMALLSERIAL: 'SMALLSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ALL: 'ALL'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.COPY: 'COPY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ASC: 'ASC'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TAG: 'TAG'>, <TokenType.VIEW: 'VIEW'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.APPLY: 'APPLY'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.CUBE: 'CUBE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MAP: 'MAP'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT128: 'INT128'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ENUM: 'ENUM'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OPERATOR: 'OPERATOR'>, 
<TokenType.DATE: 'DATE'>, <TokenType.YEAR: 'YEAR'>, <TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.FINAL: 'FINAL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.INT256: 'INT256'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.FULL: 'FULL'>, <TokenType.IS: 'IS'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT: 'INT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.OFFSET: 'OFFSET'>, <TokenType.NATURAL: 'NATURAL'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.USE: 'USE'>, <TokenType.LEFT: 'LEFT'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.LIST: 'LIST'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.JSON: 'JSON'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, 
<TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.END: 'END'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, <TokenType.ARRAY: 'ARRAY'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.RIGHT: 'RIGHT'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SOME: 'SOME'>}
TABLE_ALIAS_TOKENS = {<TokenType.PROCEDURE: 'PROCEDURE'>, <TokenType.UNPIVOT: 'UNPIVOT'>, <TokenType.DATE32: 'DATE32'>, <TokenType.UNNEST: 'UNNEST'>, <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, <TokenType.ISNULL: 'ISNULL'>, <TokenType.EXISTS: 'EXISTS'>, <TokenType.IMAGE: 'IMAGE'>, <TokenType.ORDINALITY: 'ORDINALITY'>, <TokenType.BIGINT: 'BIGINT'>, <TokenType.VARCHAR: 'VARCHAR'>, <TokenType.USERDEFINED: 'USERDEFINED'>, <TokenType.SET: 'SET'>, <TokenType.ROW: 'ROW'>, <TokenType.VARBINARY: 'VARBINARY'>, <TokenType.NAME: 'NAME'>, <TokenType.COLUMN: 'COLUMN'>, <TokenType.BIGSERIAL: 'BIGSERIAL'>, <TokenType.UNIQUE: 'UNIQUE'>, <TokenType.KILL: 'KILL'>, <TokenType.PERCENT: 'PERCENT'>, <TokenType.STREAMLIT: 'STREAMLIT'>, <TokenType.INET: 'INET'>, <TokenType.SMALLMONEY: 'SMALLMONEY'>, <TokenType.BOOLEAN: 'BOOLEAN'>, <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, <TokenType.GEOMETRY: 'GEOMETRY'>, <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, <TokenType.UINT: 'UINT'>, <TokenType.TINYBLOB: 'TINYBLOB'>, <TokenType.DESC: 'DESC'>, <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, <TokenType.IPPREFIX: 'IPPREFIX'>, <TokenType.SEMI: 'SEMI'>, <TokenType.CURRENT_DATETIME: 'CURRENT_DATETIME'>, <TokenType.CACHE: 'CACHE'>, <TokenType.NUMRANGE: 'NUMRANGE'>, <TokenType.NULL: 'NULL'>, <TokenType.TOP: 'TOP'>, <TokenType.CASE: 'CASE'>, <TokenType.DATETIME: 'DATETIME'>, <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, <TokenType.NESTED: 'NESTED'>, <TokenType.DECIMAL: 'DECIMAL'>, <TokenType.UUID: 'UUID'>, <TokenType.HLLSKETCH: 'HLLSKETCH'>, <TokenType.UNKNOWN: 'UNKNOWN'>, <TokenType.TRUE: 'TRUE'>, <TokenType.ROWS: 'ROWS'>, <TokenType.WINDOW: 'WINDOW'>, <TokenType.VARIANT: 'VARIANT'>, <TokenType.COMMENT: 'COMMENT'>, <TokenType.UMEDIUMINT: 'UMEDIUMINT'>, <TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.XML: 'XML'>, <TokenType.BIT: 'BIT'>, <TokenType.VAR: 'VAR'>, <TokenType.TIME: 'TIME'>, <TokenType.REFRESH: 'REFRESH'>, <TokenType.PARTITION: 'PARTITION'>, <TokenType.SMALLSERIAL: 
'SMALLSERIAL'>, <TokenType.ANY: 'ANY'>, <TokenType.FIXEDSTRING: 'FIXEDSTRING'>, <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, <TokenType.MERGE: 'MERGE'>, <TokenType.RANGE: 'RANGE'>, <TokenType.LOAD: 'LOAD'>, <TokenType.INT4RANGE: 'INT4RANGE'>, <TokenType.CONSTRAINT: 'CONSTRAINT'>, <TokenType.UNIQUEIDENTIFIER: 'UNIQUEIDENTIFIER'>, <TokenType.ALL: 'ALL'>, <TokenType.RECURSIVE: 'RECURSIVE'>, <TokenType.UBIGINT: 'UBIGINT'>, <TokenType.UINT128: 'UINT128'>, <TokenType.STRUCT: 'STRUCT'>, <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, <TokenType.LONGTEXT: 'LONGTEXT'>, <TokenType.INTERVAL: 'INTERVAL'>, <TokenType.COMMIT: 'COMMIT'>, <TokenType.COPY: 'COPY'>, <TokenType.IPV6: 'IPV6'>, <TokenType.UPDATE: 'UPDATE'>, <TokenType.ENUM8: 'ENUM8'>, <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, <TokenType.ASC: 'ASC'>, <TokenType.TINYTEXT: 'TINYTEXT'>, <TokenType.TAG: 'TAG'>, <TokenType.VIEW: 'VIEW'>, <TokenType.MEDIUMINT: 'MEDIUMINT'>, <TokenType.INT8RANGE: 'INT8RANGE'>, <TokenType.DIV: 'DIV'>, <TokenType.BEGIN: 'BEGIN'>, <TokenType.REFERENCES: 'REFERENCES'>, <TokenType.BPCHAR: 'BPCHAR'>, <TokenType.TSTZRANGE: 'TSTZRANGE'>, <TokenType.UDECIMAL: 'UDECIMAL'>, <TokenType.CURRENT_USER: 'CURRENT_USER'>, <TokenType.SERIAL: 'SERIAL'>, <TokenType.COMMAND: 'COMMAND'>, <TokenType.DEFAULT: 'DEFAULT'>, <TokenType.LOWCARDINALITY: 'LOWCARDINALITY'>, <TokenType.ENUM16: 'ENUM16'>, <TokenType.PSEUDO_TYPE: 'PSEUDO_TYPE'>, <TokenType.CUBE: 'CUBE'>, <TokenType.REPLACE: 'REPLACE'>, <TokenType.MAP: 'MAP'>, <TokenType.FILTER: 'FILTER'>, <TokenType.INT128: 'INT128'>, <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, <TokenType.JSONB: 'JSONB'>, <TokenType.DELETE: 'DELETE'>, <TokenType.FIRST: 'FIRST'>, <TokenType.OVERLAPS: 'OVERLAPS'>, <TokenType.ENUM: 'ENUM'>, <TokenType.AGGREGATEFUNCTION: 'AGGREGATEFUNCTION'>, <TokenType.TIMESTAMP_NS: 'TIMESTAMP_NS'>, <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, <TokenType.NEXT: 'NEXT'>, <TokenType.OPERATOR: 'OPERATOR'>, <TokenType.DATE: 'DATE'>, <TokenType.YEAR: 'YEAR'>, 
<TokenType.USMALLINT: 'USMALLINT'>, <TokenType.TABLE: 'TABLE'>, <TokenType.FLOAT: 'FLOAT'>, <TokenType.COLLATE: 'COLLATE'>, <TokenType.TRUNCATE: 'TRUNCATE'>, <TokenType.VOLATILE: 'VOLATILE'>, <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.FINAL: 'FINAL'>, <TokenType.TDIGEST: 'TDIGEST'>, <TokenType.INT256: 'INT256'>, <TokenType.FUNCTION: 'FUNCTION'>, <TokenType.NVARCHAR: 'NVARCHAR'>, <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, <TokenType.SCHEMA: 'SCHEMA'>, <TokenType.SEQUENCE: 'SEQUENCE'>, <TokenType.BINARY: 'BINARY'>, <TokenType.INDEX: 'INDEX'>, <TokenType.IPADDRESS: 'IPADDRESS'>, <TokenType.IS: 'IS'>, <TokenType.ROLLUP: 'ROLLUP'>, <TokenType.LONGBLOB: 'LONGBLOB'>, <TokenType.ESCAPE: 'ESCAPE'>, <TokenType.DATETIME64: 'DATETIME64'>, <TokenType.DESCRIBE: 'DESCRIBE'>, <TokenType.SHOW: 'SHOW'>, <TokenType.UTINYINT: 'UTINYINT'>, <TokenType.TSRANGE: 'TSRANGE'>, <TokenType.INT: 'INT'>, <TokenType.SIMPLEAGGREGATEFUNCTION: 'SIMPLEAGGREGATEFUNCTION'>, <TokenType.ROWVERSION: 'ROWVERSION'>, <TokenType.NCHAR: 'NCHAR'>, <TokenType.DOUBLE: 'DOUBLE'>, <TokenType.IPV4: 'IPV4'>, <TokenType.OBJECT_IDENTIFIER: 'OBJECT_IDENTIFIER'>, <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, <TokenType.TIMESTAMP_S: 'TIMESTAMP_S'>, <TokenType.HSTORE: 'HSTORE'>, <TokenType.RENAME: 'RENAME'>, <TokenType.OBJECT: 'OBJECT'>, <TokenType.IDENTIFIER: 'IDENTIFIER'>, <TokenType.USE: 'USE'>, <TokenType.SMALLINT: 'SMALLINT'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.PRAGMA: 'PRAGMA'>, <TokenType.LIST: 'LIST'>, <TokenType.PIVOT: 'PIVOT'>, <TokenType.DATABASE: 'DATABASE'>, <TokenType.CHAR: 'CHAR'>, <TokenType.KEEP: 'KEEP'>, <TokenType.UINT256: 'UINT256'>, <TokenType.TEXT: 'TEXT'>, <TokenType.ANTI: 'ANTI'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.JSON: 'JSON'>, <TokenType.WAREHOUSE: 'WAREHOUSE'>, <TokenType.DATERANGE: 'DATERANGE'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, <TokenType.END: 'END'>, <TokenType.FORMAT: 'FORMAT'>, <TokenType.TEMPORARY: 'TEMPORARY'>, 
<TokenType.ARRAY: 'ARRAY'>, <TokenType.TIMESTAMP_MS: 'TIMESTAMP_MS'>, <TokenType.OVERWRITE: 'OVERWRITE'>, <TokenType.MODEL: 'MODEL'>, <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, <TokenType.FALSE: 'FALSE'>, <TokenType.SETTINGS: 'SETTINGS'>, <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, <TokenType.NULLABLE: 'NULLABLE'>, <TokenType.MONEY: 'MONEY'>, <TokenType.DICTIONARY: 'DICTIONARY'>, <TokenType.SUPER: 'SUPER'>, <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, <TokenType.VECTOR: 'VECTOR'>, <TokenType.TINYINT: 'TINYINT'>, <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, <TokenType.SOME: 'SOME'>}
FUNCTIONS = {'ABS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Abs'>>, 'ADD_MONTHS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AddMonths'>>, 'ANONYMOUS_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnonymousAggFunc'>>, 'ANY_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.AnyValue'>>, 'APPROX_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_COUNT_DISTINCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxDistinct'>>, 'APPROX_QUANTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'APPROX_TOP_K': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxTopK'>>, 'ARG_MAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARGMAX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'MAX_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMax'>>, 'ARG_MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARGMIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'MIN_BY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArgMin'>>, 'ARRAY': <function Parser.<lambda>>, 'ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAll'>>, 'ARRAY_ANY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAny'>>, 'ARRAY_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConcat'>>, 'ARRAY_CONSTRUCT_COMPACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayConstructCompact'>>, 'ARRAY_CONTAINS': <function Snowflake.Parser.<lambda>>, 'ARRAY_HAS': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ArrayContains'>>, 'ARRAY_CONTAINS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'ARRAY_HAS_ALL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayContainsAll'>>, 'FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_FILTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayFilter'>>, 'ARRAY_OVERLAPS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayOverlaps'>>, 'ARRAY_SIZE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_LENGTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySize'>>, 'ARRAY_SORT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySort'>>, 'ARRAY_SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArraySum'>>, 'ARRAY_TO_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_JOIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayToString'>>, 'ARRAY_UNION_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUnionAgg'>>, 'ARRAY_UNIQUE_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayUniqueAgg'>>, 'AVG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Avg'>>, 'CASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Case'>>, 'CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cast'>>, 'CAST_TO_STR_TYPE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CastToStrType'>>, 'CBRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Cbrt'>>, 'CEIL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CEILING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ceil'>>, 'CHR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 
'CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Chr'>>, 'COALESCE': <function Parser.<lambda>>, 'IFNULL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'NVL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Coalesce'>>, 'COLLATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Collate'>>, 'COMBINED_AGG_FUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedAggFunc'>>, 'COMBINED_PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CombinedParameterizedAgg'>>, 'CONCAT': <function Parser.<lambda>>, 'CONCAT_WS': <function Parser.<lambda>>, 'CONNECT_BY_ROOT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ConnectByRoot'>>, 'CONVERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Convert'>>, 'CONVERT_TIMEZONE': <function build_convert_timezone>, 'CORR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Corr'>>, 'COUNT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Count'>>, 'COUNT_IF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COUNTIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CountIf'>>, 'COVAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarPop'>>, 'COVAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CovarSamp'>>, 'CURRENT_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDate'>>, 'CURRENT_DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentDatetime'>>, 'CURRENT_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTime'>>, 'CURRENT_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentTimestamp'>>, 'CURRENT_USER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.CurrentUser'>>, 'DATE': <function 
_build_datetime.<locals>._builder>, 'DATE_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateAdd'>>, 'DATEDIFF': <function _build_datediff>, 'DATE_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateDiff'>>, 'DATE_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateFromParts'>>, 'DATE_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateStrToDate'>>, 'DATE_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateSub'>>, 'DATE_TO_DATE_STR': <function Parser.<lambda>>, 'DATE_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DateToDi'>>, 'DATE_TRUNC': <function _date_trunc_to_time>, 'DATETIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Datetime'>>, 'DATETIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeAdd'>>, 'DATETIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeDiff'>>, 'DATETIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeSub'>>, 'DATETIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DatetimeTrunc'>>, 'DAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Day'>>, 'DAY_OF_MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAYOFMONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfMonth'>>, 'DAY_OF_WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAYOFWEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfWeek'>>, 'DAY_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DAYOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DayOfYear'>>, 'DECODE': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.Decode'>>, 'DI_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.DiToDate'>>, 'ENCODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Encode'>>, 'EXP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Exp'>>, 'EXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'EXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodeOuter'>>, 'EXPLODING_GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ExplodingGenerateSeries'>>, 'EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Extract'>>, 'FIRST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.First'>>, 'FIRST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FirstValue'>>, 'FLATTEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Explode'>>, 'FLOOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Floor'>>, 'FROM_BASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase'>>, 'FROM_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromBase64'>>, 'FROM_ISO8601_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.FromISO8601Timestamp'>>, 'GAP_FILL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GapFill'>>, 'GENERATE_DATE_ARRAY': <function Parser.<lambda>>, 'GENERATE_SERIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateSeries'>>, 'GENERATE_TIMESTAMP_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GenerateTimestampArray'>>, 'GREATEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Greatest'>>, 'GROUP_CONCAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'HEX': <function build_hex>, 'HLL': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Hll'>>, 'IF': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'IIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'INITCAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Initcap'>>, 'IS_INF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'ISINF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsInf'>>, 'IS_NAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'ISNAN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.IsNan'>>, 'J_S_O_N_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArray'>>, 'J_S_O_N_ARRAY_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayAgg'>>, 'JSON_ARRAY_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONArrayContains'>>, 'JSONB_CONTAINS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBContains'>>, 'JSONB_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtract'>>, 'JSONB_EXTRACT_SCALAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONBExtractScalar'>>, 'JSON_EXTRACT': <function build_extract_json_with_path.<locals>._builder>, 'JSON_EXTRACT_SCALAR': <function build_extract_json_with_path.<locals>._builder>, 'JSON_FORMAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONFormat'>>, 'J_S_O_N_OBJECT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObject'>>, 'J_S_O_N_OBJECT_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONObjectAgg'>>, 'J_S_O_N_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.JSONTable'>>, 'LAG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lag'>>, 'LAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Last'>>, 'LAST_DAY': <function Snowflake.Parser.<lambda>>, 'LAST_DAY_OF_MONTH': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.LastDay'>>, 'LAST_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LastValue'>>, 'LEAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lead'>>, 'LEAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Least'>>, 'LEFT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Left'>>, 'LENGTH': <function Snowflake.Parser.<lambda>>, 'LEN': <function Snowflake.Parser.<lambda>>, 'LEVENSHTEIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Levenshtein'>>, 'LIST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.List'>>, 'LN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Ln'>>, 'LOG': <function build_logarithm>, 'LOGICAL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOL_AND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'BOOLAND_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalAnd'>>, 'LOGICAL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOL_OR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'BOOLOR_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LogicalOr'>>, 'LOWER': <function build_lower>, 'LCASE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Lower'>>, 'LOWER_HEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.LowerHex'>>, 'MD5': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5'>>, 'MD5_DIGEST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MD5Digest'>>, 'MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Map'>>, 'MAP_FROM_ENTRIES': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MapFromEntries'>>, 'MATCH_AGAINST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MatchAgainst'>>, 'MAX': <bound method Func.from_arg_list 
of <class 'sqlglot.expressions.Max'>>, 'MIN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Min'>>, 'MONTH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Month'>>, 'MONTHS_BETWEEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.MonthsBetween'>>, 'NEXT_VALUE_FOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NextValueFor'>>, 'NTH_VALUE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NthValue'>>, 'NULLIF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nullif'>>, 'NUMBER_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.NumberToStr'>>, 'NVL2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Nvl2'>>, 'OBJECT_INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ObjectInsert'>>, 'OPEN_J_S_O_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.OpenJSON'>>, 'PAD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pad'>>, 'PARAMETERIZED_AGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParameterizedAgg'>>, 'PARSE_JSON': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'JSON_PARSE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ParseJSON'>>, 'PERCENTILE_CONT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileCont'>>, 'PERCENTILE_DISC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PercentileDisc'>>, 'POSEXPLODE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Posexplode'>>, 'POSEXPLODE_OUTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.PosexplodeOuter'>>, 'POWER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'POW': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Pow'>>, 'PREDICT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Predict'>>, 'QUANTILE': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.Quantile'>>, 'QUARTER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Quarter'>>, 'RAND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDOM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Rand'>>, 'RANDN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Randn'>>, 'RANGE_N': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RangeN'>>, 'READ_CSV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ReadCSV'>>, 'REDUCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Reduce'>>, 'REGEXP_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'REGEXP_I_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpILike'>>, 'REGEXP_LIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'REGEXP_REPLACE': <function _build_regexp_replace>, 'REGEXP_SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpSplit'>>, 'REPEAT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Repeat'>>, 'RIGHT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Right'>>, 'ROUND': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Round'>>, 'ROW_NUMBER': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RowNumber'>>, 'SHA': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA1': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA'>>, 'SHA2': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SHA2'>>, 'SAFE_DIVIDE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.SafeDivide'>>, 'SIGN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SIGNUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sign'>>, 'SORT_ARRAY': <bound method Func.from_arg_list of <class 
'sqlglot.expressions.SortArray'>>, 'SPLIT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Split'>>, 'SQRT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sqrt'>>, 'STANDARD_HASH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StandardHash'>>, 'STAR_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StarMap'>>, 'STARTS_WITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STARTSWITH': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StartsWith'>>, 'STDDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDEV': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stddev'>>, 'STDDEV_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevPop'>>, 'STDDEV_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StddevSamp'>>, 'STR_POSITION': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrPosition'>>, 'STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToDate'>>, 'STR_TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToMap'>>, 'STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToTime'>>, 'STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StrToUnix'>>, 'STRING_TO_ARRAY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'SPLIT_BY_STRING': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StringToArray'>>, 'STRUCT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Struct'>>, 'STRUCT_EXTRACT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.StructExtract'>>, 'STUFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'INSERT': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Stuff'>>, 'SUBSTRING': <bound method Func.from_arg_list of 
<class 'sqlglot.expressions.Substring'>>, 'SUM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Sum'>>, 'TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Time'>>, 'TIME_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeAdd'>>, 'TIME_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeDiff'>>, 'TIME_FROM_PARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIMEFROMPARTS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeFromParts'>>, 'TIME_STR_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToDate'>>, 'TIME_STR_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToTime'>>, 'TIME_STR_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeStrToUnix'>>, 'TIME_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeSub'>>, 'TIME_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToStr'>>, 'TIME_TO_TIME_STR': <function Parser.<lambda>>, 'TIME_TO_UNIX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeToUnix'>>, 'TIME_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimeTrunc'>>, 'TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Timestamp'>>, 'TIMESTAMP_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampAdd'>>, 'TIMESTAMPDIFF': <function _build_datediff>, 'TIMESTAMP_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampDiff'>>, 'TIMESTAMP_FROM_PARTS': <function build_timestamp_from_parts>, 'TIMESTAMPFROMPARTS': <function build_timestamp_from_parts>, 'TIMESTAMP_SUB': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampSub'>>, 'TIMESTAMP_TRUNC': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TimestampTrunc'>>, 'TO_ARRAY': <bound method 
Func.from_arg_list of <class 'sqlglot.expressions.ToArray'>>, 'TO_BASE64': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToBase64'>>, 'TO_CHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'TO_DAYS': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToDays'>>, 'TO_MAP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToMap'>>, 'TO_NUMBER': <function Snowflake.Parser.<lambda>>, 'TRANSFORM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Transform'>>, 'TRIM': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Trim'>>, 'TRY': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Try'>>, 'TRY_CAST': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TryCast'>>, 'TS_OR_DI_TO_DI': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDiToDi'>>, 'TS_OR_DS_ADD': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsAdd'>>, 'TS_OR_DS_DIFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsDiff'>>, 'TS_OR_DS_TO_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToDate'>>, 'TS_OR_DS_TO_DATE_STR': <function Parser.<lambda>>, 'TS_OR_DS_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTime'>>, 'TS_OR_DS_TO_TIMESTAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.TsOrDsToTimestamp'>>, 'UNHEX': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Unhex'>>, 'UNIX_DATE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixDate'>>, 'UNIX_TO_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToStr'>>, 'UNIX_TO_TIME': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTime'>>, 'UNIX_TO_TIME_STR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.UnixToTimeStr'>>, 'UNNEST': <function Parser.<lambda>>, 'UPPER': <function build_upper>, 'UCASE': <bound 
method Func.from_arg_list of <class 'sqlglot.expressions.Upper'>>, 'VAR_MAP': <function build_var_map>, 'VARIANCE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VAR_SAMP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Variance'>>, 'VARIANCE_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'VAR_POP': <bound method Func.from_arg_list of <class 'sqlglot.expressions.VariancePop'>>, 'WEEK': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Week'>>, 'WEEK_OF_YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WEEKOFYEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.WeekOfYear'>>, 'WHEN': <bound method Func.from_arg_list of <class 'sqlglot.expressions.When'>>, 'X_M_L_TABLE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.XMLTable'>>, 'XOR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Xor'>>, 'YEAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.Year'>>, 'GLOB': <function Parser.<lambda>>, 'JSON_EXTRACT_PATH_TEXT': <function build_extract_json_with_path.<locals>._builder>, 'LIKE': <function build_like>, 'LOG2': <function Parser.<lambda>>, 'LOG10': <function Parser.<lambda>>, 'LPAD': <function Parser.<lambda>>, 'LEFTPAD': <function Parser.<lambda>>, 'LTRIM': <function Parser.<lambda>>, 'MOD': <function build_mod>, 'RIGHTPAD': <function Parser.<lambda>>, 'RPAD': <function Parser.<lambda>>, 'RTRIM': <function Parser.<lambda>>, 'SCOPE_RESOLUTION': <function Parser.<lambda>>, 'TO_HEX': <function build_hex>, 'APPROX_PERCENTILE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ApproxQuantile'>>, 'ARRAYAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ArrayAgg'>>, 'ARRAY_CONSTRUCT': <function Snowflake.Parser.<lambda>>, 'ARRAY_GENERATE_RANGE': 
<function Snowflake.Parser.<lambda>>, 'BITXOR': <function binary_from_function.<locals>.<lambda>>, 'BIT_XOR': <function binary_from_function.<locals>.<lambda>>, 'BOOLXOR': <function binary_from_function.<locals>.<lambda>>, 'DATEADD': <function _build_date_time_add.<locals>._builder>, 'DIV0': <function _build_if_from_div0>, 'GET_PATH': <function Snowflake.Parser.<lambda>>, 'IFF': <bound method Func.from_arg_list of <class 'sqlglot.expressions.If'>>, 'LISTAGG': <bound method Func.from_arg_list of <class 'sqlglot.expressions.GroupConcat'>>, 'MEDIAN': <function Snowflake.Parser.<lambda>>, 'NULLIFZERO': <function _build_if_from_nullifzero>, 'OBJECT_CONSTRUCT': <function _build_object_construct>, 'REGEXP_SUBSTR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpExtract'>>, 'RLIKE': <bound method Func.from_arg_list of <class 'sqlglot.expressions.RegexpLike'>>, 'SQUARE': <function Snowflake.Parser.<lambda>>, 'TIMEADD': <function _build_date_time_add.<locals>._builder>, 'TIMEDIFF': <function _build_datediff>, 'TIMESTAMPADD': <function _build_date_time_add.<locals>._builder>, 'TRY_PARSE_JSON': <function Snowflake.Parser.<lambda>>, 'TRY_TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_DATE': <function _build_datetime.<locals>._builder>, 'TO_TIME': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_LTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_NTZ': <function _build_datetime.<locals>._builder>, 'TO_TIMESTAMP_TZ': <function _build_datetime.<locals>._builder>, 'TO_VARCHAR': <bound method Func.from_arg_list of <class 'sqlglot.expressions.ToChar'>>, 'ZEROIFNULL': <function _build_if_from_zeroifnull>}
FUNCTION_PARSERS = {'CAST': <function Parser.<lambda>>, 'CONVERT': <function Parser.<lambda>>, 'DECODE': <function Parser.<lambda>>, 'EXTRACT': <function Parser.<lambda>>, 'GAP_FILL': <function Parser.<lambda>>, 'JSON_OBJECT': <function Parser.<lambda>>, 'JSON_OBJECTAGG': <function Parser.<lambda>>, 'JSON_TABLE': <function Parser.<lambda>>, 'MATCH': <function Parser.<lambda>>, 'OPENJSON': <function Parser.<lambda>>, 'POSITION': <function Parser.<lambda>>, 'PREDICT': <function Parser.<lambda>>, 'SAFE_CAST': <function Parser.<lambda>>, 'STRING_AGG': <function Parser.<lambda>>, 'SUBSTRING': <function Parser.<lambda>>, 'TRY_CAST': <function Parser.<lambda>>, 'TRY_CONVERT': <function Parser.<lambda>>, 'DATE_PART': <function Snowflake.Parser.<lambda>>, 'OBJECT_CONSTRUCT_KEEP_NULL': <function Snowflake.Parser.<lambda>>}
TIMESTAMPS = {<TokenType.TIMESTAMP: 'TIMESTAMP'>, <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, <TokenType.TIMETZ: 'TIMETZ'>, <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>}
RANGE_PARSERS = {<TokenType.BETWEEN: 'BETWEEN'>: <function Parser.<lambda>>, <TokenType.GLOB: 'GLOB'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE: 'ILIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IN: 'IN'>: <function Parser.<lambda>>, <TokenType.IRLIKE: 'IRLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.IS: 'IS'>: <function Parser.<lambda>>, <TokenType.LIKE: 'LIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.OVERLAPS: 'OVERLAPS'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.RLIKE: 'RLIKE'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.SIMILAR_TO: 'SIMILAR_TO'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.FOR: 'FOR'>: <function Parser.<lambda>>, <TokenType.LIKE_ANY: 'LIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>, <TokenType.ILIKE_ANY: 'ILIKE_ANY'>: <function binary_range_parser.<locals>._parse_binary_range>}
ALTER_PARSERS = {'ADD': <function Parser.<lambda>>, 'ALTER': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'DELETE': <function Parser.<lambda>>, 'DROP': <function Parser.<lambda>>, 'RENAME': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'AS': <function Parser.<lambda>>, 'UNSET': <function Snowflake.Parser.<lambda>>, 'SWAP': <function Snowflake.Parser.<lambda>>}
STATEMENT_PARSERS = {<TokenType.ALTER: 'ALTER'>: <function Parser.<lambda>>, <TokenType.BEGIN: 'BEGIN'>: <function Parser.<lambda>>, <TokenType.CACHE: 'CACHE'>: <function Parser.<lambda>>, <TokenType.COMMENT: 'COMMENT'>: <function Parser.<lambda>>, <TokenType.COMMIT: 'COMMIT'>: <function Parser.<lambda>>, <TokenType.COPY: 'COPY'>: <function Parser.<lambda>>, <TokenType.CREATE: 'CREATE'>: <function Parser.<lambda>>, <TokenType.DELETE: 'DELETE'>: <function Parser.<lambda>>, <TokenType.DESC: 'DESC'>: <function Parser.<lambda>>, <TokenType.DESCRIBE: 'DESCRIBE'>: <function Parser.<lambda>>, <TokenType.DROP: 'DROP'>: <function Parser.<lambda>>, <TokenType.INSERT: 'INSERT'>: <function Parser.<lambda>>, <TokenType.KILL: 'KILL'>: <function Parser.<lambda>>, <TokenType.LOAD: 'LOAD'>: <function Parser.<lambda>>, <TokenType.MERGE: 'MERGE'>: <function Parser.<lambda>>, <TokenType.PIVOT: 'PIVOT'>: <function Parser.<lambda>>, <TokenType.PRAGMA: 'PRAGMA'>: <function Parser.<lambda>>, <TokenType.REFRESH: 'REFRESH'>: <function Parser.<lambda>>, <TokenType.ROLLBACK: 'ROLLBACK'>: <function Parser.<lambda>>, <TokenType.SET: 'SET'>: <function Parser.<lambda>>, <TokenType.TRUNCATE: 'TRUNCATE'>: <function Parser.<lambda>>, <TokenType.UNCACHE: 'UNCACHE'>: <function Parser.<lambda>>, <TokenType.UPDATE: 'UPDATE'>: <function Parser.<lambda>>, <TokenType.USE: 'USE'>: <function Parser.<lambda>>, <TokenType.SEMICOLON: 'SEMICOLON'>: <function Parser.<lambda>>, <TokenType.SHOW: 'SHOW'>: <function Snowflake.Parser.<lambda>>}
PROPERTY_PARSERS = {'ALLOWED_VALUES': <function Parser.<lambda>>, 'ALGORITHM': <function Parser.<lambda>>, 'AUTO': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'BACKUP': <function Parser.<lambda>>, 'BLOCKCOMPRESSION': <function Parser.<lambda>>, 'CHARSET': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECKSUM': <function Parser.<lambda>>, 'CLUSTER BY': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'CONTAINS': <function Parser.<lambda>>, 'COPY': <function Parser.<lambda>>, 'DATABLOCKSIZE': <function Parser.<lambda>>, 'DATA_DELETION': <function Parser.<lambda>>, 'DEFINER': <function Parser.<lambda>>, 'DETERMINISTIC': <function Parser.<lambda>>, 'DYNAMIC': <function Parser.<lambda>>, 'DISTKEY': <function Parser.<lambda>>, 'DISTSTYLE': <function Parser.<lambda>>, 'EMPTY': <function Parser.<lambda>>, 'ENGINE': <function Parser.<lambda>>, 'EXECUTE': <function Parser.<lambda>>, 'EXTERNAL': <function Parser.<lambda>>, 'FALLBACK': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'FREESPACE': <function Parser.<lambda>>, 'GLOBAL': <function Parser.<lambda>>, 'HEAP': <function Parser.<lambda>>, 'ICEBERG': <function Parser.<lambda>>, 'IMMUTABLE': <function Parser.<lambda>>, 'INHERITS': <function Parser.<lambda>>, 'INPUT': <function Parser.<lambda>>, 'JOURNAL': <function Parser.<lambda>>, 'LANGUAGE': <function Parser.<lambda>>, 'LAYOUT': <function Parser.<lambda>>, 'LIFETIME': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'LOCATION': <function Snowflake.Parser.<lambda>>, 'LOCK': <function Parser.<lambda>>, 'LOCKING': <function Parser.<lambda>>, 'LOG': <function Parser.<lambda>>, 'MATERIALIZED': <function Parser.<lambda>>, 'MERGEBLOCKRATIO': <function Parser.<lambda>>, 'MODIFIES': <function Parser.<lambda>>, 'MULTISET': <function Parser.<lambda>>, 'NO': <function Parser.<lambda>>, 'ON': 
<function Parser.<lambda>>, 'ORDER BY': <function Parser.<lambda>>, 'OUTPUT': <function Parser.<lambda>>, 'PARTITION': <function Parser.<lambda>>, 'PARTITION BY': <function Parser.<lambda>>, 'PARTITIONED BY': <function Parser.<lambda>>, 'PARTITIONED_BY': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'RANGE': <function Parser.<lambda>>, 'READS': <function Parser.<lambda>>, 'REMOTE': <function Parser.<lambda>>, 'RETURNS': <function Parser.<lambda>>, 'STRICT': <function Parser.<lambda>>, 'STREAMING': <function Parser.<lambda>>, 'ROW': <function Parser.<lambda>>, 'ROW_FORMAT': <function Parser.<lambda>>, 'SAMPLE': <function Parser.<lambda>>, 'SECURE': <function Parser.<lambda>>, 'SET': <function Parser.<lambda>>, 'SETTINGS': <function Parser.<lambda>>, 'SHARING': <function Parser.<lambda>>, 'SORTKEY': <function Parser.<lambda>>, 'SOURCE': <function Parser.<lambda>>, 'STABLE': <function Parser.<lambda>>, 'STORED': <function Parser.<lambda>>, 'SYSTEM_VERSIONING': <function Parser.<lambda>>, 'TBLPROPERTIES': <function Parser.<lambda>>, 'TEMP': <function Parser.<lambda>>, 'TEMPORARY': <function Parser.<lambda>>, 'TO': <function Parser.<lambda>>, 'TRANSIENT': <function Parser.<lambda>>, 'TRANSFORM': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'USING': <function Parser.<lambda>>, 'UNLOGGED': <function Parser.<lambda>>, 'VOLATILE': <function Parser.<lambda>>, 'WITH': <function Parser.<lambda>>}
TYPE_CONVERTERS = {<Type.DECIMAL: 'DECIMAL'>: <function build_default_decimal_type.<locals>._builder>}
SHOW_PARSERS = {'SCHEMAS': <function _show_parser.<locals>._parse>, 'TERSE SCHEMAS': <function _show_parser.<locals>._parse>, 'OBJECTS': <function _show_parser.<locals>._parse>, 'TERSE OBJECTS': <function _show_parser.<locals>._parse>, 'TABLES': <function _show_parser.<locals>._parse>, 'TERSE TABLES': <function _show_parser.<locals>._parse>, 'VIEWS': <function _show_parser.<locals>._parse>, 'TERSE VIEWS': <function _show_parser.<locals>._parse>, 'PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'TERSE PRIMARY KEYS': <function _show_parser.<locals>._parse>, 'IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'TERSE IMPORTED KEYS': <function _show_parser.<locals>._parse>, 'UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'TERSE UNIQUE KEYS': <function _show_parser.<locals>._parse>, 'SEQUENCES': <function _show_parser.<locals>._parse>, 'TERSE SEQUENCES': <function _show_parser.<locals>._parse>, 'COLUMNS': <function _show_parser.<locals>._parse>, 'USERS': <function _show_parser.<locals>._parse>, 'TERSE USERS': <function _show_parser.<locals>._parse>}
CONSTRAINT_PARSERS = {'AUTOINCREMENT': <function Parser.<lambda>>, 'AUTO_INCREMENT': <function Parser.<lambda>>, 'CASESPECIFIC': <function Parser.<lambda>>, 'CHARACTER SET': <function Parser.<lambda>>, 'CHECK': <function Parser.<lambda>>, 'COLLATE': <function Parser.<lambda>>, 'COMMENT': <function Parser.<lambda>>, 'COMPRESS': <function Parser.<lambda>>, 'CLUSTERED': <function Parser.<lambda>>, 'NONCLUSTERED': <function Parser.<lambda>>, 'DEFAULT': <function Parser.<lambda>>, 'ENCODE': <function Parser.<lambda>>, 'EPHEMERAL': <function Parser.<lambda>>, 'EXCLUDE': <function Parser.<lambda>>, 'FOREIGN KEY': <function Parser.<lambda>>, 'FORMAT': <function Parser.<lambda>>, 'GENERATED': <function Parser.<lambda>>, 'IDENTITY': <function Parser.<lambda>>, 'INLINE': <function Parser.<lambda>>, 'LIKE': <function Parser.<lambda>>, 'NOT': <function Parser.<lambda>>, 'NULL': <function Parser.<lambda>>, 'ON': <function Parser.<lambda>>, 'PATH': <function Parser.<lambda>>, 'PERIOD': <function Parser.<lambda>>, 'PRIMARY KEY': <function Parser.<lambda>>, 'REFERENCES': <function Parser.<lambda>>, 'TITLE': <function Parser.<lambda>>, 'TTL': <function Parser.<lambda>>, 'UNIQUE': <function Parser.<lambda>>, 'UPPERCASE': <function Parser.<lambda>>, 'WITH': <function Snowflake.Parser.<lambda>>, 'MASKING': <function Snowflake.Parser.<lambda>>, 'PROJECTION': <function Snowflake.Parser.<lambda>>, 'TAG': <function Snowflake.Parser.<lambda>>}
STAGED_FILE_SINGLE_TOKENS = {<TokenType.DOT: 'DOT'>, <TokenType.SLASH: 'SLASH'>, <TokenType.MOD: 'MOD'>}
FLATTEN_COLUMNS = ['SEQ', 'KEY', 'PATH', 'INDEX', 'VALUE', 'THIS']
SCHEMA_KINDS = {'IMPORTED KEYS', 'VIEWS', 'TABLES', 'OBJECTS', 'UNIQUE KEYS', 'SEQUENCES'}
NON_TABLE_CREATABLES = {'STREAMLIT', 'WAREHOUSE', 'TAG', 'STORAGE INTEGRATION'}
LAMBDAS = {<TokenType.ARROW: 'ARROW'>: <function Snowflake.Parser.<lambda>>, <TokenType.FARROW: 'FARROW'>: <function Parser.<lambda>>}
SHOW_TRIE: Dict = {'SCHEMAS': {0: True}, 'TERSE': {'SCHEMAS': {0: True}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'USERS': {0: True}}, 'OBJECTS': {0: True}, 'TABLES': {0: True}, 'VIEWS': {0: True}, 'PRIMARY': {'KEYS': {0: True}}, 'IMPORTED': {'KEYS': {0: True}}, 'UNIQUE': {'KEYS': {0: True}}, 'SEQUENCES': {0: True}, 'COLUMNS': {0: True}, 'USERS': {0: True}}
SET_TRIE: Dict = {'GLOBAL': {0: True}, 'LOCAL': {0: True}, 'SESSION': {0: True}, 'TRANSACTION': {0: True}}
Inherited Members
sqlglot.parser.Parser
Parser
NO_PAREN_FUNCTIONS
STRUCT_TYPE_TOKENS
NESTED_TYPE_TOKENS
ENUM_TYPE_TOKENS
AGGREGATE_TYPE_TOKENS
TYPE_TOKENS
SIGNED_TO_UNSIGNED_TYPE_TOKEN
SUBQUERY_PREDICATES
RESERVED_TOKENS
DB_CREATABLES
CREATABLES
ALTERABLES
INTERVAL_VARS
ALIAS_TOKENS
ARRAY_CONSTRUCTORS
COMMENT_TABLE_ALIAS_TOKENS
UPDATE_ALIAS_TOKENS
TRIM_TYPES
FUNC_TOKENS
CONJUNCTION
ASSIGNMENT
DISJUNCTION
EQUALITY
COMPARISON
BITWISE
TERM
FACTOR
EXPONENT
TIMES
SET_OPERATIONS
JOIN_METHODS
JOIN_SIDES
JOIN_KINDS
JOIN_HINTS
COLUMN_OPERATORS
EXPRESSION_PARSERS
UNARY_PARSERS
STRING_PARSERS
NUMERIC_PARSERS
PRIMARY_PARSERS
PLACEHOLDER_PARSERS
ALTER_ALTER_PARSERS
SCHEMA_UNNAMED_CONSTRAINTS
NO_PAREN_FUNCTION_PARSERS
INVALID_FUNC_NAME_TOKENS
FUNCTIONS_WITH_ALIASED_ARGS
KEY_VALUE_DEFINITIONS
QUERY_MODIFIER_PARSERS
SET_PARSERS
TYPE_LITERAL_PARSERS
DDL_SELECT_TOKENS
PRE_VOLATILE_TOKENS
TRANSACTION_KIND
TRANSACTION_CHARACTERISTICS
CONFLICT_ACTIONS
CREATE_SEQUENCE
ISOLATED_LOADING_OPTIONS
USABLES
CAST_ACTIONS
SCHEMA_BINDING_OPTIONS
KEY_CONSTRAINT_OPTIONS
INSERT_ALTERNATIVES
CLONE_KEYWORDS
HISTORICAL_DATA_PREFIX
HISTORICAL_DATA_KIND
OPCLASS_FOLLOW_KEYWORDS
OPTYPE_FOLLOW_TOKENS
TABLE_INDEX_HINT_TOKENS
VIEW_ATTRIBUTES
WINDOW_ALIAS_TOKENS
WINDOW_BEFORE_PAREN_TOKENS
WINDOW_SIDES
JSON_KEY_VALUE_SEPARATOR_TOKENS
FETCH_TOKENS
ADD_CONSTRAINT_TOKENS
DISTINCT_TOKENS
NULL_TOKENS
UNNEST_OFFSET_ALIAS_TOKENS
SELECT_START_TOKENS
COPY_INTO_VARLEN_OPTIONS
IS_JSON_PREDICATE_KIND
STRICT_CAST
PREFIXED_PIVOT_COLUMNS
LOG_DEFAULTS_TO_LN
ALTER_TABLE_ADD_REQUIRED_FOR_EACH_COLUMN
TABLESAMPLE_CSV
SET_REQUIRES_ASSIGNMENT_DELIMITER
TRIM_PATTERN_FIRST
STRING_ALIASES
MODIFIERS_ATTACHED_TO_SET_OP
SET_OP_MODIFIERS
NO_PAREN_IF_COMMANDS
JSON_ARROWS_REQUIRE_JSON_TYPE
VALUES_FOLLOWED_BY_PAREN
SUPPORTS_IMPLICIT_UNNEST
INTERVAL_SPANS
SUPPORTS_PARTITION_SELECTION
error_level
error_message_context
max_errors
dialect
reset
parse
parse_into
check_errors
raise_error
expression
validate_expression
errors
sql
class Snowflake.Tokenizer(sqlglot.tokens.Tokenizer):
693    class Tokenizer(tokens.Tokenizer):
694        STRING_ESCAPES = ["\\", "'"]
695        HEX_STRINGS = [("x'", "'"), ("X'", "'")]
696        RAW_STRINGS = ["$$"]
697        COMMENTS = ["--", "//", ("/*", "*/")]
698        NESTED_COMMENTS = False
699
700        KEYWORDS = {
701            **tokens.Tokenizer.KEYWORDS,
702            "BYTEINT": TokenType.INT,
703            "CHAR VARYING": TokenType.VARCHAR,
704            "CHARACTER VARYING": TokenType.VARCHAR,
705            "EXCLUDE": TokenType.EXCEPT,
706            "ILIKE ANY": TokenType.ILIKE_ANY,
707            "LIKE ANY": TokenType.LIKE_ANY,
708            "MATCH_CONDITION": TokenType.MATCH_CONDITION,
709            "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
710            "MINUS": TokenType.EXCEPT,
711            "NCHAR VARYING": TokenType.VARCHAR,
712            "PUT": TokenType.COMMAND,
713            "REMOVE": TokenType.COMMAND,
714            "RM": TokenType.COMMAND,
715            "SAMPLE": TokenType.TABLE_SAMPLE,
716            "SQL_DOUBLE": TokenType.DOUBLE,
717            "SQL_VARCHAR": TokenType.VARCHAR,
718            "STORAGE INTEGRATION": TokenType.STORAGE_INTEGRATION,
719            "TAG": TokenType.TAG,
720            "TIMESTAMP_TZ": TokenType.TIMESTAMPTZ,
721            "TOP": TokenType.TOP,
722            "WAREHOUSE": TokenType.WAREHOUSE,
723            "STREAMLIT": TokenType.STREAMLIT,
724        }
725        KEYWORDS.pop("/*+")
726
727        SINGLE_TOKENS = {
728            **tokens.Tokenizer.SINGLE_TOKENS,
729            "$": TokenType.PARAMETER,
730        }
731
732        VAR_SINGLE_TOKENS = {"$"}
733
734        COMMANDS = tokens.Tokenizer.COMMANDS - {TokenType.SHOW}
STRING_ESCAPES = ['\\', "'"]
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
RAW_STRINGS = ['$$']
COMMENTS = ['--', '//', ('/*', '*/')]
NESTED_COMMENTS = False
KEYWORDS = {'{%': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{%-': <TokenType.BLOCK_START: 'BLOCK_START'>, '%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '+%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-%}': <TokenType.BLOCK_END: 'BLOCK_END'>, '{{+': <TokenType.BLOCK_START: 'BLOCK_START'>, '{{-': <TokenType.BLOCK_START: 'BLOCK_START'>, '+}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '-}}': <TokenType.BLOCK_END: 'BLOCK_END'>, '==': <TokenType.EQ: 'EQ'>, '::': <TokenType.DCOLON: 'DCOLON'>, '||': <TokenType.DPIPE: 'DPIPE'>, '>=': <TokenType.GTE: 'GTE'>, '<=': <TokenType.LTE: 'LTE'>, '<>': <TokenType.NEQ: 'NEQ'>, '!=': <TokenType.NEQ: 'NEQ'>, ':=': <TokenType.COLON_EQ: 'COLON_EQ'>, '<=>': <TokenType.NULLSAFE_EQ: 'NULLSAFE_EQ'>, '->': <TokenType.ARROW: 'ARROW'>, '->>': <TokenType.DARROW: 'DARROW'>, '=>': <TokenType.FARROW: 'FARROW'>, '#>': <TokenType.HASH_ARROW: 'HASH_ARROW'>, '#>>': <TokenType.DHASH_ARROW: 'DHASH_ARROW'>, '<->': <TokenType.LR_ARROW: 'LR_ARROW'>, '&&': <TokenType.DAMP: 'DAMP'>, '??': <TokenType.DQMARK: 'DQMARK'>, 'ALL': <TokenType.ALL: 'ALL'>, 'ALWAYS': <TokenType.ALWAYS: 'ALWAYS'>, 'AND': <TokenType.AND: 'AND'>, 'ANTI': <TokenType.ANTI: 'ANTI'>, 'ANY': <TokenType.ANY: 'ANY'>, 'ASC': <TokenType.ASC: 'ASC'>, 'AS': <TokenType.ALIAS: 'ALIAS'>, 'ASOF': <TokenType.ASOF: 'ASOF'>, 'AUTOINCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'AUTO_INCREMENT': <TokenType.AUTO_INCREMENT: 'AUTO_INCREMENT'>, 'BEGIN': <TokenType.BEGIN: 'BEGIN'>, 'BETWEEN': <TokenType.BETWEEN: 'BETWEEN'>, 'CACHE': <TokenType.CACHE: 'CACHE'>, 'UNCACHE': <TokenType.UNCACHE: 'UNCACHE'>, 'CASE': <TokenType.CASE: 'CASE'>, 'CHARACTER SET': <TokenType.CHARACTER_SET: 'CHARACTER_SET'>, 'CLUSTER BY': <TokenType.CLUSTER_BY: 'CLUSTER_BY'>, 'COLLATE': <TokenType.COLLATE: 'COLLATE'>, 'COLUMN': <TokenType.COLUMN: 'COLUMN'>, 'COMMIT': <TokenType.COMMIT: 'COMMIT'>, 'CONNECT BY': <TokenType.CONNECT_BY: 'CONNECT_BY'>, 'CONSTRAINT': <TokenType.CONSTRAINT: 'CONSTRAINT'>, 
'COPY': <TokenType.COPY: 'COPY'>, 'CREATE': <TokenType.CREATE: 'CREATE'>, 'CROSS': <TokenType.CROSS: 'CROSS'>, 'CUBE': <TokenType.CUBE: 'CUBE'>, 'CURRENT_DATE': <TokenType.CURRENT_DATE: 'CURRENT_DATE'>, 'CURRENT_TIME': <TokenType.CURRENT_TIME: 'CURRENT_TIME'>, 'CURRENT_TIMESTAMP': <TokenType.CURRENT_TIMESTAMP: 'CURRENT_TIMESTAMP'>, 'CURRENT_USER': <TokenType.CURRENT_USER: 'CURRENT_USER'>, 'DATABASE': <TokenType.DATABASE: 'DATABASE'>, 'DEFAULT': <TokenType.DEFAULT: 'DEFAULT'>, 'DELETE': <TokenType.DELETE: 'DELETE'>, 'DESC': <TokenType.DESC: 'DESC'>, 'DESCRIBE': <TokenType.DESCRIBE: 'DESCRIBE'>, 'DISTINCT': <TokenType.DISTINCT: 'DISTINCT'>, 'DISTRIBUTE BY': <TokenType.DISTRIBUTE_BY: 'DISTRIBUTE_BY'>, 'DIV': <TokenType.DIV: 'DIV'>, 'DROP': <TokenType.DROP: 'DROP'>, 'ELSE': <TokenType.ELSE: 'ELSE'>, 'END': <TokenType.END: 'END'>, 'ENUM': <TokenType.ENUM: 'ENUM'>, 'ESCAPE': <TokenType.ESCAPE: 'ESCAPE'>, 'EXCEPT': <TokenType.EXCEPT: 'EXCEPT'>, 'EXECUTE': <TokenType.EXECUTE: 'EXECUTE'>, 'EXISTS': <TokenType.EXISTS: 'EXISTS'>, 'FALSE': <TokenType.FALSE: 'FALSE'>, 'FETCH': <TokenType.FETCH: 'FETCH'>, 'FILTER': <TokenType.FILTER: 'FILTER'>, 'FIRST': <TokenType.FIRST: 'FIRST'>, 'FULL': <TokenType.FULL: 'FULL'>, 'FUNCTION': <TokenType.FUNCTION: 'FUNCTION'>, 'FOR': <TokenType.FOR: 'FOR'>, 'FOREIGN KEY': <TokenType.FOREIGN_KEY: 'FOREIGN_KEY'>, 'FORMAT': <TokenType.FORMAT: 'FORMAT'>, 'FROM': <TokenType.FROM: 'FROM'>, 'GEOGRAPHY': <TokenType.GEOGRAPHY: 'GEOGRAPHY'>, 'GEOMETRY': <TokenType.GEOMETRY: 'GEOMETRY'>, 'GLOB': <TokenType.GLOB: 'GLOB'>, 'GROUP BY': <TokenType.GROUP_BY: 'GROUP_BY'>, 'GROUPING SETS': <TokenType.GROUPING_SETS: 'GROUPING_SETS'>, 'HAVING': <TokenType.HAVING: 'HAVING'>, 'ILIKE': <TokenType.ILIKE: 'ILIKE'>, 'IN': <TokenType.IN: 'IN'>, 'INDEX': <TokenType.INDEX: 'INDEX'>, 'INET': <TokenType.INET: 'INET'>, 'INNER': <TokenType.INNER: 'INNER'>, 'INSERT': <TokenType.INSERT: 'INSERT'>, 'INTERVAL': <TokenType.INTERVAL: 'INTERVAL'>, 'INTERSECT': <TokenType.INTERSECT: 
'INTERSECT'>, 'INTO': <TokenType.INTO: 'INTO'>, 'IS': <TokenType.IS: 'IS'>, 'ISNULL': <TokenType.ISNULL: 'ISNULL'>, 'JOIN': <TokenType.JOIN: 'JOIN'>, 'KEEP': <TokenType.KEEP: 'KEEP'>, 'KILL': <TokenType.KILL: 'KILL'>, 'LATERAL': <TokenType.LATERAL: 'LATERAL'>, 'LEFT': <TokenType.LEFT: 'LEFT'>, 'LIKE': <TokenType.LIKE: 'LIKE'>, 'LIMIT': <TokenType.LIMIT: 'LIMIT'>, 'LOAD': <TokenType.LOAD: 'LOAD'>, 'LOCK': <TokenType.LOCK: 'LOCK'>, 'MERGE': <TokenType.MERGE: 'MERGE'>, 'NATURAL': <TokenType.NATURAL: 'NATURAL'>, 'NEXT': <TokenType.NEXT: 'NEXT'>, 'NOT': <TokenType.NOT: 'NOT'>, 'NOTNULL': <TokenType.NOTNULL: 'NOTNULL'>, 'NULL': <TokenType.NULL: 'NULL'>, 'OBJECT': <TokenType.OBJECT: 'OBJECT'>, 'OFFSET': <TokenType.OFFSET: 'OFFSET'>, 'ON': <TokenType.ON: 'ON'>, 'OR': <TokenType.OR: 'OR'>, 'XOR': <TokenType.XOR: 'XOR'>, 'ORDER BY': <TokenType.ORDER_BY: 'ORDER_BY'>, 'ORDINALITY': <TokenType.ORDINALITY: 'ORDINALITY'>, 'OUTER': <TokenType.OUTER: 'OUTER'>, 'OVER': <TokenType.OVER: 'OVER'>, 'OVERLAPS': <TokenType.OVERLAPS: 'OVERLAPS'>, 'OVERWRITE': <TokenType.OVERWRITE: 'OVERWRITE'>, 'PARTITION': <TokenType.PARTITION: 'PARTITION'>, 'PARTITION BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PARTITIONED_BY': <TokenType.PARTITION_BY: 'PARTITION_BY'>, 'PERCENT': <TokenType.PERCENT: 'PERCENT'>, 'PIVOT': <TokenType.PIVOT: 'PIVOT'>, 'PRAGMA': <TokenType.PRAGMA: 'PRAGMA'>, 'PRIMARY KEY': <TokenType.PRIMARY_KEY: 'PRIMARY_KEY'>, 'PROCEDURE': <TokenType.PROCEDURE: 'PROCEDURE'>, 'QUALIFY': <TokenType.QUALIFY: 'QUALIFY'>, 'RANGE': <TokenType.RANGE: 'RANGE'>, 'RECURSIVE': <TokenType.RECURSIVE: 'RECURSIVE'>, 'REGEXP': <TokenType.RLIKE: 'RLIKE'>, 'RENAME': <TokenType.RENAME: 'RENAME'>, 'REPLACE': <TokenType.REPLACE: 'REPLACE'>, 'RETURNING': <TokenType.RETURNING: 'RETURNING'>, 'REFERENCES': <TokenType.REFERENCES: 'REFERENCES'>, 'RIGHT': <TokenType.RIGHT: 'RIGHT'>, 'RLIKE': <TokenType.RLIKE: 'RLIKE'>, 'ROLLBACK': <TokenType.ROLLBACK: 
'ROLLBACK'>, 'ROLLUP': <TokenType.ROLLUP: 'ROLLUP'>, 'ROW': <TokenType.ROW: 'ROW'>, 'ROWS': <TokenType.ROWS: 'ROWS'>, 'SCHEMA': <TokenType.SCHEMA: 'SCHEMA'>, 'SELECT': <TokenType.SELECT: 'SELECT'>, 'SEMI': <TokenType.SEMI: 'SEMI'>, 'SET': <TokenType.SET: 'SET'>, 'SETTINGS': <TokenType.SETTINGS: 'SETTINGS'>, 'SHOW': <TokenType.SHOW: 'SHOW'>, 'SIMILAR TO': <TokenType.SIMILAR_TO: 'SIMILAR_TO'>, 'SOME': <TokenType.SOME: 'SOME'>, 'SORT BY': <TokenType.SORT_BY: 'SORT_BY'>, 'START WITH': <TokenType.START_WITH: 'START_WITH'>, 'STRAIGHT_JOIN': <TokenType.STRAIGHT_JOIN: 'STRAIGHT_JOIN'>, 'TABLE': <TokenType.TABLE: 'TABLE'>, 'TABLESAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'TEMP': <TokenType.TEMPORARY: 'TEMPORARY'>, 'TEMPORARY': <TokenType.TEMPORARY: 'TEMPORARY'>, 'THEN': <TokenType.THEN: 'THEN'>, 'TRUE': <TokenType.TRUE: 'TRUE'>, 'TRUNCATE': <TokenType.TRUNCATE: 'TRUNCATE'>, 'UNION': <TokenType.UNION: 'UNION'>, 'UNKNOWN': <TokenType.UNKNOWN: 'UNKNOWN'>, 'UNNEST': <TokenType.UNNEST: 'UNNEST'>, 'UNPIVOT': <TokenType.UNPIVOT: 'UNPIVOT'>, 'UPDATE': <TokenType.UPDATE: 'UPDATE'>, 'USE': <TokenType.USE: 'USE'>, 'USING': <TokenType.USING: 'USING'>, 'UUID': <TokenType.UUID: 'UUID'>, 'VALUES': <TokenType.VALUES: 'VALUES'>, 'VIEW': <TokenType.VIEW: 'VIEW'>, 'VOLATILE': <TokenType.VOLATILE: 'VOLATILE'>, 'WHEN': <TokenType.WHEN: 'WHEN'>, 'WHERE': <TokenType.WHERE: 'WHERE'>, 'WINDOW': <TokenType.WINDOW: 'WINDOW'>, 'WITH': <TokenType.WITH: 'WITH'>, 'APPLY': <TokenType.APPLY: 'APPLY'>, 'ARRAY': <TokenType.ARRAY: 'ARRAY'>, 'BIT': <TokenType.BIT: 'BIT'>, 'BOOL': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BOOLEAN': <TokenType.BOOLEAN: 'BOOLEAN'>, 'BYTE': <TokenType.TINYINT: 'TINYINT'>, 'MEDIUMINT': <TokenType.MEDIUMINT: 'MEDIUMINT'>, 'INT1': <TokenType.TINYINT: 'TINYINT'>, 'TINYINT': <TokenType.TINYINT: 'TINYINT'>, 'INT16': <TokenType.SMALLINT: 'SMALLINT'>, 'SHORT': <TokenType.SMALLINT: 'SMALLINT'>, 'SMALLINT': <TokenType.SMALLINT: 'SMALLINT'>, 'INT128': <TokenType.INT128: 'INT128'>, 'HUGEINT': 
<TokenType.INT128: 'INT128'>, 'INT2': <TokenType.SMALLINT: 'SMALLINT'>, 'INTEGER': <TokenType.INT: 'INT'>, 'INT': <TokenType.INT: 'INT'>, 'INT4': <TokenType.INT: 'INT'>, 'INT32': <TokenType.INT: 'INT'>, 'INT64': <TokenType.BIGINT: 'BIGINT'>, 'LONG': <TokenType.BIGINT: 'BIGINT'>, 'BIGINT': <TokenType.BIGINT: 'BIGINT'>, 'INT8': <TokenType.TINYINT: 'TINYINT'>, 'UINT': <TokenType.UINT: 'UINT'>, 'DEC': <TokenType.DECIMAL: 'DECIMAL'>, 'DECIMAL': <TokenType.DECIMAL: 'DECIMAL'>, 'BIGDECIMAL': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'BIGNUMERIC': <TokenType.BIGDECIMAL: 'BIGDECIMAL'>, 'LIST': <TokenType.LIST: 'LIST'>, 'MAP': <TokenType.MAP: 'MAP'>, 'NULLABLE': <TokenType.NULLABLE: 'NULLABLE'>, 'NUMBER': <TokenType.DECIMAL: 'DECIMAL'>, 'NUMERIC': <TokenType.DECIMAL: 'DECIMAL'>, 'FIXED': <TokenType.DECIMAL: 'DECIMAL'>, 'REAL': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT4': <TokenType.FLOAT: 'FLOAT'>, 'FLOAT8': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'DOUBLE PRECISION': <TokenType.DOUBLE: 'DOUBLE'>, 'JSON': <TokenType.JSON: 'JSON'>, 'JSONB': <TokenType.JSONB: 'JSONB'>, 'CHAR': <TokenType.CHAR: 'CHAR'>, 'CHARACTER': <TokenType.CHAR: 'CHAR'>, 'NCHAR': <TokenType.NCHAR: 'NCHAR'>, 'VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'VARCHAR2': <TokenType.VARCHAR: 'VARCHAR'>, 'NVARCHAR': <TokenType.NVARCHAR: 'NVARCHAR'>, 'NVARCHAR2': <TokenType.NVARCHAR: 'NVARCHAR'>, 'BPCHAR': <TokenType.BPCHAR: 'BPCHAR'>, 'STR': <TokenType.TEXT: 'TEXT'>, 'STRING': <TokenType.TEXT: 'TEXT'>, 'TEXT': <TokenType.TEXT: 'TEXT'>, 'LONGTEXT': <TokenType.LONGTEXT: 'LONGTEXT'>, 'MEDIUMTEXT': <TokenType.MEDIUMTEXT: 'MEDIUMTEXT'>, 'TINYTEXT': <TokenType.TINYTEXT: 'TINYTEXT'>, 'CLOB': <TokenType.TEXT: 'TEXT'>, 'LONGVARCHAR': <TokenType.TEXT: 'TEXT'>, 'BINARY': <TokenType.BINARY: 'BINARY'>, 'BLOB': <TokenType.VARBINARY: 'VARBINARY'>, 'LONGBLOB': <TokenType.LONGBLOB: 'LONGBLOB'>, 'MEDIUMBLOB': <TokenType.MEDIUMBLOB: 'MEDIUMBLOB'>, 'TINYBLOB': <TokenType.TINYBLOB: 
'TINYBLOB'>, 'BYTEA': <TokenType.VARBINARY: 'VARBINARY'>, 'VARBINARY': <TokenType.VARBINARY: 'VARBINARY'>, 'TIME': <TokenType.TIME: 'TIME'>, 'TIMETZ': <TokenType.TIMETZ: 'TIMETZ'>, 'TIMESTAMP': <TokenType.TIMESTAMP: 'TIMESTAMP'>, 'TIMESTAMPTZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TIMESTAMPLTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMP_LTZ': <TokenType.TIMESTAMPLTZ: 'TIMESTAMPLTZ'>, 'TIMESTAMPNTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'TIMESTAMP_NTZ': <TokenType.TIMESTAMPNTZ: 'TIMESTAMPNTZ'>, 'DATE': <TokenType.DATE: 'DATE'>, 'DATETIME': <TokenType.DATETIME: 'DATETIME'>, 'INT4RANGE': <TokenType.INT4RANGE: 'INT4RANGE'>, 'INT4MULTIRANGE': <TokenType.INT4MULTIRANGE: 'INT4MULTIRANGE'>, 'INT8RANGE': <TokenType.INT8RANGE: 'INT8RANGE'>, 'INT8MULTIRANGE': <TokenType.INT8MULTIRANGE: 'INT8MULTIRANGE'>, 'NUMRANGE': <TokenType.NUMRANGE: 'NUMRANGE'>, 'NUMMULTIRANGE': <TokenType.NUMMULTIRANGE: 'NUMMULTIRANGE'>, 'TSRANGE': <TokenType.TSRANGE: 'TSRANGE'>, 'TSMULTIRANGE': <TokenType.TSMULTIRANGE: 'TSMULTIRANGE'>, 'TSTZRANGE': <TokenType.TSTZRANGE: 'TSTZRANGE'>, 'TSTZMULTIRANGE': <TokenType.TSTZMULTIRANGE: 'TSTZMULTIRANGE'>, 'DATERANGE': <TokenType.DATERANGE: 'DATERANGE'>, 'DATEMULTIRANGE': <TokenType.DATEMULTIRANGE: 'DATEMULTIRANGE'>, 'UNIQUE': <TokenType.UNIQUE: 'UNIQUE'>, 'VECTOR': <TokenType.VECTOR: 'VECTOR'>, 'STRUCT': <TokenType.STRUCT: 'STRUCT'>, 'SEQUENCE': <TokenType.SEQUENCE: 'SEQUENCE'>, 'VARIANT': <TokenType.VARIANT: 'VARIANT'>, 'ALTER': <TokenType.ALTER: 'ALTER'>, 'ANALYZE': <TokenType.COMMAND: 'COMMAND'>, 'CALL': <TokenType.COMMAND: 'COMMAND'>, 'COMMENT': <TokenType.COMMENT: 'COMMENT'>, 'EXPLAIN': <TokenType.COMMAND: 'COMMAND'>, 'GRANT': <TokenType.COMMAND: 'COMMAND'>, 'OPTIMIZE': <TokenType.COMMAND: 'COMMAND'>, 'PREPARE': <TokenType.COMMAND: 'COMMAND'>, 'VACUUM': <TokenType.COMMAND: 'COMMAND'>, 'USER-DEFINED': <TokenType.USERDEFINED: 'USERDEFINED'>, 'FOR VERSION': <TokenType.VERSION_SNAPSHOT: 'VERSION_SNAPSHOT'>, 'FOR TIMESTAMP': 
<TokenType.TIMESTAMP_SNAPSHOT: 'TIMESTAMP_SNAPSHOT'>, 'BYTEINT': <TokenType.INT: 'INT'>, 'CHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'CHARACTER VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'EXCLUDE': <TokenType.EXCEPT: 'EXCEPT'>, 'ILIKE ANY': <TokenType.ILIKE_ANY: 'ILIKE_ANY'>, 'LIKE ANY': <TokenType.LIKE_ANY: 'LIKE_ANY'>, 'MATCH_CONDITION': <TokenType.MATCH_CONDITION: 'MATCH_CONDITION'>, 'MATCH_RECOGNIZE': <TokenType.MATCH_RECOGNIZE: 'MATCH_RECOGNIZE'>, 'MINUS': <TokenType.EXCEPT: 'EXCEPT'>, 'NCHAR VARYING': <TokenType.VARCHAR: 'VARCHAR'>, 'PUT': <TokenType.COMMAND: 'COMMAND'>, 'REMOVE': <TokenType.COMMAND: 'COMMAND'>, 'RM': <TokenType.COMMAND: 'COMMAND'>, 'SAMPLE': <TokenType.TABLE_SAMPLE: 'TABLE_SAMPLE'>, 'SQL_DOUBLE': <TokenType.DOUBLE: 'DOUBLE'>, 'SQL_VARCHAR': <TokenType.VARCHAR: 'VARCHAR'>, 'STORAGE INTEGRATION': <TokenType.STORAGE_INTEGRATION: 'STORAGE_INTEGRATION'>, 'TAG': <TokenType.TAG: 'TAG'>, 'TIMESTAMP_TZ': <TokenType.TIMESTAMPTZ: 'TIMESTAMPTZ'>, 'TOP': <TokenType.TOP: 'TOP'>, 'WAREHOUSE': <TokenType.WAREHOUSE: 'WAREHOUSE'>, 'STREAMLIT': <TokenType.STREAMLIT: 'STREAMLIT'>}
SINGLE_TOKENS = {'(': <TokenType.L_PAREN: 'L_PAREN'>, ')': <TokenType.R_PAREN: 'R_PAREN'>, '[': <TokenType.L_BRACKET: 'L_BRACKET'>, ']': <TokenType.R_BRACKET: 'R_BRACKET'>, '{': <TokenType.L_BRACE: 'L_BRACE'>, '}': <TokenType.R_BRACE: 'R_BRACE'>, '&': <TokenType.AMP: 'AMP'>, '^': <TokenType.CARET: 'CARET'>, ':': <TokenType.COLON: 'COLON'>, ',': <TokenType.COMMA: 'COMMA'>, '.': <TokenType.DOT: 'DOT'>, '-': <TokenType.DASH: 'DASH'>, '=': <TokenType.EQ: 'EQ'>, '>': <TokenType.GT: 'GT'>, '<': <TokenType.LT: 'LT'>, '%': <TokenType.MOD: 'MOD'>, '!': <TokenType.NOT: 'NOT'>, '|': <TokenType.PIPE: 'PIPE'>, '+': <TokenType.PLUS: 'PLUS'>, ';': <TokenType.SEMICOLON: 'SEMICOLON'>, '/': <TokenType.SLASH: 'SLASH'>, '\\': <TokenType.BACKSLASH: 'BACKSLASH'>, '*': <TokenType.STAR: 'STAR'>, '~': <TokenType.TILDA: 'TILDA'>, '?': <TokenType.PLACEHOLDER: 'PLACEHOLDER'>, '@': <TokenType.PARAMETER: 'PARAMETER'>, '#': <TokenType.HASH: 'HASH'>, "'": <TokenType.UNKNOWN: 'UNKNOWN'>, '`': <TokenType.UNKNOWN: 'UNKNOWN'>, '"': <TokenType.UNKNOWN: 'UNKNOWN'>, '$': <TokenType.PARAMETER: 'PARAMETER'>}
VAR_SINGLE_TOKENS = {'$'}
COMMANDS = {<TokenType.COMMAND: 'COMMAND'>, <TokenType.FETCH: 'FETCH'>, <TokenType.EXECUTE: 'EXECUTE'>, <TokenType.RENAME: 'RENAME'>}
class Snowflake.Generator(sqlglot.generator.Generator):
 736    class Generator(generator.Generator):
 737        PARAMETER_TOKEN = "$"
 738        MATCHED_BY_SOURCE = False
 739        SINGLE_STRING_INTERVAL = True
 740        JOIN_HINTS = False
 741        TABLE_HINTS = False
 742        QUERY_HINTS = False
 743        AGGREGATE_FILTER_SUPPORTED = False
 744        SUPPORTS_TABLE_COPY = False
 745        COLLATE_IS_FUNC = True
 746        LIMIT_ONLY_LITERALS = True
 747        JSON_KEY_VALUE_PAIR_SEP = ","
 748        INSERT_OVERWRITE = " OVERWRITE INTO"
 749        STRUCT_DELIMITER = ("(", ")")
 750        COPY_PARAMS_ARE_WRAPPED = False
 751        COPY_PARAMS_EQ_REQUIRED = True
 752        STAR_EXCEPT = "EXCLUDE"
 753        SUPPORTS_EXPLODING_PROJECTIONS = False
 754        ARRAY_CONCAT_IS_VAR_LEN = False
 755        SUPPORTS_CONVERT_TIMEZONE = True
 756
 757        TRANSFORMS = {
 758            **generator.Generator.TRANSFORMS,
 759            exp.ApproxDistinct: rename_func("APPROX_COUNT_DISTINCT"),
 760            exp.ArgMax: rename_func("MAX_BY"),
 761            exp.ArgMin: rename_func("MIN_BY"),
 762            exp.Array: inline_array_sql,
 763            exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
 764            exp.ArrayContains: lambda self, e: self.func("ARRAY_CONTAINS", e.expression, e.this),
 765            exp.AtTimeZone: lambda self, e: self.func(
 766                "CONVERT_TIMEZONE", e.args.get("zone"), e.this
 767            ),
 768            exp.BitwiseXor: rename_func("BITXOR"),
 769            exp.Create: transforms.preprocess([_flatten_structured_types_unless_iceberg]),
 770            exp.DateAdd: date_delta_sql("DATEADD"),
 771            exp.DateDiff: date_delta_sql("DATEDIFF"),
 772            exp.DateStrToDate: datestrtodate_sql,
 773            exp.DayOfMonth: rename_func("DAYOFMONTH"),
 774            exp.DayOfWeek: rename_func("DAYOFWEEK"),
 775            exp.DayOfYear: rename_func("DAYOFYEAR"),
 776            exp.Explode: rename_func("FLATTEN"),
 777            exp.Extract: rename_func("DATE_PART"),
 778            exp.FromTimeZone: lambda self, e: self.func(
 779                "CONVERT_TIMEZONE", e.args.get("zone"), "'UTC'", e.this
 780            ),
 781            exp.GenerateSeries: lambda self, e: self.func(
 782                "ARRAY_GENERATE_RANGE", e.args["start"], e.args["end"] + 1, e.args.get("step")
 783            ),
 784            exp.GroupConcat: rename_func("LISTAGG"),
 785            exp.If: if_sql(name="IFF", false_value="NULL"),
 786            exp.JSONExtract: lambda self, e: self.func("GET_PATH", e.this, e.expression),
 787            exp.JSONExtractScalar: lambda self, e: self.func(
 788                "JSON_EXTRACT_PATH_TEXT", e.this, e.expression
 789            ),
 790            exp.JSONObject: lambda self, e: self.func("OBJECT_CONSTRUCT_KEEP_NULL", *e.expressions),
 791            exp.JSONPathRoot: lambda *_: "",
 792            exp.LogicalAnd: rename_func("BOOLAND_AGG"),
 793            exp.LogicalOr: rename_func("BOOLOR_AGG"),
 794            exp.Map: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 795            exp.Max: max_or_greatest,
 796            exp.Min: min_or_least,
 797            exp.ParseJSON: lambda self, e: self.func(
 798                "TRY_PARSE_JSON" if e.args.get("safe") else "PARSE_JSON", e.this
 799            ),
 800            exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
 801            exp.PercentileCont: transforms.preprocess(
 802                [transforms.add_within_group_for_percentiles]
 803            ),
 804            exp.PercentileDisc: transforms.preprocess(
 805                [transforms.add_within_group_for_percentiles]
 806            ),
 807            exp.Pivot: transforms.preprocess([_unqualify_unpivot_columns]),
 808            exp.RegexpILike: _regexpilike_sql,
 809            exp.Rand: rename_func("RANDOM"),
 810            exp.Select: transforms.preprocess(
 811                [
 812                    transforms.eliminate_distinct_on,
 813                    transforms.explode_to_unnest(),
 814                    transforms.eliminate_semi_and_anti_joins,
 815                    _unnest_generate_date_array,
 816                ]
 817            ),
 818            exp.SHA: rename_func("SHA1"),
 819            exp.StarMap: rename_func("OBJECT_CONSTRUCT"),
 820            exp.StartsWith: rename_func("STARTSWITH"),
 821            exp.StrPosition: lambda self, e: self.func(
 822                "POSITION", e.args.get("substr"), e.this, e.args.get("position")
 823            ),
 824            exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
 825            exp.Stuff: rename_func("INSERT"),
 826            exp.TimeAdd: date_delta_sql("TIMEADD"),
 827            exp.TimestampDiff: lambda self, e: self.func(
 828                "TIMESTAMPDIFF", e.unit, e.expression, e.this
 829            ),
 830            exp.TimestampTrunc: timestamptrunc_sql(),
 831            exp.TimeStrToTime: timestrtotime_sql,
 832            exp.TimeToStr: lambda self, e: self.func(
 833                "TO_CHAR", exp.cast(e.this, exp.DataType.Type.TIMESTAMP), self.format_time(e)
 834            ),
 835            exp.TimeToUnix: lambda self, e: f"EXTRACT(epoch_second FROM {self.sql(e, 'this')})",
 836            exp.ToArray: rename_func("TO_ARRAY"),
 837            exp.ToChar: lambda self, e: self.function_fallback_sql(e),
 838            exp.Trim: lambda self, e: self.func("TRIM", e.this, e.expression),
 839            exp.TsOrDsAdd: date_delta_sql("DATEADD", cast=True),
 840            exp.TsOrDsDiff: date_delta_sql("DATEDIFF"),
 841            exp.TsOrDsToDate: lambda self, e: self.func(
 842                "TRY_TO_DATE" if e.args.get("safe") else "TO_DATE", e.this, self.format_time(e)
 843            ),
 844            exp.UnixToTime: rename_func("TO_TIMESTAMP"),
 845            exp.VarMap: lambda self, e: var_map_sql(self, e, "OBJECT_CONSTRUCT"),
 846            exp.WeekOfYear: rename_func("WEEKOFYEAR"),
 847            exp.Xor: rename_func("BOOLXOR"),
 848        }
 849
# JSON path syntax elements that Snowflake can render natively; any other
# path part is handled by the base generator's unsupported machinery.
SUPPORTED_JSON_PATH_PARTS = {
    exp.JSONPathKey,
    exp.JSONPathRoot,
    exp.JSONPathSubscript,
}

# Snowflake has no dedicated nested/struct column types; both map to the
# semi-structured OBJECT type.
TYPE_MAPPING = {
    **generator.Generator.TYPE_MAPPING,
    exp.DataType.Type.NESTED: "OBJECT",
    exp.DataType.Type.STRUCT: "OBJECT",
}

# Properties with no Snowflake DDL equivalent are marked UNSUPPORTED so the
# generator warns instead of emitting invalid SQL.
PROPERTIES_LOCATION = {
    **generator.Generator.PROPERTIES_LOCATION,
    exp.SetProperty: exp.Properties.Location.UNSUPPORTED,
    exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}

# Expression types whose presence in a VALUES clause disables rendering it
# in table form (values_as_table) — see values_sql.
UNSUPPORTED_VALUES_EXPRESSIONS = {
    exp.Map,
    exp.StarMap,
    exp.Struct,
    exp.VarMap,
}
 874
 875        def with_properties(self, properties: exp.Properties) -> str:
 876            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
 877
 878        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
 879            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
 880                values_as_table = False
 881
 882            return super().values_sql(expression, values_as_table=values_as_table)
 883
 884        def datatype_sql(self, expression: exp.DataType) -> str:
 885            expressions = expression.expressions
 886            if (
 887                expressions
 888                and expression.is_type(*exp.DataType.STRUCT_TYPES)
 889                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
 890            ):
 891                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
 892                return "OBJECT"
 893
 894            return super().datatype_sql(expression)
 895
 896        def tonumber_sql(self, expression: exp.ToNumber) -> str:
 897            return self.func(
 898                "TO_NUMBER",
 899                expression.this,
 900                expression.args.get("format"),
 901                expression.args.get("precision"),
 902                expression.args.get("scale"),
 903            )
 904
 905        def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
 906            milli = expression.args.get("milli")
 907            if milli is not None:
 908                milli_to_nano = milli.pop() * exp.Literal.number(1000000)
 909                expression.set("nano", milli_to_nano)
 910
 911            return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
 912
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Render TRY_CAST, falling back to a plain CAST for non-string inputs.

    Snowflake's TRY_CAST only accepts string (VARCHAR-like) source values,
    so any operand whose annotated type is not textual/unknown is rendered
    as a regular CAST instead.
    """
    value = expression.this

    if value.type is None:
        # Annotate lazily; the local import avoids a circular dependency
        # with the optimizer package.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    # UNKNOWN is given the benefit of the doubt and kept as TRY_CAST.
    if value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN):
        return super().trycast_sql(expression)

    # TRY_CAST only works for string values in Snowflake
    return self.cast_sql(expression)
 926
 927        def log_sql(self, expression: exp.Log) -> str:
 928            if not expression.expression:
 929                return self.func("LN", expression.this)
 930
 931            return super().log_sql(expression)
 932
 933        def unnest_sql(self, expression: exp.Unnest) -> str:
 934            unnest_alias = expression.args.get("alias")
 935            offset = expression.args.get("offset")
 936
 937            columns = [
 938                exp.to_identifier("seq"),
 939                exp.to_identifier("key"),
 940                exp.to_identifier("path"),
 941                offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index"),
 942                seq_get(unnest_alias.columns if unnest_alias else [], 0)
 943                or exp.to_identifier("value"),
 944                exp.to_identifier("this"),
 945            ]
 946
 947            if unnest_alias:
 948                unnest_alias.set("columns", columns)
 949            else:
 950                unnest_alias = exp.TableAlias(this="_u", columns=columns)
 951
 952            explode = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
 953            alias = self.sql(unnest_alias)
 954            alias = f" AS {alias}" if alias else ""
 955            return f"{explode}{alias}"
 956
 957        def show_sql(self, expression: exp.Show) -> str:
 958            terse = "TERSE " if expression.args.get("terse") else ""
 959            history = " HISTORY" if expression.args.get("history") else ""
 960            like = self.sql(expression, "like")
 961            like = f" LIKE {like}" if like else ""
 962
 963            scope = self.sql(expression, "scope")
 964            scope = f" {scope}" if scope else ""
 965
 966            scope_kind = self.sql(expression, "scope_kind")
 967            if scope_kind:
 968                scope_kind = f" IN {scope_kind}"
 969
 970            starts_with = self.sql(expression, "starts_with")
 971            if starts_with:
 972                starts_with = f" STARTS WITH {starts_with}"
 973
 974            limit = self.sql(expression, "limit")
 975
 976            from_ = self.sql(expression, "from")
 977            if from_:
 978                from_ = f" FROM {from_}"
 979
 980            return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
 981
 982        def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
 983            # Other dialects don't support all of the following parameters, so we need to
 984            # generate default values as necessary to ensure the transpilation is correct
 985            group = expression.args.get("group")
 986            parameters = expression.args.get("parameters") or (group and exp.Literal.string("c"))
 987            occurrence = expression.args.get("occurrence") or (parameters and exp.Literal.number(1))
 988            position = expression.args.get("position") or (occurrence and exp.Literal.number(1))
 989
 990            return self.func(
 991                "REGEXP_SUBSTR",
 992                expression.this,
 993                expression.expression,
 994                position,
 995                occurrence,
 996                parameters,
 997                group,
 998            )
 999
def except_op(self, expression: exp.Except) -> str:
    """Render EXCEPT; EXCEPT ALL has no Snowflake equivalent, so warn."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
1004
def intersect_op(self, expression: exp.Intersect) -> str:
    """Render INTERSECT; INTERSECT ALL has no Snowflake equivalent, so warn."""
    is_distinct = expression.args.get("distinct")
    if not is_distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
1009
1010        def describe_sql(self, expression: exp.Describe) -> str:
1011            # Default to table if kind is unknown
1012            kind_value = expression.args.get("kind") or "TABLE"
1013            kind = f" {kind_value}" if kind_value else ""
1014            this = f" {self.sql(expression, 'this')}"
1015            expressions = self.expressions(expression, flat=True)
1016            expressions = f" {expressions}" if expressions else ""
1017            return f"DESCRIBE{kind}{this}{expressions}"
1018
1019        def generatedasidentitycolumnconstraint_sql(
1020            self, expression: exp.GeneratedAsIdentityColumnConstraint
1021        ) -> str:
1022            start = expression.args.get("start")
1023            start = f" START {start}" if start else ""
1024            increment = expression.args.get("increment")
1025            increment = f" INCREMENT {increment}" if increment else ""
1026            return f"AUTOINCREMENT{start}{increment}"
1027
1028        def swaptable_sql(self, expression: exp.SwapTable) -> str:
1029            this = self.sql(expression, "this")
1030            return f"SWAP WITH {this}"
1031
1032        def cluster_sql(self, expression: exp.Cluster) -> str:
1033            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
1034
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile struct literals into OBJECT_CONSTRUCT(key, value, ...).

    Named fields keep their key (identifiers become string literals);
    positional entries get synthetic "_<index>" keys.
    """
    flat_args = []

    for index, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            if isinstance(field.this, exp.Identifier):
                key = exp.Literal.string(field.name)
            else:
                key = field.this
            flat_args.extend((key, field.expression))
        else:
            flat_args.extend((exp.Literal.string(f"_{index}"), field))

    return self.func("OBJECT_CONSTRUCT", *flat_args)
1050
1051        def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
1052            if expression.args.get("weight") or expression.args.get("accuracy"):
1053                self.unsupported(
1054                    "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
1055                )
1056
1057            return self.func("APPROX_PERCENTILE", expression.this, expression.args.get("quantile"))
1058
1059        def alterset_sql(self, expression: exp.AlterSet) -> str:
1060            exprs = self.expressions(expression, flat=True)
1061            exprs = f" {exprs}" if exprs else ""
1062            file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
1063            file_format = f" STAGE_FILE_FORMAT = ({file_format})" if file_format else ""
1064            copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
1065            copy_options = f" STAGE_COPY_OPTIONS = ({copy_options})" if copy_options else ""
1066            tag = self.expressions(expression, key="tag", flat=True)
1067            tag = f" TAG {tag}" if tag else ""
1068
1069            return f"SET{exprs}{file_format}{copy_options}{tag}"

Generator converts a given syntax tree to the corresponding SQL string.

Arguments:
  • pretty: Whether to format the produced SQL string. Default: False.
  • identify: Determines when an identifier should be quoted. Possible values are: False (default): Never quote, except in cases where it's mandatory by the dialect. True or 'always': Always quote. 'safe': Only quote identifiers that are case insensitive.
  • normalize: Whether to normalize identifiers to lowercase. Default: False.
  • pad: The pad size in a formatted string. For example, this affects the indentation of a projection in a query, relative to its nesting level. Default: 2.
  • indent: The indentation size in a formatted string. For example, this affects the indentation of subqueries and filters under a WHERE clause. Default: 2.
  • normalize_functions: How to normalize function names. Possible values are: "upper" or True (default): Convert names to uppercase. "lower": Convert names to lowercase. False: Disables function name normalization.
  • unsupported_level: Determines the generator's behavior when it encounters unsupported expressions. Default: ErrorLevel.WARN.
  • max_unsupported: Maximum number of unsupported messages to include in a raised UnsupportedError. This is only relevant if unsupported_level is ErrorLevel.RAISE. Default: 3.
  • leading_comma: Whether the comma is leading or trailing in select expressions. This is only relevant when generating in pretty mode. Default: False
  • max_text_width: The max number of characters in a segment before creating new lines in pretty mode. The default is on the smaller end because the length only represents a segment and not the true line length. Default: 80
  • comments: Whether to preserve comments in the output SQL code. Default: True
PARAMETER_TOKEN = '$'
MATCHED_BY_SOURCE = False
SINGLE_STRING_INTERVAL = True
JOIN_HINTS = False
TABLE_HINTS = False
QUERY_HINTS = False
AGGREGATE_FILTER_SUPPORTED = False
SUPPORTS_TABLE_COPY = False
COLLATE_IS_FUNC = True
LIMIT_ONLY_LITERALS = True
JSON_KEY_VALUE_PAIR_SEP = ','
INSERT_OVERWRITE = ' OVERWRITE INTO'
STRUCT_DELIMITER = ('(', ')')
COPY_PARAMS_ARE_WRAPPED = False
COPY_PARAMS_EQ_REQUIRED = True
STAR_EXCEPT = 'EXCLUDE'
SUPPORTS_EXPLODING_PROJECTIONS = False
ARRAY_CONCAT_IS_VAR_LEN = False
SUPPORTS_CONVERT_TIMEZONE = True
TRANSFORMS = {<class 'sqlglot.expressions.JSONPathKey'>: <function <lambda>>, <class 'sqlglot.expressions.JSONPathRoot'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONPathSubscript'>: <function <lambda>>, <class 'sqlglot.expressions.AllowedValuesProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.BackupProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CaseSpecificColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CharacterSetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CollateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CommentColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ConnectByRoot'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DateFormatColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DefaultColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.DynamicProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EmptyProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EncodeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.EphemeralColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExcludeColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExecuteAsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ExternalProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.GlobalProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.HeapProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IcebergProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InheritsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InlineLengthColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.InputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.IntervalSpan'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LanguageProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LocationProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.LogProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.MaterializedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NonClusteredColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.NotForReplicationColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnCommitProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OnUpdateColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Operator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.OutputModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PathColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.PivotAny'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ProjectionPolicyColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ReturnsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SampleProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SecureProperty'>: 
<function Generator.<lambda>>, <class 'sqlglot.expressions.SetConfigProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SetProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SettingsProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SharingProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.SqlSecurityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StabilityProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.Stream'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StreamingTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.StrictProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TemporaryProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TagColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TitleColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToMap'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ToTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransformModelProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.TransientProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UppercaseColumnConstraint'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.UnloggedProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VarMap'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.VolatileProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <function Generator.<lambda>>, <class 
'sqlglot.expressions.WithOperator'>: <function Generator.<lambda>>, <class 'sqlglot.expressions.ApproxDistinct'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMax'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ArgMin'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Array'>: <function inline_array_sql>, <class 'sqlglot.expressions.ArrayConcat'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ArrayContains'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.AtTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.BitwiseXor'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Create'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.DateAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.DateStrToDate'>: <function datestrtodate_sql>, <class 'sqlglot.expressions.DayOfMonth'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfWeek'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.DayOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Explode'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Extract'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.FromTimeZone'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GenerateSeries'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.GroupConcat'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.If'>: <function if_sql.<locals>._if_sql>, <class 'sqlglot.expressions.JSONExtract'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONExtractScalar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.JSONObject'>: 
<function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.LogicalAnd'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.LogicalOr'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Map'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Max'>: <function max_or_greatest>, <class 'sqlglot.expressions.Min'>: <function min_or_least>, <class 'sqlglot.expressions.ParseJSON'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PartitionedByProperty'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.PercentileCont'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.PercentileDisc'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.Pivot'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.RegexpILike'>: <function _regexpilike_sql>, <class 'sqlglot.expressions.Rand'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Select'>: <function preprocess.<locals>._to_sql>, <class 'sqlglot.expressions.SHA'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StarMap'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StartsWith'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.StrPosition'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.StrToTime'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Stuff'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.TimeAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TimestampDiff'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TimestampTrunc'>: <function timestamptrunc_sql.<locals>._timestamptrunc_sql>, <class 'sqlglot.expressions.TimeStrToTime'>: <function timestrtotime_sql>, <class 'sqlglot.expressions.TimeToStr'>: <function Snowflake.Generator.<lambda>>, <class 
'sqlglot.expressions.TimeToUnix'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.ToArray'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.ToChar'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.Trim'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.TsOrDsAdd'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsDiff'>: <function date_delta_sql.<locals>._delta_sql>, <class 'sqlglot.expressions.TsOrDsToDate'>: <function Snowflake.Generator.<lambda>>, <class 'sqlglot.expressions.UnixToTime'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.WeekOfYear'>: <function rename_func.<locals>.<lambda>>, <class 'sqlglot.expressions.Xor'>: <function rename_func.<locals>.<lambda>>}
TYPE_MAPPING = {<Type.NCHAR: 'NCHAR'>: 'CHAR', <Type.NVARCHAR: 'NVARCHAR'>: 'VARCHAR', <Type.MEDIUMTEXT: 'MEDIUMTEXT'>: 'TEXT', <Type.LONGTEXT: 'LONGTEXT'>: 'TEXT', <Type.TINYTEXT: 'TINYTEXT'>: 'TEXT', <Type.MEDIUMBLOB: 'MEDIUMBLOB'>: 'BLOB', <Type.LONGBLOB: 'LONGBLOB'>: 'BLOB', <Type.TINYBLOB: 'TINYBLOB'>: 'BLOB', <Type.INET: 'INET'>: 'INET', <Type.ROWVERSION: 'ROWVERSION'>: 'VARBINARY', <Type.NESTED: 'NESTED'>: 'OBJECT', <Type.STRUCT: 'STRUCT'>: 'OBJECT'}
PROPERTIES_LOCATION = {<class 'sqlglot.expressions.AllowedValuesProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AlgorithmProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.AutoIncrementProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.AutoRefreshProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BackupProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.BlockCompressionProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CharacterSetProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ChecksumProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.CollateProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.CopyGrantsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Cluster'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ClusteredByProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DataBlocksizeProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.DataDeletionProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DefinerProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DictRange'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DynamicProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.DistKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.DistStyleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EmptyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.EngineProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExecuteAsProperty'>: 
<Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ExternalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.FallbackProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.FileFormatProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.FreespaceProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.GlobalProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.HeapProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.InheritsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IcebergProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.InputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.IsolatedLoadingProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.JournalProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.LanguageProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LikeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LocationProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.LockingProperty'>: <Location.POST_ALIAS: 'POST_ALIAS'>, <class 'sqlglot.expressions.LogProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.MaterializedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.MergeBlockRatioProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.NoPrimaryIndexProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.OnProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.OnCommitProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.Order'>: <Location.POST_SCHEMA: 
'POST_SCHEMA'>, <class 'sqlglot.expressions.OutputModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PartitionedByProperty'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.PartitionedOfProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.PrimaryKey'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Property'>: <Location.POST_WITH: 'POST_WITH'>, <class 'sqlglot.expressions.RemoteWithConnectionModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.ReturnsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatDelimitedProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.RowFormatSerdeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SampleProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SchemaCommentProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SecureProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.SerdeProperties'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.Set'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SettingsProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SetProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.SetConfigProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SharingProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SequenceProperties'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.SortKeyProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.SqlReadWriteProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 
'sqlglot.expressions.SqlSecurityProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StabilityProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.StreamingTableProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.StrictProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TemporaryProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ToTableProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.TransientProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.TransformModelProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.MergeTreeTTL'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.UnloggedProperty'>: <Location.POST_CREATE: 'POST_CREATE'>, <class 'sqlglot.expressions.ViewAttributeProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.VolatileProperty'>: <Location.UNSUPPORTED: 'UNSUPPORTED'>, <class 'sqlglot.expressions.WithDataProperty'>: <Location.POST_EXPRESSION: 'POST_EXPRESSION'>, <class 'sqlglot.expressions.WithJournalTableProperty'>: <Location.POST_NAME: 'POST_NAME'>, <class 'sqlglot.expressions.WithSchemaBindingProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>, <class 'sqlglot.expressions.WithSystemVersioningProperty'>: <Location.POST_SCHEMA: 'POST_SCHEMA'>}
UNSUPPORTED_VALUES_EXPRESSIONS = {<class 'sqlglot.expressions.Struct'>, <class 'sqlglot.expressions.Map'>, <class 'sqlglot.expressions.VarMap'>, <class 'sqlglot.expressions.StarMap'>}
def with_properties(self, properties: sqlglot.expressions.Properties) -> str:
875        def with_properties(self, properties: exp.Properties) -> str:
876            return self.properties(properties, wrapped=False, prefix=self.sep(""), sep=" ")
def values_sql( self, expression: sqlglot.expressions.Values, values_as_table: bool = True) -> str:
878        def values_sql(self, expression: exp.Values, values_as_table: bool = True) -> str:
879            if expression.find(*self.UNSUPPORTED_VALUES_EXPRESSIONS):
880                values_as_table = False
881
882            return super().values_sql(expression, values_as_table=values_as_table)
def datatype_sql(self, expression: sqlglot.expressions.DataType) -> str:
884        def datatype_sql(self, expression: exp.DataType) -> str:
885            expressions = expression.expressions
886            if (
887                expressions
888                and expression.is_type(*exp.DataType.STRUCT_TYPES)
889                and any(isinstance(field_type, exp.DataType) for field_type in expressions)
890            ):
891                # The correct syntax is OBJECT [ (<key> <value_type [NOT NULL] [, ...]) ]
892                return "OBJECT"
893
894            return super().datatype_sql(expression)
def tonumber_sql(self, expression: exp.ToNumber) -> str:
    """Generate TO_NUMBER(this[, format[, precision[, scale]]])."""
    args = expression.args
    return self.func(
        "TO_NUMBER",
        expression.this,
        args.get("format"),
        args.get("precision"),
        args.get("scale"),
    )
def timestampfromparts_sql(self, expression: exp.TimestampFromParts) -> str:
    """Generate TIMESTAMP_FROM_PARTS, folding a milliseconds arg into nanoseconds
    since Snowflake's function has no milliseconds slot."""
    milli = expression.args.get("milli")
    if milli is not None:
        expression.set("nano", milli.pop() * exp.Literal.number(1000000))

    return rename_func("TIMESTAMP_FROM_PARTS")(self, expression)
def trycast_sql(self, expression: exp.TryCast) -> str:
    """Generate TRY_CAST, downgrading to CAST for non-string operands since
    Snowflake's TRY_CAST only works on string values."""
    value = expression.this

    if value.type is None:
        # Lazily annotate so we can inspect the operand's type.
        from sqlglot.optimizer.annotate_types import annotate_types

        value = annotate_types(value)

    is_stringlike = value.is_type(*exp.DataType.TEXT_TYPES, exp.DataType.Type.UNKNOWN)
    return super().trycast_sql(expression) if is_stringlike else self.cast_sql(expression)
def log_sql(self, expression: exp.Log) -> str:
    """Generate LOG; a single-argument LOG is the natural log, i.e. LN in Snowflake."""
    if expression.expression:
        return super().log_sql(expression)

    return self.func("LN", expression.this)
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Transpile UNNEST into Snowflake's TABLE(FLATTEN(INPUT => ...)) with the
    fixed six-column FLATTEN alias (seq, key, path, index, value, this)."""
    unnest_alias = expression.args.get("alias")
    offset = expression.args.get("offset")

    # An explicit offset expression takes over the "index" column slot; an
    # existing alias column takes over the "value" slot.
    index_column = (
        offset.pop() if isinstance(offset, exp.Expression) else exp.to_identifier("index")
    )
    value_column = (
        seq_get(unnest_alias.columns if unnest_alias else [], 0)
        or exp.to_identifier("value")
    )

    columns = [
        exp.to_identifier("seq"),
        exp.to_identifier("key"),
        exp.to_identifier("path"),
        index_column,
        value_column,
        exp.to_identifier("this"),
    ]

    if unnest_alias:
        unnest_alias.set("columns", columns)
    else:
        unnest_alias = exp.TableAlias(this="_u", columns=columns)

    flattened = f"TABLE(FLATTEN(INPUT => {self.sql(expression.expressions[0])}))"
    alias_sql = self.sql(unnest_alias)
    return f"{flattened} AS {alias_sql}" if alias_sql else flattened
def show_sql(self, expression: exp.Show) -> str:
    """Generate a SHOW command with its optional TERSE/HISTORY/LIKE/IN/
    STARTS WITH/LIMIT/FROM clauses."""
    terse = "TERSE " if expression.args.get("terse") else ""
    history = " HISTORY" if expression.args.get("history") else ""

    like = self.sql(expression, "like")
    if like:
        like = f" LIKE {like}"

    scope = self.sql(expression, "scope")
    if scope:
        scope = f" {scope}"

    scope_kind = self.sql(expression, "scope_kind")
    scope_kind = f" IN {scope_kind}" if scope_kind else ""

    starts_with = self.sql(expression, "starts_with")
    starts_with = f" STARTS WITH {starts_with}" if starts_with else ""

    limit = self.sql(expression, "limit")

    from_ = self.sql(expression, "from")
    from_ = f" FROM {from_}" if from_ else ""

    # Note: the scope kind ("IN ...") precedes the scope itself in the output.
    return f"SHOW {terse}{expression.name}{history}{like}{scope_kind}{scope}{starts_with}{limit}{from_}"
def regexpextract_sql(self, expression: exp.RegexpExtract) -> str:
    """Generate REGEXP_SUBSTR(subject, pattern, position, occurrence, parameters, group).

    Other dialects don't support all of these parameters, so defaults are
    back-filled for every optional argument that precedes a supplied one,
    keeping the positional call well-formed.
    """
    args = expression.args
    group = args.get("group")
    parameters = args.get("parameters") or (group and exp.Literal.string("c"))
    occurrence = args.get("occurrence") or (parameters and exp.Literal.number(1))
    position = args.get("position") or (occurrence and exp.Literal.number(1))

    return self.func(
        "REGEXP_SUBSTR",
        expression.this,
        expression.expression,
        position,
        occurrence,
        parameters,
        group,
    )
def except_op(self, expression: exp.Except) -> str:
    """Generate EXCEPT, warning that EXCEPT ALL has no Snowflake equivalent."""
    distinct = expression.args.get("distinct")
    if not distinct:
        self.unsupported("EXCEPT with All is not supported in Snowflake")
    return super().except_op(expression)
def intersect_op(self, expression: exp.Intersect) -> str:
    """Generate INTERSECT, warning that INTERSECT ALL has no Snowflake equivalent."""
    distinct = expression.args.get("distinct")
    if not distinct:
        self.unsupported("INTERSECT with All is not supported in Snowflake")
    return super().intersect_op(expression)
def describe_sql(self, expression: exp.Describe) -> str:
    """Generate DESCRIBE, defaulting the object kind to TABLE when absent.

    Fix: ``kind_value`` is always truthy because of the ``or "TABLE"``
    default, so the old ``if kind_value else ""`` branch was unreachable
    dead code; the f-string is now applied unconditionally.
    """
    kind_value = expression.args.get("kind") or "TABLE"
    kind = f" {kind_value}"
    this = f" {self.sql(expression, 'this')}"
    expressions = self.expressions(expression, flat=True)
    expressions = f" {expressions}" if expressions else ""
    return f"DESCRIBE{kind}{this}{expressions}"
def generatedasidentitycolumnconstraint_sql(
    self, expression: exp.GeneratedAsIdentityColumnConstraint
) -> str:
    """Render identity columns as AUTOINCREMENT [START n] [INCREMENT n]."""
    parts = ["AUTOINCREMENT"]

    start = expression.args.get("start")
    if start:
        parts.append(f" START {start}")

    increment = expression.args.get("increment")
    if increment:
        parts.append(f" INCREMENT {increment}")

    return "".join(parts)
def swaptable_sql(self, expression: exp.SwapTable) -> str:
    """Generate Snowflake's ALTER TABLE ... SWAP WITH clause."""
    target = self.sql(expression, "this")
    return f"SWAP WITH {target}"
def cluster_sql(self, expression: exp.Cluster) -> str:
    """Generate CLUSTER BY (...) with the clustering keys wrapped in parens."""
    keys = self.expressions(expression, flat=True)
    return f"CLUSTER BY ({keys})"
def struct_sql(self, expression: exp.Struct) -> str:
    """Transpile STRUCT(...) into Snowflake's OBJECT_CONSTRUCT(k1, v1, k2, v2, ...)."""
    interleaved = []

    for index, field in enumerate(expression.expressions):
        if isinstance(field, exp.PropertyEQ):
            # Named field: identifier keys become string literals.
            key = (
                exp.Literal.string(field.name)
                if isinstance(field.this, exp.Identifier)
                else field.this
            )
            interleaved.extend((key, field.expression))
        else:
            # Unnamed field: synthesize a positional key "_0", "_1", ...
            interleaved.extend((exp.Literal.string(f"_{index}"), field))

    return self.func("OBJECT_CONSTRUCT", *interleaved)
def approxquantile_sql(self, expression: exp.ApproxQuantile) -> str:
    """Generate APPROX_PERCENTILE, warning on weight/accuracy args Snowflake lacks."""
    args = expression.args
    if args.get("weight") or args.get("accuracy"):
        self.unsupported(
            "APPROX_PERCENTILE with weight and/or accuracy arguments are not supported in Snowflake"
        )

    return self.func("APPROX_PERCENTILE", expression.this, args.get("quantile"))
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Generate ALTER ... SET with optional stage file-format/copy options and tags."""
    parts = ["SET"]

    exprs = self.expressions(expression, flat=True)
    if exprs:
        parts.append(f" {exprs}")

    file_format = self.expressions(expression, key="file_format", flat=True, sep=" ")
    if file_format:
        parts.append(f" STAGE_FILE_FORMAT = ({file_format})")

    copy_options = self.expressions(expression, key="copy_options", flat=True, sep=" ")
    if copy_options:
        parts.append(f" STAGE_COPY_OPTIONS = ({copy_options})")

    tag = self.expressions(expression, key="tag", flat=True)
    if tag:
        parts.append(f" TAG {tag}")

    return "".join(parts)
SELECT_KINDS: Tuple[str, ...] = ()
TRY_SUPPORTED = False
SUPPORTS_UESCAPE = False
SUPPORTS_NULLABLE_TYPES = False
AFTER_HAVING_MODIFIER_TRANSFORMS = {'windows': <function Generator.<lambda>>, 'qualify': <function Generator.<lambda>>}
Inherited Members
sqlglot.generator.Generator
Generator
NULL_ORDERING_SUPPORTED
IGNORE_NULLS_IN_FUNC
LOCKING_READS_SUPPORTED
EXPLICIT_SET_OP
WRAP_DERIVED_VALUES
CREATE_FUNCTION_RETURN_AS
INTERVAL_ALLOWS_PLURAL_FORM
LIMIT_FETCH
RENAME_TABLE_WITH_DB
GROUPINGS_SEP
INDEX_ON
QUERY_HINT_SEP
IS_BOOL_ALLOWED
DUPLICATE_KEY_UPDATE_WITH_SET
LIMIT_IS_TOP
RETURNING_END
EXTRACT_ALLOWS_QUOTES
TZ_TO_WITH_TIME_ZONE
NVL2_SUPPORTED
VALUES_AS_TABLE
ALTER_TABLE_INCLUDE_COLUMN_KEYWORD
UNNEST_WITH_ORDINALITY
SEMI_ANTI_JOIN_WITH_SIDE
COMPUTED_COLUMN_WITH_TYPE
TABLESAMPLE_REQUIRES_PARENS
TABLESAMPLE_SIZE_IS_ROWS
TABLESAMPLE_KEYWORDS
TABLESAMPLE_WITH_METHOD
TABLESAMPLE_SEED_KEYWORD
DATA_TYPE_SPECIFIERS_ALLOWED
ENSURE_BOOLS
CTE_RECURSIVE_KEYWORD_REQUIRED
SUPPORTS_SINGLE_ARG_CONCAT
LAST_DAY_SUPPORTS_DATE_PART
SUPPORTS_TABLE_ALIAS_COLUMNS
UNPIVOT_ALIASES_ARE_IDENTIFIERS
SUPPORTS_SELECT_INTO
SUPPORTS_UNLOGGED_TABLES
SUPPORTS_CREATE_TABLE_LIKE
LIKE_PROPERTY_INSIDE_SCHEMA
MULTI_ARG_DISTINCT
JSON_TYPE_REQUIRED_FOR_EXTRACTION
JSON_PATH_BRACKETED_KEY_SUPPORTED
JSON_PATH_SINGLE_QUOTE_ESCAPE
CAN_IMPLEMENT_ARRAY_ANY
SUPPORTS_TO_NUMBER
SET_OP_MODIFIERS
COPY_HAS_INTO_KEYWORD
HEX_FUNC
WITH_PROPERTIES_PREFIX
QUOTE_JSON_PATH
PAD_FILL_PATTERN_IS_REQUIRED
PARSE_JSON_NAME
TIME_PART_SINGULARS
TOKEN_MAPPING
NAMED_PLACEHOLDER_TOKEN
RESERVED_KEYWORDS
WITH_SEPARATED_COMMENTS
EXCLUDE_COMMENTS
UNWRAPPED_INTERVAL_VALUES
PARAMETERIZABLE_TEXT_TYPES
EXPRESSIONS_WITHOUT_NESTED_CTES
SENTINEL_LINE_BREAK
pretty
identify
normalize
pad
unsupported_level
max_unsupported
leading_comma
max_text_width
comments
dialect
normalize_functions
unsupported_messages
generate
preprocess
unsupported
sep
seg
pad_comment
maybe_comment
wrap
no_identify
normalize_func
indent
sql
uncache_sql
cache_sql
characterset_sql
column_parts
column_sql
columnposition_sql
columndef_sql
columnconstraint_sql
computedcolumnconstraint_sql
autoincrementcolumnconstraint_sql
compresscolumnconstraint_sql
generatedasrowcolumnconstraint_sql
periodforsystemtimeconstraint_sql
notnullcolumnconstraint_sql
transformcolumnconstraint_sql
primarykeycolumnconstraint_sql
uniquecolumnconstraint_sql
createable_sql
create_sql
sequenceproperties_sql
clone_sql
heredoc_sql
prepend_ctes
with_sql
cte_sql
tablealias_sql
bitstring_sql
hexstring_sql
bytestring_sql
unicodestring_sql
rawstring_sql
datatypeparam_sql
directory_sql
delete_sql
drop_sql
except_sql
fetch_sql
filter_sql
hint_sql
indexparameters_sql
index_sql
identifier_sql
hex_sql
lowerhex_sql
inputoutputformat_sql
national_sql
partition_sql
properties_sql
root_properties
properties
locate_properties
property_name
property_sql
likeproperty_sql
fallbackproperty_sql
journalproperty_sql
freespaceproperty_sql
checksumproperty_sql
mergeblockratioproperty_sql
datablocksizeproperty_sql
blockcompressionproperty_sql
isolatedloadingproperty_sql
partitionboundspec_sql
partitionedofproperty_sql
lockingproperty_sql
withdataproperty_sql
withsystemversioningproperty_sql
insert_sql
intersect_sql
introducer_sql
kill_sql
pseudotype_sql
objectidentifier_sql
onconflict_sql
returning_sql
rowformatdelimitedproperty_sql
withtablehint_sql
indextablehint_sql
historicaldata_sql
table_parts
table_sql
tablesample_sql
pivot_sql
version_sql
tuple_sql
update_sql
var_sql
into_sql
from_sql
groupingsets_sql
rollup_sql
cube_sql
group_sql
having_sql
connect_sql
prior_sql
join_sql
lambda_sql
lateral_op
lateral_sql
limit_sql
offset_sql
setitem_sql
set_sql
pragma_sql
lock_sql
literal_sql
escape_str
loaddata_sql
null_sql
boolean_sql
order_sql
withfill_sql
distribute_sql
sort_sql
ordered_sql
matchrecognizemeasure_sql
matchrecognize_sql
query_modifiers
options_modifier
queryoption_sql
offset_limit_modifiers
after_limit_modifiers
select_sql
schema_sql
schema_columns_sql
star_sql
parameter_sql
sessionparameter_sql
placeholder_sql
subquery_sql
qualify_sql
set_operations
union_sql
union_op
prewhere_sql
where_sql
window_sql
partition_by_sql
windowspec_sql
withingroup_sql
between_sql
bracket_offset_expressions
bracket_sql
all_sql
any_sql
exists_sql
case_sql
constraint_sql
nextvaluefor_sql
extract_sql
trim_sql
convert_concat_args
concat_sql
concatws_sql
check_sql
foreignkey_sql
primarykey_sql
if_sql
matchagainst_sql
jsonkeyvalue_sql
jsonpath_sql
json_path_part
formatjson_sql
jsonobject_sql
jsonobjectagg_sql
jsonarray_sql
jsonarrayagg_sql
jsoncolumndef_sql
jsonschema_sql
jsontable_sql
openjsoncolumndef_sql
openjson_sql
in_sql
in_unnest_op
interval_sql
return_sql
reference_sql
anonymous_sql
paren_sql
neg_sql
not_sql
alias_sql
pivotalias_sql
aliases_sql
atindex_sql
attimezone_sql
fromtimezone_sql
add_sql
and_sql
or_sql
xor_sql
connector_sql
bitwiseand_sql
bitwiseleftshift_sql
bitwisenot_sql
bitwiseor_sql
bitwiserightshift_sql
bitwisexor_sql
cast_sql
currentdate_sql
collate_sql
command_sql
comment_sql
mergetreettlaction_sql
mergetreettl_sql
transaction_sql
commit_sql
rollback_sql
altercolumn_sql
alterdiststyle_sql
altersortkey_sql
renametable_sql
renamecolumn_sql
alter_sql
add_column_sql
droppartition_sql
addconstraint_sql
distinct_sql
ignorenulls_sql
respectnulls_sql
havingmax_sql
intdiv_sql
dpipe_sql
div_sql
overlaps_sql
distance_sql
dot_sql
eq_sql
propertyeq_sql
escape_sql
glob_sql
gt_sql
gte_sql
ilike_sql
ilikeany_sql
is_sql
like_sql
likeany_sql
similarto_sql
lt_sql
lte_sql
mod_sql
mul_sql
neq_sql
nullsafeeq_sql
nullsafeneq_sql
slice_sql
sub_sql
try_sql
use_sql
binary
function_fallback_sql
func
format_args
too_wide
format_time
expressions
op_expressions
naked_property
tag_sql
token_sql
userdefinedfunction_sql
joinhint_sql
kwarg_sql
when_sql
merge_sql
tochar_sql
dictproperty_sql
dictrange_sql
dictsubproperty_sql
oncluster_sql
clusteredbyproperty_sql
anyvalue_sql
querytransform_sql
indexconstraintoption_sql
checkcolumnconstraint_sql
indexcolumnconstraint_sql
nvl2_sql
comprehension_sql
columnprefix_sql
opclass_sql
predict_sql
forin_sql
refresh_sql
toarray_sql
tsordstotime_sql
tsordstotimestamp_sql
tsordstodate_sql
unixdate_sql
lastday_sql
dateadd_sql
arrayany_sql
partitionrange_sql
truncatetable_sql
convert_sql
copyparameter_sql
credentials_sql
copy_sql
semicolon_sql
datadeletionproperty_sql
maskingpolicycolumnconstraint_sql
gapfill_sql
scope_resolution
scoperesolution_sql
parsejson_sql
rand_sql
changes_sql
pad_sql
summarize_sql
explodinggenerateseries_sql
arrayconcat_sql
converttimezone_sql
json_sql
jsonvalue_sql