Skip to content

Commit

Permalink
feat(spark, bigquery): Add support for UNIX_SECONDS(...)
Browse files Browse the repository at this point in the history
  • Loading branch information
VaggelisD committed Nov 5, 2024
1 parent 71f4a47 commit e290d11
Show file tree
Hide file tree
Showing 5 changed files with 40 additions and 0 deletions.
1 change: 1 addition & 0 deletions sqlglot/dialects/bigquery.py
Original file line number Diff line number Diff line change
Expand Up @@ -704,6 +704,7 @@ class Generator(generator.Generator):
WITH_PROPERTIES_PREFIX = "OPTIONS"
SUPPORTS_EXPLODING_PROJECTIONS = False
EXCEPT_INTERSECT_SUPPORT_ALL_CLAUSE = False
SUPPORTS_UNIX_SECONDS = True

TRANSFORMS = {
**generator.Generator.TRANSFORMS,
Expand Down
1 change: 1 addition & 0 deletions sqlglot/dialects/spark.py
Original file line number Diff line number Diff line change
Expand Up @@ -137,6 +137,7 @@ class Generator(Spark2.Generator):
PAD_FILL_PATTERN_IS_REQUIRED = False
SUPPORTS_CONVERT_TIMEZONE = True
SUPPORTS_MEDIAN = True
SUPPORTS_UNIX_SECONDS = True

TYPE_MAPPING = {
**Spark2.Generator.TYPE_MAPPING,
Expand Down
4 changes: 4 additions & 0 deletions sqlglot/expressions.py
Original file line number Diff line number Diff line change
Expand Up @@ -6516,6 +6516,10 @@ class UnixToTimeStr(Func):
pass


class UnixSeconds(Func):
    """AST node for UNIX_SECONDS(timestamp): seconds elapsed since the Unix epoch."""


class Uuid(Func):
_sql_names = ["UUID", "GEN_RANDOM_UUID", "GENERATE_UUID", "UUID_STRING"]

Expand Down
15 changes: 15 additions & 0 deletions sqlglot/generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -436,6 +436,9 @@ class Generator(metaclass=_Generator):
# Whether MEDIAN(expr) is supported; if not, it will be generated as PERCENTILE_CONT(expr, 0.5)
SUPPORTS_MEDIAN = True

# Whether UNIX_SECONDS(timestamp) is supported
SUPPORTS_UNIX_SECONDS = False

# The name to generate for the JSONPath expression. If `None`, only `this` will be generated
PARSE_JSON_NAME: t.Optional[str] = "PARSE_JSON"

Expand Down Expand Up @@ -4472,3 +4475,15 @@ def overflowtruncatebehavior_sql(self, expression: exp.OverflowTruncateBehavior)
filler = f" {filler}" if filler else ""
with_count = "WITH COUNT" if expression.args.get("with_count") else "WITHOUT COUNT"
return f"TRUNCATE{filler} {with_count}"

def unixseconds_sql(self, expression: exp.UnixSeconds) -> str:
    """Generate SQL for an `exp.UnixSeconds` node.

    Dialects that support UNIX_SECONDS natively emit the function call
    verbatim; for the rest the call is rewritten as a timestamp
    difference, in seconds, from the Unix epoch.
    """
    if self.SUPPORTS_UNIX_SECONDS:
        return self.function_fallback_sql(expression)

    # Fallback: UNIX_SECONDS(ts) -> TIMESTAMPDIFF(SECONDS, '1970-01-01 ...'::TIMESTAMPTZ, ts)
    epoch = exp.cast(
        exp.Literal.string("1970-01-01 00:00:00+00"), to=exp.DataType.Type.TIMESTAMPTZ
    )
    diff = exp.TimestampDiff(
        this=expression.this, expression=epoch, unit=exp.var("SECONDS")
    )
    return self.sql(diff)
19 changes: 19 additions & 0 deletions tests/dialects/test_bigquery.py
Original file line number Diff line number Diff line change
Expand Up @@ -2095,3 +2095,22 @@ def test_json_extract_scalar(self):
"snowflake": """SELECT JSON_EXTRACT_PATH_TEXT('{"name": "Jakob", "age": "6"}', 'age')""",
},
)

def test_unix_seconds(self):
    """UNIX_SECONDS round-trips across BigQuery/Spark/Databricks and is
    rewritten as an epoch timestamp diff for dialects without native support."""
    sql = "SELECT UNIX_SECONDS('2008-12-25 15:30:00+00')"
    native_dialects = ("bigquery", "spark", "databricks")

    self.validate_all(
        sql,
        read={dialect: sql for dialect in native_dialects},
        write={
            "spark": sql,
            "databricks": sql,
            "duckdb": "SELECT DATE_DIFF('SECONDS', CAST('1970-01-01 00:00:00+00' AS TIMESTAMPTZ), '2008-12-25 15:30:00+00')",
            "snowflake": "SELECT TIMESTAMPDIFF(SECONDS, CAST('1970-01-01 00:00:00+00' AS TIMESTAMPTZ), '2008-12-25 15:30:00+00')",
        },
    )

    # Each supporting dialect should parse the call into the dedicated node.
    for dialect in native_dialects:
        parse_one("UNIX_SECONDS(col)", dialect=dialect).assert_is(exp.UnixSeconds)

0 comments on commit e290d11

Please sign in to comment.