
fastflowtransform.snapshots.runtime

BaseSnapshotRuntime

Base snapshot runtime mirroring the contracts runtime pattern.

Engines provide small hooks for identifier qualification, expressions, staging, and execution. All column names come from the executor constants.
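A minimal sketch of what such an engine integration can look like. It assumes BaseSnapshotRuntime is importable from the module path shown below and uses a hypothetical ExampleSnapshotExecutor; the concrete runtimes documented further down (BigQuery, Databricks/Spark, DuckDB, Postgres, Snowflake) are the real implementations.

from fastflowtransform.snapshots.runtime.base import BaseSnapshotRuntime

class ExampleSnapshotRuntime(BaseSnapshotRuntime["ExampleSnapshotExecutor"]):
    # Hypothetical engine: only the abstract hooks need to be provided.

    def _snapshot_target_identifier(self, rel_name: str) -> str:
        # Fully qualified/quoted name of the snapshot target table.
        return f'"{rel_name}"'

    def _snapshot_current_timestamp(self) -> str:
        # SQL expression for "now" in this dialect.
        return "CURRENT_TIMESTAMP"

    def _snapshot_null_timestamp(self) -> str:
        # Typed NULL used to initialise _ff_valid_to for open rows.
        return "CAST(NULL AS TIMESTAMP)"

    def _snapshot_null_hash(self) -> str:
        # Typed NULL stored in _ff_snapshot_hash for timestamp-strategy snapshots.
        return "CAST(NULL AS VARCHAR)"

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:
        # Row hash over check_cols; reuses the shared COALESCE/CAST concat helper.
        return f"MD5({self._snapshot_concat_expr(check_cols, src_alias)})"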

Source code in src/fastflowtransform/snapshots/runtime/base.py, lines 47-465
class BaseSnapshotRuntime[E: SnapshotExecutor]:
    """
    Base snapshot runtime mirroring the contracts runtime pattern.

    Engines provide small hooks for identifier qualification, expressions,
    staging, and execution. All column names come from the executor constants.
    """

    # Standard snapshot metadata column names (single source of truth for runtimes).
    SNAPSHOT_VALID_FROM_COL = "_ff_valid_from"
    SNAPSHOT_VALID_TO_COL = "_ff_valid_to"
    SNAPSHOT_IS_CURRENT_COL = "_ff_is_current"
    SNAPSHOT_HASH_COL = "_ff_snapshot_hash"
    SNAPSHOT_UPDATED_AT_COL = "_ff_updated_at"

    executor: E

    def __init__(self, executor: E):
        self.executor = executor

    # ---- Public entrypoints -------------------------------------------------
    def run_snapshot_sql(self, node: Node, env: Environment) -> None:
        ex = self.executor

        meta = self._snapshot_validate_node(node)
        cfg = resolve_snapshot_config(node, meta)

        body = self._snapshot_render_body(node, env)
        rel_name = relation_for(node.name)
        target = self._snapshot_target_identifier(rel_name)
        if not cfg.unique_key:
            raise ValueError(f"{node.path}: snapshot models require a non-empty unique_key list.")

        vf = self.SNAPSHOT_VALID_FROM_COL
        vt = self.SNAPSHOT_VALID_TO_COL
        is_cur = self.SNAPSHOT_IS_CURRENT_COL
        hash_col = self.SNAPSHOT_HASH_COL
        upd_meta = self.SNAPSHOT_UPDATED_AT_COL

        self._snapshot_prepare_target()

        # First run: create snapshot table
        if not ex.exists_relation(rel_name):
            sql = self._snapshot_first_run_sql(
                body=body,
                strategy=cfg.strategy,
                unique_key=cfg.unique_key,
                updated_at=cfg.updated_at,
                check_cols=cfg.check_cols,
                target=target,
                vf=vf,
                vt=vt,
                is_cur=is_cur,
                hash_col=hash_col,
                upd_meta=upd_meta,
            )
            self._snapshot_exec_and_wait(sql)
            return

        # Incremental update
        src_ref, cleanup = self._snapshot_source_ref(rel_name, body)
        try:
            keys_pred = " AND ".join([f"t.{k} = s.{k}" for k in cfg.unique_key]) or "FALSE"

            if cfg.strategy == "timestamp":
                if not cfg.updated_at:
                    raise ValueError(
                        f"{node.path}: strategy='timestamp' snapshots require an updated_at column."
                    )
                change_condition = f"s.{cfg.updated_at} > t.{upd_meta}"
                new_upd_expr = self._snapshot_updated_at_expr(cfg.updated_at, "s")
                new_valid_from_expr = self._snapshot_updated_at_expr(cfg.updated_at, "s")
                new_hash_expr = self._snapshot_null_hash()
            else:
                hash_expr_s = self._snapshot_hash_expr(cfg.check_cols, "s")
                change_condition = f"COALESCE({hash_expr_s}, '') <> COALESCE(t.{hash_col}, '')"
                new_upd_expr = (
                    self._snapshot_updated_at_expr(cfg.updated_at, "s")
                    if cfg.updated_at
                    else self._snapshot_current_timestamp()
                )
                new_valid_from_expr = self._snapshot_current_timestamp()
                new_hash_expr = hash_expr_s

            close_sql = self._snapshot_close_sql(
                target=target,
                src_ref=src_ref,
                keys_pred=keys_pred,
                change_condition=change_condition,
                vt=vt,
                is_cur=is_cur,
            )
            self._snapshot_exec_and_wait(close_sql)

            insert_sql = self._snapshot_insert_sql(
                target=target,
                src_ref=src_ref,
                keys_pred=keys_pred,
                first_key=cfg.unique_key[0],
                new_upd_expr=new_upd_expr,
                new_valid_from_expr=new_valid_from_expr,
                new_hash_expr=new_hash_expr,
                change_condition=change_condition,
                vf=vf,
                vt=vt,
                is_cur=is_cur,
                hash_col=hash_col,
                upd_meta=upd_meta,
            )
            self._snapshot_exec_and_wait(insert_sql)
        finally:
            with suppress(Exception):
                cleanup()

    def snapshot_prune(
        self,
        relation: str,
        unique_key: list[str],
        keep_last: int,
        *,
        dry_run: bool = False,
    ) -> None:
        """
        Delete older snapshot versions while keeping the most recent `keep_last`
        rows per business key (including the current row).
        """
        ex = self.executor

        if keep_last <= 0:
            return

        keys = [k for k in unique_key if k]
        if not keys:
            return

        target = self._snapshot_target_identifier(relation)
        vf = self.SNAPSHOT_VALID_FROM_COL

        key_select = ", ".join(keys)
        part_by = ", ".join(keys)

        ranked_sql = f"""
SELECT
  {key_select},
  {vf},
  ROW_NUMBER() OVER (
    PARTITION BY {part_by}
    ORDER BY {vf} DESC
  ) AS rn
FROM {target}
"""

        if dry_run:
            sql = f"""
WITH ranked AS (
  {ranked_sql}
)
SELECT COUNT(*) AS rows_to_delete
FROM ranked
WHERE rn > {int(keep_last)}
"""
            res = ex._execute_sql(sql)
            count = self._snapshot_fetch_count(res)
            echo(
                f"[DRY-RUN] snapshot_prune({relation}): would delete {count} row(s) "
                f"(keep_last={keep_last})"
            )
            return

        join_pred = " AND ".join([f"t.{k} = r.{k}" for k in keys])
        delete_sql = f"""
DELETE FROM {target} t
USING (
  {ranked_sql}
) r
WHERE
  r.rn > {int(keep_last)}
  AND {join_pred}
  AND t.{vf} = r.{vf}
"""
        ex._execute_sql(delete_sql)

    # ---- Core SQL builders -------------------------------------------------
    def _snapshot_first_run_sql(
        self,
        *,
        body: str,
        strategy: str,
        unique_key: list[str],
        updated_at: str | None,
        check_cols: list[str],
        target: str,
        vf: str,
        vt: str,
        is_cur: str,
        hash_col: str,
        upd_meta: str,
    ) -> str:
        if not unique_key:
            raise ValueError("Snapshot models require a non-empty unique_key list.")

        if strategy == "timestamp":
            if not updated_at:
                raise ValueError("strategy='timestamp' snapshots require an updated_at column.")
            return f"""
{self._snapshot_create_keyword()} {target} AS
SELECT
  s.*,
  {self._snapshot_updated_at_expr(updated_at, "s")} AS {upd_meta},
  {self._snapshot_updated_at_expr(updated_at, "s")} AS {vf},
  {self._snapshot_null_timestamp()} AS {vt},
  TRUE AS {is_cur},
  {self._snapshot_null_hash()} AS {hash_col}
FROM ({body}) AS s
"""

        if not check_cols:
            raise ValueError("strategy='check' snapshots require non-empty check_cols.")

        hash_expr = self._snapshot_hash_expr(check_cols, "s")
        upd_expr = (
            self._snapshot_updated_at_expr(updated_at, "s")
            if updated_at
            else self._snapshot_current_timestamp()
        )
        return f"""
{self._snapshot_create_keyword()} {target} AS
SELECT
  s.*,
  {upd_expr} AS {upd_meta},
  {self._snapshot_current_timestamp()} AS {vf},
  {self._snapshot_null_timestamp()} AS {vt},
  TRUE AS {is_cur},
  {hash_expr} AS {hash_col}
FROM ({body}) AS s
"""

    def _snapshot_close_sql(
        self,
        *,
        target: str,
        src_ref: str,
        keys_pred: str,
        change_condition: str,
        vt: str,
        is_cur: str,
    ) -> str:
        return f"""
UPDATE {target} AS t
SET
  {vt} = {self._snapshot_current_timestamp()},
  {is_cur} = FALSE
FROM {src_ref} AS s
WHERE
  {keys_pred}
  AND t.{is_cur} = TRUE
  AND {change_condition}
"""

    def _snapshot_insert_sql(
        self,
        *,
        target: str,
        src_ref: str,
        keys_pred: str,
        first_key: str,
        new_upd_expr: str,
        new_valid_from_expr: str,
        new_hash_expr: str,
        change_condition: str,
        vf: str,
        vt: str,
        is_cur: str,
        hash_col: str,
        upd_meta: str,
    ) -> str:
        return f"""
INSERT INTO {target}
SELECT
  s.*,
  {new_upd_expr} AS {upd_meta},
  {new_valid_from_expr} AS {vf},
  {self._snapshot_null_timestamp()} AS {vt},
  TRUE AS {is_cur},
  {new_hash_expr} AS {hash_col}
FROM {src_ref} AS s
LEFT JOIN {target} AS t
  ON {keys_pred}
  AND t.{is_cur} = TRUE
WHERE
  t.{first_key} IS NULL
  OR {change_condition}
"""

    # ---- Rendering helpers -------------------------------------------------
    def _snapshot_render_body(self, node: Node, env: Environment) -> str:
        ex = self.executor

        sql_rendered = ex.render_sql(
            node,
            env,
            ref_resolver=lambda name: ex._resolve_ref(name, env),
            source_resolver=ex._resolve_source,
        )
        sql_clean = ex._strip_leading_config(sql_rendered).strip()
        return ex._selectable_body(sql_clean).rstrip(" ;\n\t")

    def _snapshot_validate_node(self, node: Node) -> dict[str, Any]:
        ex = self.executor

        if node.kind != "sql":
            raise TypeError(
                f"Snapshot materialization is only supported for SQL models, "
                f"got kind={node.kind!r} for {node.name}."
            )

        meta = getattr(node, "meta", {}) or {}
        if not ex._meta_is_snapshot(meta):
            raise ValueError(f"Node {node.name} is not configured with materialized='snapshot'.")
        return meta

    # ---- Staging -----------------------------------------------------------
    def _snapshot_source_ref(
        self, rel_name: str, select_body: str
    ) -> tuple[str, Callable[[], None]]:
        """
        Return (source_ref, cleanup). Default: inline subquery.
        Engines can override to use temp views/tables and cleanup afterward.
        """
        return f"({select_body})", lambda: None

    # ---- Hooks (must be provided by engines) -------------------------------
    def _snapshot_target_identifier(self, rel_name: str) -> str:  # pragma: no cover - abstract
        raise NotImplementedError

    def _snapshot_current_timestamp(self) -> str:  # pragma: no cover - abstract
        raise NotImplementedError

    def _snapshot_null_timestamp(self) -> str:  # pragma: no cover - abstract
        raise NotImplementedError

    def _snapshot_null_hash(self) -> str:  # pragma: no cover - abstract
        raise NotImplementedError

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:  # pragma: no cover
        raise NotImplementedError

    # ---- Optional overrides -----------------------------------------------
    def _snapshot_cast_as_string(self, expr: str) -> str:
        return f"CAST({expr} AS STRING)"

    def _snapshot_updated_at_expr(self, updated_at: str, src_alias: str) -> str:
        return f"{src_alias}.{updated_at}"

    def _snapshot_prepare_target(self) -> None:
        """Hook for engines that need to ensure dataset/schema before writes."""
        return None

    def _snapshot_exec_and_wait(self, sql: str) -> None:
        """
        Execute SQL and, if necessary, wait for completion (jobs, lazy DataFrames).
        """
        res = self.executor._execute_sql(sql)
        if res is None:
            return
        for attr in ("result", "collect"):
            fn = getattr(res, attr, None)
            if callable(fn):
                with suppress(Exception):
                    fn()
                break

    # ---- Helpers -----------------------------------------------------------
    def _snapshot_concat_expr(self, columns: list[str], src_alias: str) -> str:
        parts = [
            self._snapshot_coalesce(self._snapshot_cast_as_string(f"{src_alias}.{col}"), "''")
            for col in columns
        ]
        return " || '||' || ".join(parts) if parts else "''"

    def _snapshot_coalesce(self, expr: str, default: str) -> str:
        return f"COALESCE({expr}, {default})"

    def _snapshot_create_keyword(self) -> str:
        """Hook to allow engines to override CREATE vs CREATE OR REPLACE."""
        return "CREATE TABLE"

    def _snapshot_fetch_count(self, res: Any) -> int:
        """
        Best-effort extraction of a single COUNT(*) value from various result shapes.
        """
        try:
            if hasattr(res, "fetchone"):
                row = res.fetchone()
                if row is not None:
                    return int(row[0])
            if hasattr(res, "fetchall"):
                rows = res.fetchall()
                if rows:
                    return int(rows[0][0])
            result_fn = getattr(res, "result", None)
            if callable(result_fn):
                rows_obj = result_fn()
                if isinstance(rows_obj, Iterable):
                    rows = list(rows_obj)
                    if rows:
                        return int(rows[0][0])
            collect_fn = getattr(res, "collect", None)
            if callable(collect_fn):
                rows_obj = collect_fn()
                if isinstance(rows_obj, Iterable):
                    rows = list(rows_obj)
                    if rows:
                        return int(rows[0][0])
            if isinstance(res, list) and res:
                return int(res[0][0])
        except Exception:
            return 0
        return 0

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row).
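A short usage sketch. The relation and key names are illustrative, and constructing the runtime/executor pair is engine-specific and omitted here.

# runtime is any concrete snapshot runtime, e.g. DuckSnapshotRuntime(executor)

# Dry run: only report how many historical rows would be removed.
runtime.snapshot_prune("dim_customers", ["customer_id"], keep_last=3, dry_run=True)

# Actually prune, keeping the 3 newest versions per customer_id (incl. the current row).
runtime.snapshot_prune("dim_customers", ["customer_id"], keep_last=3)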

Source code in src/fastflowtransform/snapshots/runtime/base.py, lines 161-227
    def snapshot_prune(
        self,
        relation: str,
        unique_key: list[str],
        keep_last: int,
        *,
        dry_run: bool = False,
    ) -> None:
        """
        Delete older snapshot versions while keeping the most recent `keep_last`
        rows per business key (including the current row).
        """
        ex = self.executor

        if keep_last <= 0:
            return

        keys = [k for k in unique_key if k]
        if not keys:
            return

        target = self._snapshot_target_identifier(relation)
        vf = self.SNAPSHOT_VALID_FROM_COL

        key_select = ", ".join(keys)
        part_by = ", ".join(keys)

        ranked_sql = f"""
SELECT
  {key_select},
  {vf},
  ROW_NUMBER() OVER (
    PARTITION BY {part_by}
    ORDER BY {vf} DESC
  ) AS rn
FROM {target}
"""

        if dry_run:
            sql = f"""
WITH ranked AS (
  {ranked_sql}
)
SELECT COUNT(*) AS rows_to_delete
FROM ranked
WHERE rn > {int(keep_last)}
"""
            res = ex._execute_sql(sql)
            count = self._snapshot_fetch_count(res)
            echo(
                f"[DRY-RUN] snapshot_prune({relation}): would delete {count} row(s) "
                f"(keep_last={keep_last})"
            )
            return

        join_pred = " AND ".join([f"t.{k} = r.{k}" for k in keys])
        delete_sql = f"""
DELETE FROM {target} t
USING (
  {ranked_sql}
) r
WHERE
  r.rn > {int(keep_last)}
  AND {join_pred}
  AND t.{vf} = r.{vf}
"""
        ex._execute_sql(delete_sql)

BigQuerySnapshotRuntime

Bases: BaseSnapshotRuntime[BigQuerySnapshotExecutor]

Snapshot runtime for BigQuery, matching the legacy mixin hooks.
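For check-strategy snapshots, the hooks below combine with the base class's concat helper so that, for illustrative check_cols of ['status', 'amount'], the generated row-hash expression looks like this (executor construction omitted):

runtime = BigQuerySnapshotRuntime(executor)
runtime._snapshot_hash_expr(["status", "amount"], "s")
# -> TO_HEX(MD5(COALESCE(CAST(s.status AS STRING), '') || '||' || COALESCE(CAST(s.amount AS STRING), '')))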

Source code in src/fastflowtransform/snapshots/runtime/bigquery.py, lines 20-50
class BigQuerySnapshotRuntime(BaseSnapshotRuntime[BigQuerySnapshotExecutor]):
    """
    Snapshot runtime for BigQuery, matching the legacy mixin hooks.
    """

    # ---- Engine hooks -----------------------------------------------------
    def _snapshot_prepare_target(self) -> None:
        self.executor._ensure_dataset()

    def _snapshot_target_identifier(self, rel_name: str) -> str:
        return self.executor._qualified_identifier(
            rel_name,
            project=getattr(self.executor, "project", None),
            dataset=getattr(self.executor, "dataset", None),
        )

    def _snapshot_current_timestamp(self) -> str:
        return "CURRENT_TIMESTAMP()"

    def _snapshot_null_timestamp(self) -> str:
        return "CAST(NULL AS TIMESTAMP)"

    def _snapshot_null_hash(self) -> str:
        return "CAST(NULL AS STRING)"

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:
        concat_expr = self._snapshot_concat_expr(check_cols, src_alias)
        return f"TO_HEX(MD5({concat_expr}))"

    def _snapshot_cast_as_string(self, expr: str) -> str:
        return f"CAST({expr} AS STRING)"

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row).

Inherited from BaseSnapshotRuntime (source in src/fastflowtransform/snapshots/runtime/base.py, lines 161-227; shown above).

DatabricksSparkSnapshotRuntime

Snapshot runtime for Databricks/Spark (Delta/Parquet/Iceberg), extracted from the executor. Uses Spark DataFrame operations instead of SQL strings.
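The check-strategy row hash is therefore built with DataFrame column expressions rather than a SQL string; a standalone PySpark sketch of the same expression (column names illustrative) is:

from pyspark.sql import functions as F

check_cols = ["status", "amount"]  # illustrative
cols_expr = [F.coalesce(F.col(c).cast("string"), F.lit("")) for c in check_cols]
row_hash = F.md5(F.concat_ws("||", *cols_expr)).cast("string")
# df.withColumn("_ff_snapshot_hash", row_hash) reproduces the hash the runtime stores.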

Source code in src/fastflowtransform/snapshots/runtime/databricks_spark.py, lines 27-357
class DatabricksSparkSnapshotRuntime:
    """
    Snapshot runtime for Databricks/Spark (Delta/Parquet/Iceberg), extracted
    from the executor. Uses Spark DataFrame operations instead of SQL strings.
    """

    SNAPSHOT_VALID_FROM_COL = "_ff_valid_from"
    SNAPSHOT_VALID_TO_COL = "_ff_valid_to"
    SNAPSHOT_IS_CURRENT_COL = "_ff_is_current"
    SNAPSHOT_HASH_COL = "_ff_snapshot_hash"
    SNAPSHOT_UPDATED_AT_COL = "_ff_updated_at"

    executor: DatabricksSnapshotExecutor

    def __init__(self, executor: DatabricksSnapshotExecutor):
        self.executor = executor

    def run_snapshot_sql(self, node: Node, env: Any) -> None:
        ex = self.executor
        F = get_spark_functions()

        meta = self._validate_snapshot_node(node)
        cfg = resolve_snapshot_config(node, meta)

        strategy = cfg.strategy
        unique_key = cfg.unique_key
        updated_at = cfg.updated_at
        check_cols = cfg.check_cols

        body, rel_name, physical = self._snapshot_sql_body(node, env)

        vf = self.SNAPSHOT_VALID_FROM_COL
        vt = self.SNAPSHOT_VALID_TO_COL
        is_cur = self.SNAPSHOT_IS_CURRENT_COL
        hash_col = self.SNAPSHOT_HASH_COL
        upd_meta = self.SNAPSHOT_UPDATED_AT_COL

        if not ex.exists_relation(rel_name):
            self._snapshot_first_run(
                node=node,
                rel_name=rel_name,
                body=body,
                strategy=strategy,
                updated_at=updated_at,
                check_cols=check_cols,
                F=F,
                vf=vf,
                vt=vt,
                is_cur=is_cur,
                hash_col=hash_col,
                upd_meta=upd_meta,
            )
            return

        self._snapshot_incremental_run(
            node=node,
            body=body,
            rel_name=rel_name,
            physical=physical,
            strategy=strategy,
            unique_key=unique_key,
            updated_at=updated_at,
            check_cols=check_cols,
            F=F,
            vf=vf,
            vt=vt,
            is_cur=is_cur,
            hash_col=hash_col,
            upd_meta=upd_meta,
        )

    def snapshot_prune(
        self,
        relation: str,
        unique_key: list[str],
        keep_last: int,
        *,
        dry_run: bool = False,
    ) -> None:
        """
        Delete older snapshot versions while keeping the most recent `keep_last`
        rows per business key (including the current row), implemented as a
        DataFrame overwrite (no in-place DELETE).
        """
        if keep_last <= 0:
            return

        Window = get_spark_window()
        F = get_spark_functions()
        ex = self.executor

        if not unique_key:
            return

        vf = self.SNAPSHOT_VALID_FROM_COL

        try:
            physical = ex._physical_identifier(relation)
            df = ex.spark.table(physical)
        except Exception:
            return

        w = Window.partitionBy(*[F.col(k) for k in unique_key]).orderBy(F.col(vf).desc())
        ranked = df.withColumn("__ff_rn", F.row_number().over(w))

        if dry_run:
            cnt = ranked.filter(F.col("__ff_rn") > int(keep_last)).count()

            echo(
                f"[DRY-RUN] snapshot_prune({relation}): would delete {cnt} row(s) "
                f"(keep_last={keep_last})"
            )
            return

        pruned = ranked.filter(F.col("__ff_rn") <= int(keep_last)).drop("__ff_rn")

        # Materialize before overwrite to avoid Spark's self-read/overwrite issues.
        materialized: list[Any] = []

        def _materialize(df_any: Any) -> Any:
            try:
                cp = df_any.localCheckpoint(eager=True)
                materialized.append(cp)
                return cp
            except Exception:
                cached = df_any.cache()
                cached.count()
                materialized.append(cached)
                return cached

        try:
            out = _materialize(pruned)
            ex._save_df_as_table(relation, out)
        finally:
            for handle in materialized:
                with suppress(Exception):
                    handle.unpersist()

    # ---- Helpers ---------------------------------------------------------
    def _validate_snapshot_node(self, node: Node) -> dict[str, Any]:
        ex = self.executor
        if node.kind != "sql":
            raise TypeError(
                f"Snapshot materialization is only supported for SQL models, "
                f"got kind={node.kind!r} for {node.name}."
            )

        meta = getattr(node, "meta", {}) or {}
        if not ex._meta_is_snapshot(meta):
            raise ValueError(f"Node {node.name} is not configured with materialized='snapshot'.")
        return meta

    def _snapshot_sql_body(
        self,
        node: Node,
        env: Any,
    ) -> tuple[str, str, str]:
        ex = self.executor
        sql_rendered = ex.render_sql(
            node,
            env,
            ref_resolver=lambda name: ex._resolve_ref(name, env),
            source_resolver=ex._resolve_source,
        )
        sql_clean = ex._strip_leading_config(sql_rendered).strip()
        body = ex._selectable_body(sql_clean).rstrip(" ;\n\t")

        rel_name = relation_for(node.name)
        physical = ex._physical_identifier(rel_name)
        return body, rel_name, physical

    def _snapshot_first_run(
        self,
        *,
        node: Node,
        rel_name: str,
        body: str,
        strategy: str,
        updated_at: str | None,
        check_cols: list[str],
        F: Any,
        vf: str,
        vt: str,
        is_cur: str,
        hash_col: str,
        upd_meta: str,
    ) -> None:
        ex = self.executor
        src_df = ex._execute_sql(body)

        echo_debug(f"[snapshot] first run for {rel_name} (strategy={strategy})")

        if strategy == "timestamp":
            assert updated_at is not None, (
                "timestamp snapshots require a non-null updated_at column"
            )
            df_snap = (
                src_df.withColumn(upd_meta, F.col(updated_at))
                .withColumn(vf, F.col(updated_at))
                .withColumn(vt, F.lit(None).cast("timestamp"))
                .withColumn(is_cur, F.lit(True))
                .withColumn(hash_col, F.lit(None).cast("string"))
            )
        else:
            cols_expr = [F.coalesce(F.col(c).cast("string"), F.lit("")) for c in check_cols]
            concat_expr = F.concat_ws("||", *cols_expr)
            hash_expr = F.md5(concat_expr).cast("string")
            upd_expr = F.col(updated_at) if updated_at else F.current_timestamp()

            df_snap = (
                src_df.withColumn(upd_meta, upd_expr)
                .withColumn(vf, F.current_timestamp())
                .withColumn(vt, F.lit(None).cast("timestamp"))
                .withColumn(is_cur, F.lit(True))
                .withColumn(hash_col, hash_expr)
            )

        storage_meta = ex._storage_meta(node, rel_name)
        ex._save_df_as_table(rel_name, df_snap, storage=storage_meta)

    def _snapshot_incremental_run(
        self,
        *,
        node: Node,
        body: str,
        rel_name: str,
        physical: str,
        strategy: str,
        unique_key: list[str],
        updated_at: str | None,
        check_cols: list[str],
        F: Any,
        vf: str,
        vt: str,
        is_cur: str,
        hash_col: str,
        upd_meta: str,
    ) -> None:
        ex = self.executor
        echo_debug(f"[snapshot] incremental run for {rel_name} (strategy={strategy})")

        existing = ex.spark.table(physical)
        src_df = ex._execute_sql(body)

        missing_keys_src = [k for k in unique_key if k not in src_df.columns]
        missing_keys_snap = [k for k in unique_key if k not in existing.columns]
        if missing_keys_src or missing_keys_snap:
            raise ValueError(
                f"{node.path}: snapshot unique_key columns must exist on both source and "
                f"snapshot table. Missing on source={missing_keys_src}, "
                f"on snapshot={missing_keys_snap}."
            )

        if strategy == "check":
            cols_expr = [F.coalesce(F.col(c).cast("string"), F.lit("")) for c in check_cols]
            concat_expr = F.concat_ws("||", *cols_expr)
            src_df = src_df.withColumn("__ff_new_hash", F.md5(concat_expr).cast("string"))

        current_df = existing.filter(F.col(is_cur) == True)  # noqa: E712

        s_alias = src_df.alias("s")
        t_alias = current_df.alias("t")
        joined = s_alias.join(t_alias, on=unique_key, how="left")

        if strategy == "timestamp":
            assert updated_at is not None, (
                "timestamp snapshots require a non-null updated_at column"
            )
            s_upd = F.col(f"s.{updated_at}")
            t_upd = F.col(f"t.{upd_meta}")
            cond_new = t_upd.isNull()
            cond_changed = t_upd.isNotNull() & (s_upd > t_upd)
            changed_or_new = cond_new | cond_changed
        else:
            s_hash = F.col("s.__ff_new_hash")
            t_hash = F.col(f"t.{hash_col}")
            cond_new = t_hash.isNull()
            cond_changed = t_hash.isNotNull() & (s_hash != F.coalesce(t_hash, F.lit("")))
            changed_or_new = cond_new | cond_changed

        changed_keys = (
            joined.filter(changed_or_new)
            .select(*[F.col(f"s.{k}").alias(k) for k in unique_key])
            .dropDuplicates()
        )

        prev_noncurrent = existing.filter(F.col(is_cur) == False)  # noqa: E712
        preserved_current = current_df.join(changed_keys, on=unique_key, how="left_anti")

        closed_prev = (
            current_df.join(changed_keys, on=unique_key, how="inner")
            .withColumn(vt, F.current_timestamp())
            .withColumn(is_cur, F.lit(False))
        )

        new_src = src_df.join(changed_keys, on=unique_key, how="inner")
        if strategy == "timestamp":
            assert updated_at is not None, (
                "timestamp snapshots require a non-null updated_at column"
            )
            new_versions = (
                new_src.withColumn(upd_meta, F.col(updated_at))
                .withColumn(vf, F.col(updated_at))
                .withColumn(vt, F.lit(None).cast("timestamp"))
                .withColumn(is_cur, F.lit(True))
                .withColumn(hash_col, F.lit(None).cast("string"))
            )
        else:
            upd_expr = F.col(updated_at) if updated_at else F.current_timestamp()
            new_versions = (
                new_src.withColumn(upd_meta, upd_expr)
                .withColumn(vf, F.current_timestamp())
                .withColumn(vt, F.lit(None).cast("timestamp"))
                .withColumn(is_cur, F.lit(True))
                .withColumn(hash_col, F.col("__ff_new_hash"))
            )

        parts = [prev_noncurrent, preserved_current, closed_prev, new_versions]
        snapshot_df = reduce(lambda a, b: a.unionByName(b, allowMissingColumns=True), parts)
        if "__ff_new_hash" in snapshot_df.columns:
            snapshot_df = snapshot_df.drop("__ff_new_hash")

        # Break lineage so Spark doesn't see this as "read from and overwrite the same table"
        try:
            snapshot_df = snapshot_df.localCheckpoint(eager=True)
        except Exception:
            snapshot_df = snapshot_df.cache()
            snapshot_df.count()

        storage_meta = ex._storage_meta(node, rel_name)
        ex._save_df_as_table(rel_name, snapshot_df, storage=storage_meta)

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row), implemented as a DataFrame overwrite (no in-place DELETE).

Source code in src/fastflowtransform/snapshots/runtime/databricks_spark.py, lines 98-163
def snapshot_prune(
    self,
    relation: str,
    unique_key: list[str],
    keep_last: int,
    *,
    dry_run: bool = False,
) -> None:
    """
    Delete older snapshot versions while keeping the most recent `keep_last`
    rows per business key (including the current row), implemented as a
    DataFrame overwrite (no in-place DELETE).
    """
    if keep_last <= 0:
        return

    Window = get_spark_window()
    F = get_spark_functions()
    ex = self.executor

    if not unique_key:
        return

    vf = self.SNAPSHOT_VALID_FROM_COL

    try:
        physical = ex._physical_identifier(relation)
        df = ex.spark.table(physical)
    except Exception:
        return

    w = Window.partitionBy(*[F.col(k) for k in unique_key]).orderBy(F.col(vf).desc())
    ranked = df.withColumn("__ff_rn", F.row_number().over(w))

    if dry_run:
        cnt = ranked.filter(F.col("__ff_rn") > int(keep_last)).count()

        echo(
            f"[DRY-RUN] snapshot_prune({relation}): would delete {cnt} row(s) "
            f"(keep_last={keep_last})"
        )
        return

    pruned = ranked.filter(F.col("__ff_rn") <= int(keep_last)).drop("__ff_rn")

    # Materialize before overwrite to avoid Spark's self-read/overwrite issues.
    materialized: list[Any] = []

    def _materialize(df_any: Any) -> Any:
        try:
            cp = df_any.localCheckpoint(eager=True)
            materialized.append(cp)
            return cp
        except Exception:
            cached = df_any.cache()
            cached.count()
            materialized.append(cached)
            return cached

    try:
        out = _materialize(pruned)
        ex._save_df_as_table(relation, out)
    finally:
        for handle in materialized:
            with suppress(Exception):
                handle.unpersist()

DuckSnapshotRuntime

Bases: BaseSnapshotRuntime[DuckSnapshotExecutor]

Snapshot runtime for DuckDB, extracted from the old SnapshotSqlMixin.
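Unlike the base class default, which inlines the rendered SELECT as a subquery, the DuckDB runtime stages the source in a temporary view and drops it afterwards; roughly (relation name, SELECT body, and executor construction are illustrative):

runtime = DuckSnapshotRuntime(executor)
src_ref, cleanup = runtime._snapshot_source_ref("dim_customers", "SELECT * FROM stg_customers")
# src_ref is the quoted temp view name, e.g. "__ff_snapshot_src_dim_customers";
# the runtime has already issued: create or replace temp view ... as SELECT * FROM stg_customers
try:
    pass  # build and run the close/insert statements against src_ref
finally:
    cleanup()  # drop view if exists ...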

Source code in src/fastflowtransform/snapshots/runtime/duckdb.py, lines 13-48
class DuckSnapshotRuntime(BaseSnapshotRuntime[DuckSnapshotExecutor]):
    """
    Snapshot runtime for DuckDB, extracted from the old SnapshotSqlMixin.
    """

    # ---- Engine hooks -----------------------------------------------------
    def _snapshot_target_identifier(self, rel_name: str) -> str:
        return self.executor._qualified(rel_name)

    def _snapshot_current_timestamp(self) -> str:
        return "current_timestamp"

    def _snapshot_null_timestamp(self) -> str:
        return "cast(null as timestamp)"

    def _snapshot_null_hash(self) -> str:
        return "cast(null as varchar)"

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:
        concat_expr = self._snapshot_concat_expr(check_cols, src_alias)
        return f"cast(md5({concat_expr}) as varchar)"

    def _snapshot_cast_as_string(self, expr: str) -> str:
        return f"cast({expr} as varchar)"

    def _snapshot_source_ref(
        self, rel_name: str, select_body: str
    ) -> tuple[str, Callable[[], None]]:
        src_view_name = f"__ff_snapshot_src_{rel_name}".replace(".", "_")
        src_quoted = self.executor._quote_identifier(src_view_name)
        self.executor._execute_sql(f"create or replace temp view {src_quoted} as {select_body}")

        def _cleanup() -> None:
            self.executor._execute_sql(f"drop view if exists {src_quoted}")

        return src_quoted, _cleanup

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row).

Inherited from BaseSnapshotRuntime (source in src/fastflowtransform/snapshots/runtime/base.py, lines 161-227; shown above).

PostgresSnapshotRuntime

Bases: BaseSnapshotRuntime[PostgresSnapshotExecutor]

Snapshot runtime for Postgres, extracted from the legacy mixin hooks.

Source code in src/fastflowtransform/snapshots/runtime/postgres.py, lines 14-50
class PostgresSnapshotRuntime(BaseSnapshotRuntime[PostgresSnapshotExecutor]):
    """
    Snapshot runtime for Postgres, extracted from the legacy mixin hooks.
    """

    # ---- Engine hooks -----------------------------------------------------
    def _snapshot_target_identifier(self, rel_name: str) -> str:
        return self.executor._qualified(rel_name)

    def _snapshot_current_timestamp(self) -> str:
        return "current_timestamp"

    def _snapshot_null_timestamp(self) -> str:
        return "cast(null as timestamp)"

    def _snapshot_null_hash(self) -> str:
        return "cast(null as text)"

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:
        concat_expr = self._snapshot_concat_expr(check_cols, src_alias)
        return f"md5({concat_expr})"

    def _snapshot_cast_as_string(self, expr: str) -> str:
        return f"cast({expr} as text)"

    def _snapshot_source_ref(
        self, rel_name: str, select_body: str
    ) -> tuple[str, Callable[[], None]]:
        src_name = f"__ff_snapshot_src_{rel_name}".replace(".", "_")
        src_q = _q_ident(src_name)
        self.executor._execute_sql(f"drop table if exists {src_q}")
        self.executor._execute_sql(f"create temporary table {src_q} as {select_body}")

        def _cleanup() -> None:
            self.executor._execute_sql(f"drop table if exists {src_q}")

        return src_q, _cleanup

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row).

Inherited from BaseSnapshotRuntime (source in src/fastflowtransform/snapshots/runtime/base.py, lines 161-227; shown above).

SnowflakeSnowparkSnapshotRuntime

Bases: BaseSnapshotRuntime[SnowflakeSnapshotExecutor]

Snapshot runtime for Snowflake Snowpark, matching legacy mixin hooks.

Source code in src/fastflowtransform/snapshots/runtime/snowflake_snowpark.py, lines 14-54
class SnowflakeSnowparkSnapshotRuntime(BaseSnapshotRuntime[SnowflakeSnapshotExecutor]):
    """
    Snapshot runtime for Snowflake Snowpark, matching legacy mixin hooks.
    """

    # ---- Engine hooks -----------------------------------------------------
    def _snapshot_target_identifier(self, rel_name: str) -> str:
        return self.executor._qualified(rel_name)

    def _snapshot_current_timestamp(self) -> str:
        return "CURRENT_TIMESTAMP()"

    def _snapshot_create_keyword(self) -> str:
        return "CREATE OR REPLACE TABLE"

    def _snapshot_null_timestamp(self) -> str:
        return "CAST(NULL AS TIMESTAMP)"

    def _snapshot_null_hash(self) -> str:
        return "CAST(NULL AS VARCHAR)"

    def _snapshot_hash_expr(self, check_cols: list[str], src_alias: str) -> str:
        concat_expr = self._snapshot_concat_expr(check_cols, src_alias)
        return f"CAST(MD5({concat_expr}) AS VARCHAR)"

    def _snapshot_cast_as_string(self, expr: str) -> str:
        return f"CAST({expr} AS VARCHAR)"

    def _snapshot_source_ref(
        self, rel_name: str, select_body: str
    ) -> tuple[str, Callable[[], None]]:
        src_name = f"__ff_snapshot_src_{rel_name}".replace(".", "_")
        src_quoted = _q_ident(src_name)
        self.executor._execute_sql(
            f"CREATE OR REPLACE TEMPORARY VIEW {src_quoted} AS {select_body}"
        )

        def _cleanup() -> None:
            self.executor._execute_sql(f"DROP VIEW IF EXISTS {src_quoted}")

        return src_quoted, _cleanup

snapshot_prune

snapshot_prune(relation, unique_key, keep_last, *, dry_run=False)

Delete older snapshot versions while keeping the most recent keep_last rows per business key (including the current row).

Inherited from BaseSnapshotRuntime (source in src/fastflowtransform/snapshots/runtime/base.py, lines 161-227; shown above).