Skip to main content

ff_filter/graph/
filter_step.rs

1//! Internal filter step representation.
2
3use std::time::Duration;
4
5use super::builder::FilterGraphBuilder;
6use super::types::{
7    DrawTextOptions, EqBand, Rgb, ScaleAlgorithm, ToneMap, XfadeTransition, YadifMode,
8};
9use crate::animation::AnimatedValue;
10use crate::blend::BlendMode;
11
12// ── FilterStep ────────────────────────────────────────────────────────────────
13
14/// A single step in a filter chain.
15///
16/// Used by [`crate::FilterGraphBuilder`] to build pipeline filter graphs, and by
17/// [`crate::AudioTrack::effects`] to attach per-track effects in a multi-track mix.
18#[derive(Debug, Clone)]
19pub enum FilterStep {
20    /// Trim: keep only frames in `[start, end)` seconds.
21    Trim { start: f64, end: f64 },
22    /// Scale to a new resolution using the given resampling algorithm.
23    Scale {
24        width: u32,
25        height: u32,
26        algorithm: ScaleAlgorithm,
27    },
28    /// Crop a rectangular region.
29    Crop {
30        x: u32,
31        y: u32,
32        width: u32,
33        height: u32,
34    },
35    /// Overlay a second stream at position `(x, y)`.
36    Overlay { x: i32, y: i32 },
37    /// Fade-in from black starting at `start` seconds, over `duration` seconds.
38    FadeIn { start: f64, duration: f64 },
39    /// Fade-out to black starting at `start` seconds, over `duration` seconds.
40    FadeOut { start: f64, duration: f64 },
41    /// Audio fade-in from silence starting at `start` seconds, over `duration` seconds.
42    AFadeIn { start: f64, duration: f64 },
43    /// Audio fade-out to silence starting at `start` seconds, over `duration` seconds.
44    AFadeOut { start: f64, duration: f64 },
45    /// Fade-in from white starting at `start` seconds, over `duration` seconds.
46    FadeInWhite { start: f64, duration: f64 },
47    /// Fade-out to white starting at `start` seconds, over `duration` seconds.
48    FadeOutWhite { start: f64, duration: f64 },
49    /// Rotate clockwise by `angle_degrees`, filling exposed areas with `fill_color`.
50    Rotate {
51        angle_degrees: f64,
52        fill_color: String,
53    },
54    /// HDR-to-SDR tone mapping.
55    ToneMap(ToneMap),
56    /// Adjust audio volume (in dB; negative = quieter).
57    Volume(f64),
58    /// Mix `n` audio inputs together.
59    Amix(usize),
60    /// Multi-band parametric equalizer (low-shelf, high-shelf, or peak bands).
61    ///
62    /// Each band maps to its own `FFmpeg` filter node chained in sequence.
63    /// The `bands` vec must not be empty.
64    ParametricEq { bands: Vec<EqBand> },
65    /// Apply a 3D LUT from a `.cube` or `.3dl` file.
66    Lut3d { path: String },
67    /// Brightness/contrast/saturation adjustment via `FFmpeg` `eq` filter.
68    Eq {
69        brightness: f32,
70        contrast: f32,
71        saturation: f32,
72    },
73    /// Brightness / contrast / saturation / gamma via `FFmpeg` `eq` filter (optionally animated).
74    ///
75    /// Arguments are evaluated at [`Duration::ZERO`] for the initial graph build.
76    /// Per-frame updates are applied via `avfilter_graph_send_command` in #363.
77    EqAnimated {
78        /// Brightness offset. Range: −1.0 – 1.0 (neutral: 0.0).
79        brightness: AnimatedValue<f64>,
80        /// Contrast multiplier. Range: 0.0 – 3.0 (neutral: 1.0).
81        contrast: AnimatedValue<f64>,
82        /// Saturation multiplier. Range: 0.0 – 3.0 (neutral: 1.0; 0.0 = grayscale).
83        saturation: AnimatedValue<f64>,
84        /// Global gamma correction. Range: 0.1 – 10.0 (neutral: 1.0).
85        gamma: AnimatedValue<f64>,
86    },
87    /// Three-way color balance (shadows / midtones / highlights) via `FFmpeg` `colorbalance` filter
88    /// (optionally animated).
89    ///
90    /// Each tuple is `(R, G, B)`. Valid range per component: −1.0 – 1.0 (neutral: 0.0).
91    ///
92    /// Arguments are evaluated at [`Duration::ZERO`] for the initial graph build.
93    /// Per-frame updates are applied via `avfilter_graph_send_command` in #363.
94    ColorBalanceAnimated {
95        /// Shadows (lift) correction per channel. `FFmpeg` params: `"rs"`, `"gs"`, `"bs"`.
96        lift: AnimatedValue<(f64, f64, f64)>,
97        /// Midtones (gamma) correction per channel. `FFmpeg` params: `"rm"`, `"gm"`, `"bm"`.
98        gamma: AnimatedValue<(f64, f64, f64)>,
99        /// Highlights (gain) correction per channel. `FFmpeg` params: `"rh"`, `"gh"`, `"bh"`.
100        gain: AnimatedValue<(f64, f64, f64)>,
101    },
102    /// Per-channel RGB color curves adjustment.
103    Curves {
104        master: Vec<(f32, f32)>,
105        r: Vec<(f32, f32)>,
106        g: Vec<(f32, f32)>,
107        b: Vec<(f32, f32)>,
108    },
109    /// White balance correction via `colorchannelmixer`.
110    WhiteBalance { temperature_k: u32, tint: f32 },
111    /// Hue rotation by an arbitrary angle.
112    Hue { degrees: f32 },
113    /// Per-channel gamma correction via `FFmpeg` `eq` filter.
114    Gamma { r: f32, g: f32, b: f32 },
115    /// Three-way colour corrector (lift / gamma / gain) via `FFmpeg` `curves` filter.
116    ThreeWayCC {
117        /// Affects shadows (blacks). Neutral: `Rgb::NEUTRAL`.
118        lift: Rgb,
119        /// Affects midtones. Neutral: `Rgb::NEUTRAL`. All components must be > 0.0.
120        gamma: Rgb,
121        /// Affects highlights (whites). Neutral: `Rgb::NEUTRAL`.
122        gain: Rgb,
123    },
124    /// Vignette effect via `FFmpeg` `vignette` filter.
125    Vignette {
126        /// Radius angle in radians (valid range: 0.0 – π/2 ≈ 1.5708). Default: π/5 ≈ 0.628.
127        angle: f32,
128        /// Horizontal centre of the vignette. `0.0` maps to `w/2`.
129        x0: f32,
130        /// Vertical centre of the vignette. `0.0` maps to `h/2`.
131        y0: f32,
132    },
133    /// Horizontal flip (mirror left-right).
134    HFlip,
135    /// Vertical flip (mirror top-bottom).
136    VFlip,
137    /// Reverse video playback (buffers entire clip in memory — use only on short clips).
138    Reverse,
139    /// Reverse audio playback (buffers entire clip in memory — use only on short clips).
140    AReverse,
141    /// Pad to a target resolution with a fill color (letterbox / pillarbox).
142    Pad {
143        /// Target canvas width in pixels.
144        width: u32,
145        /// Target canvas height in pixels.
146        height: u32,
147        /// Horizontal offset of the source frame within the canvas.
148        /// Negative values are replaced with `(ow-iw)/2` (centred).
149        x: i32,
150        /// Vertical offset of the source frame within the canvas.
151        /// Negative values are replaced with `(oh-ih)/2` (centred).
152        y: i32,
153        /// Fill color (any `FFmpeg` color string, e.g. `"black"`, `"0x000000"`).
154        color: String,
155    },
156    /// Scale (preserving aspect ratio) then centre-pad to fill target dimensions
157    /// (letterbox or pillarbox as required).
158    ///
159    /// Implemented as a `scale` filter with `force_original_aspect_ratio=decrease`
160    /// followed by a `pad` filter that centres the scaled frame on the canvas.
161    FitToAspect {
162        /// Target canvas width in pixels.
163        width: u32,
164        /// Target canvas height in pixels.
165        height: u32,
166        /// Fill color for the bars (any `FFmpeg` color string, e.g. `"black"`).
167        color: String,
168    },
169    /// Gaussian blur with configurable radius.
170    ///
171    /// `sigma` is the blur radius. Valid range: 0.0 – 10.0 (values near 0.0 are
172    /// nearly a no-op; higher values produce a stronger blur).
173    GBlur {
174        /// Blur radius (standard deviation). Must be ≥ 0.0.
175        sigma: f32,
176    },
177    /// Crop with optionally animated boundaries (pixels, `f64` for sub-pixel precision).
178    ///
179    /// Arguments are evaluated at [`Duration::ZERO`] for the initial graph build.
180    /// Per-frame updates are applied via `avfilter_graph_send_command` in #363.
181    CropAnimated {
182        /// X offset of the top-left corner, in pixels.
183        x: AnimatedValue<f64>,
184        /// Y offset of the top-left corner, in pixels.
185        y: AnimatedValue<f64>,
186        /// Width of the cropped region. Must evaluate to > 0 at `Duration::ZERO`.
187        width: AnimatedValue<f64>,
188        /// Height of the cropped region. Must evaluate to > 0 at `Duration::ZERO`.
189        height: AnimatedValue<f64>,
190    },
191    /// Gaussian blur with an optionally animated sigma (blur radius).
192    ///
193    /// Arguments are evaluated at [`Duration::ZERO`] for the initial graph build.
194    /// Per-frame updates are applied via `avfilter_graph_send_command` in #363.
195    GBlurAnimated {
196        /// Blur radius (standard deviation). Must evaluate to ≥ 0.0 at `Duration::ZERO`.
197        sigma: AnimatedValue<f64>,
198    },
199    /// Sharpen or blur via unsharp mask (luma + chroma strength).
200    ///
201    /// Positive values sharpen; negative values blur. Valid range for each
202    /// component: −1.5 – 1.5.
203    Unsharp {
204        /// Luma (brightness) sharpening/blurring amount. Range: −1.5 – 1.5.
205        luma_strength: f32,
206        /// Chroma (colour) sharpening/blurring amount. Range: −1.5 – 1.5.
207        chroma_strength: f32,
208    },
209    /// High Quality 3D noise reduction (`hqdn3d`).
210    ///
211    /// Typical values: `luma_spatial=4.0`, `chroma_spatial=3.0`,
212    /// `luma_tmp=6.0`, `chroma_tmp=4.5`. All values must be ≥ 0.0.
213    Hqdn3d {
214        /// Spatial luma noise reduction strength. Must be ≥ 0.0.
215        luma_spatial: f32,
216        /// Spatial chroma noise reduction strength. Must be ≥ 0.0.
217        chroma_spatial: f32,
218        /// Temporal luma noise reduction strength. Must be ≥ 0.0.
219        luma_tmp: f32,
220        /// Temporal chroma noise reduction strength. Must be ≥ 0.0.
221        chroma_tmp: f32,
222    },
223    /// Non-local means noise reduction (`nlmeans`).
224    ///
225    /// `strength` controls the denoising intensity; range 1.0–30.0.
226    /// Higher values remove more noise but are significantly more CPU-intensive.
227    ///
228    /// NOTE: nlmeans is CPU-intensive; avoid for real-time pipelines.
229    Nlmeans {
230        /// Denoising strength. Must be in the range [1.0, 30.0].
231        strength: f32,
232    },
233    /// Deinterlace using the `yadif` filter.
234    Yadif {
235        /// Deinterlacing mode controlling output frame rate and spatial checks.
236        mode: YadifMode,
237    },
238    /// Cross-dissolve transition between two video streams (`xfade`).
239    ///
240    /// Requires two input slots: slot 0 is clip A, slot 1 is clip B.
241    /// `duration` is the overlap length in seconds; `offset` is the PTS
242    /// offset (in seconds) at which clip B begins.
243    XFade {
244        /// Transition style.
245        transition: XfadeTransition,
246        /// Overlap duration in seconds. Must be > 0.0.
247        duration: f64,
248        /// PTS offset (seconds) where clip B starts.
249        offset: f64,
250    },
251    /// Draw text onto the video using the `drawtext` filter.
252    DrawText {
253        /// Full set of drawtext parameters.
254        opts: DrawTextOptions,
255    },
256    /// Burn-in SRT subtitles (hard subtitles) using the `subtitles` filter.
257    SubtitlesSrt {
258        /// Absolute or relative path to the `.srt` file.
259        path: String,
260    },
261    /// Burn-in ASS/SSA styled subtitles using the `ass` filter.
262    SubtitlesAss {
263        /// Absolute or relative path to the `.ass` or `.ssa` file.
264        path: String,
265    },
266    /// Playback speed change using `setpts` (video) and chained `atempo` (audio).
267    ///
268    /// `factor > 1.0` = fast motion; `factor < 1.0` = slow motion.
269    /// Valid range: 0.1–100.0.
270    ///
271    /// Video path: `setpts=PTS/{factor}`.
272    /// Audio path: the `atempo` filter only accepts [0.5, 2.0] per instance;
273    /// `filter_inner` chains multiple instances to cover the full range.
274    Speed {
275        /// Speed multiplier. Must be in [0.1, 100.0].
276        factor: f64,
277    },
278    /// EBU R128 two-pass loudness normalization.
279    ///
280    /// Pass 1 measures integrated loudness with `ebur128=peak=true:metadata=1`.
281    /// Pass 2 applies a linear volume correction so the output reaches `target_lufs`.
282    /// All audio frames are buffered in memory between the two passes — use only
283    /// for clips that fit comfortably in RAM.
284    LoudnessNormalize {
285        /// Target integrated loudness in LUFS (e.g. −23.0). Must be < 0.0.
286        target_lufs: f32,
287        /// True-peak ceiling in dBTP (e.g. −1.0). Must be ≤ 0.0.
288        true_peak_db: f32,
289        /// Target loudness range in LU (e.g. 7.0). Must be > 0.0.
290        lra: f32,
291    },
292    /// Peak-level two-pass normalization using `astats`.
293    ///
294    /// Pass 1 measures the true peak with `astats=metadata=1`.
295    /// Pass 2 applies `volume={gain}dB` so the output peak reaches `target_db`.
296    /// All audio frames are buffered in memory between passes — use only
297    /// for clips that fit comfortably in RAM.
298    NormalizePeak {
299        /// Target peak level in dBFS (e.g. −1.0). Must be ≤ 0.0.
300        target_db: f32,
301    },
302    /// Noise gate via `FFmpeg`'s `agate` filter.
303    ///
304    /// Audio below `threshold_db` is attenuated; audio above passes through.
305    /// The threshold is converted from dBFS to the linear scale expected by
306    /// `agate`'s `threshold` parameter (`linear = 10^(dB/20)`).
307    ANoiseGate {
308        /// Gate open/close threshold in dBFS (e.g. −40.0).
309        threshold_db: f32,
310        /// Attack time in milliseconds — how quickly the gate opens. Must be > 0.0.
311        attack_ms: f32,
312        /// Release time in milliseconds — how quickly the gate closes. Must be > 0.0.
313        release_ms: f32,
314    },
315    /// Dynamic range compressor via `FFmpeg`'s `acompressor` filter.
316    ///
317    /// Reduces the dynamic range of the audio signal: peaks above
318    /// `threshold_db` are attenuated by `ratio`:1.  `makeup_db` applies
319    /// additional gain after compression to restore perceived loudness.
320    ACompressor {
321        /// Compression threshold in dBFS (e.g. −20.0).
322        threshold_db: f32,
323        /// Compression ratio (e.g. 4.0 = 4:1). Must be ≥ 1.0.
324        ratio: f32,
325        /// Attack time in milliseconds. Must be > 0.0.
326        attack_ms: f32,
327        /// Release time in milliseconds. Must be > 0.0.
328        release_ms: f32,
329        /// Make-up gain in dB applied after compression (e.g. 6.0).
330        makeup_db: f32,
331    },
332    /// Downmix stereo to mono via `FFmpeg`'s `pan` filter.
333    ///
334    /// Both channels are mixed with equal weight:
335    /// `mono|c0=0.5*c0+0.5*c1`.  The output has a single channel.
336    StereoToMono,
337    /// Remap audio channels using `FFmpeg`'s `channelmap` filter.
338    ///
339    /// `mapping` is a `|`-separated list of output channel names taken
340    /// from input channels, e.g. `"FR|FL"` swaps left and right.
341    /// Must not be empty.
342    ChannelMap {
343        /// `FFmpeg` channelmap mapping expression (e.g. `"FR|FL"`).
344        mapping: String,
345    },
346    /// A/V sync correction via audio delay or advance.
347    ///
348    /// Positive `ms`: uses `FFmpeg`'s `adelay` filter to shift audio later.
349    /// Negative `ms`: uses `FFmpeg`'s `atrim` filter to trim the audio start,
350    /// effectively advancing audio by `|ms|` milliseconds.
351    /// Zero `ms`: uses `adelay` with zero delay (no-op).
352    AudioDelay {
353        /// Delay in milliseconds. Positive = delay; negative = advance.
354        ms: f64,
355    },
356    /// Concatenate `n` sequential video input segments via `FFmpeg`'s `concat` filter.
357    ///
358    /// Requires `n` video input slots (0 through `n-1`). `n` must be ≥ 2.
359    ConcatVideo {
360        /// Number of video input segments to concatenate. Must be ≥ 2.
361        n: u32,
362    },
363    /// Concatenate `n` sequential audio input segments via `FFmpeg`'s `concat` filter.
364    ///
365    /// Requires `n` audio input slots (0 through `n-1`). `n` must be ≥ 2.
366    ConcatAudio {
367        /// Number of audio input segments to concatenate. Must be ≥ 2.
368        n: u32,
369    },
370    /// Freeze a single frame for a configurable duration using `FFmpeg`'s `loop` filter.
371    ///
372    /// The frame nearest to `pts` seconds is held for `duration` seconds, then
373    /// playback resumes. Frame numbers are approximated using a 25 fps assumption;
374    /// accuracy depends on the source stream's actual frame rate.
375    FreezeFrame {
376        /// Timestamp of the frame to freeze, in seconds. Must be >= 0.0.
377        pts: f64,
378        /// Duration to hold the frozen frame, in seconds. Must be > 0.0.
379        duration: f64,
380    },
381    /// Scrolling text ticker (right-to-left) using the `drawtext` filter.
382    ///
383    /// The text starts off-screen to the right and scrolls left at
384    /// `speed_px_per_sec` pixels per second using the expression
385    /// `x = w - t * speed`.
386    Ticker {
387        /// Text to display. Special characters (`\`, `:`, `'`) are escaped.
388        text: String,
389        /// Y position as an `FFmpeg` expression, e.g. `"h-50"` or `"10"`.
390        y: String,
391        /// Horizontal scroll speed in pixels per second (must be > 0.0).
392        speed_px_per_sec: f32,
393        /// Font size in points.
394        font_size: u32,
395        /// Font color as an `FFmpeg` color string, e.g. `"white"` or `"0xFFFFFF"`.
396        font_color: String,
397    },
398    /// Join two video clips with a cross-dissolve transition.
399    ///
400    /// Compound step — expands in `filter_inner` to:
401    /// ```text
402    /// in0 → trim(end=clip_a_end+dissolve_dur) → setpts → xfade[0]
403    /// in1 → trim(start=max(0, clip_b_start−dissolve_dur)) → setpts → xfade[1]
404    /// ```
405    ///
406    /// Requires two video input slots: slot 0 = clip A, slot 1 = clip B.
407    /// `clip_a_end` and `dissolve_dur` must be > 0.0.
408    JoinWithDissolve {
409        /// Timestamp (seconds) where clip A ends. Must be > 0.0.
410        clip_a_end: f64,
411        /// Timestamp (seconds) where clip B content starts (before the overlap).
412        clip_b_start: f64,
413        /// Cross-dissolve overlap duration in seconds. Must be > 0.0.
414        dissolve_dur: f64,
415    },
416    /// Composite a PNG image (watermark / logo) over video with optional opacity.
417    ///
418    /// This is a compound step: internally it creates a `movie` source,
419    /// a `lut` alpha-scaling filter, and an `overlay` compositing filter.
420    /// The image file is loaded once at graph construction time.
421    OverlayImage {
422        /// Absolute or relative path to the `.png` file.
423        path: String,
424        /// Horizontal position as an `FFmpeg` expression, e.g. `"10"` or `"W-w-10"`.
425        x: String,
426        /// Vertical position as an `FFmpeg` expression, e.g. `"10"` or `"H-h-10"`.
427        y: String,
428        /// Opacity 0.0 (fully transparent) to 1.0 (fully opaque).
429        opacity: f32,
430    },
431
432    /// Blend a `top` layer over the current stream (bottom) using the given mode.
433    ///
434    /// This is a compound step:
435    /// - **Normal** mode: `[top]colorchannelmixer=aa=<opacity>[top_faded];
436    ///   [bottom][top_faded]overlay=format=auto:shortest=1[out]`
437    ///   (the `colorchannelmixer` step is omitted when `opacity == 1.0`).
438    /// - All other modes return [`crate::FilterError::InvalidConfig`] from
439    ///   [`crate::FilterGraphBuilder::build`] until implemented.
440    ///
441    /// The `top` builder's steps are applied to the second input slot (`in1`).
442    /// `opacity` is clamped to `[0.0, 1.0]` by the builder method.
443    ///
444    /// `Box<FilterGraphBuilder>` is used to break the otherwise-recursive type:
445    /// `FilterStep` → `FilterGraphBuilder` → `Vec<FilterStep>`.
446    Blend {
447        /// Filter pipeline for the top (foreground) layer.
448        top: Box<FilterGraphBuilder>,
449        /// How the two layers are combined.
450        mode: BlendMode,
451        /// Opacity of the top layer in `[0.0, 1.0]`; 1.0 = fully opaque.
452        opacity: f32,
453    },
454
455    /// Remove pixels matching `color` using `FFmpeg`'s `chromakey` filter,
456    /// producing a `yuva420p` output with transparent areas where the key
457    /// color was detected.
458    ///
459    /// Use this for YCbCr-encoded sources (most video).  For RGB sources
460    /// use `colorkey` instead.
461    ChromaKey {
462        /// `FFmpeg` color string, e.g. `"green"`, `"0x00FF00"`, `"#00FF00"`.
463        color: String,
464        /// Match radius in `[0.0, 1.0]`; higher = more pixels removed.
465        similarity: f32,
466        /// Edge softness in `[0.0, 1.0]`; `0.0` = hard edge.
467        blend: f32,
468    },
469
470    /// Remove pixels matching `color` in RGB space using `FFmpeg`'s `colorkey`
471    /// filter, producing an `rgba` output with transparent areas where the key
472    /// color was detected.
473    ///
474    /// Use this for RGB-encoded sources.  For YCbCr-encoded video (most video)
475    /// use `chromakey` instead.
476    ColorKey {
477        /// `FFmpeg` color string, e.g. `"green"`, `"0x00FF00"`, `"#00FF00"`.
478        color: String,
479        /// Match radius in `[0.0, 1.0]`; higher = more pixels removed.
480        similarity: f32,
481        /// Edge softness in `[0.0, 1.0]`; `0.0` = hard edge.
482        blend: f32,
483    },
484
485    /// Reduce color spill from the key color on subject edges using `FFmpeg`'s
486    /// `hue` filter to desaturate the spill hue region.
487    ///
488    /// Applies `hue=h=0:s=(1.0 - strength)`.  `strength=0.0` leaves the image
489    /// unchanged; `strength=1.0` fully desaturates.
490    ///
491    /// `key_color` is stored for future use by a more targeted per-hue
492    /// implementation.
493    SpillSuppress {
494        /// `FFmpeg` color string identifying the spill color, e.g. `"green"`.
495        key_color: String,
496        /// Suppression intensity in `[0.0, 1.0]`; `0.0` = no effect, `1.0` = full suppression.
497        strength: f32,
498    },
499
500    /// Merge a grayscale `matte` as the alpha channel of the input video using
501    /// `FFmpeg`'s `alphamerge` filter.
502    ///
503    /// White (luma=255) in the matte produces fully opaque output; black (luma=0)
504    /// produces fully transparent output.
505    ///
506    /// This is a compound step: the `matte` builder's pipeline is applied to the
507    /// second input slot (`in1`) before the `alphamerge` filter is linked.
508    ///
509    /// `Box<FilterGraphBuilder>` breaks the otherwise-recursive type, following
510    /// the same pattern as [`FilterStep::Blend`].
511    AlphaMatte {
512        /// Pipeline for the grayscale matte stream (slot 1).
513        matte: Box<FilterGraphBuilder>,
514    },
515
516    /// Key out pixels by luminance value using `FFmpeg`'s `lumakey` filter.
517    ///
518    /// Pixels whose normalized luma is within `tolerance` of `threshold` are
519    /// made transparent.  When `invert` is `true`, a `geq` filter is appended
520    /// to negate the alpha channel, effectively swapping transparent and opaque
521    /// regions.
522    ///
523    /// - `threshold`: luma cutoff in `[0.0, 1.0]`; `0.0` = black, `1.0` = white.
524    /// - `tolerance`: match radius around the threshold in `[0.0, 1.0]`.
525    /// - `softness`: edge feather width in `[0.0, 1.0]`; `0.0` = hard edge.
526    /// - `invert`: when `false`, keys out bright regions (pixels matching the
527    ///   threshold); when `true`, the alpha is negated after keying, making
528    ///   the complementary region transparent instead.
529    ///
530    /// Output carries an alpha channel (`yuva420p`).
531    LumaKey {
532        /// Luma cutoff in `[0.0, 1.0]`.
533        threshold: f32,
534        /// Match radius around the threshold in `[0.0, 1.0]`.
535        tolerance: f32,
536        /// Edge feather width in `[0.0, 1.0]`; `0.0` = hard edge.
537        softness: f32,
538        /// When `true`, the alpha channel is negated after keying.
539        invert: bool,
540    },
541
542    /// Apply a rectangular alpha mask using `FFmpeg`'s `geq` filter.
543    ///
544    /// Pixels inside the rectangle defined by (`x`, `y`, `width`, `height`)
545    /// are made fully opaque (`alpha=255`); pixels outside are made fully
546    /// transparent (`alpha=0`).  When `invert` is `true` the roles are swapped:
547    /// inside becomes transparent and outside becomes opaque.
548    ///
549    /// - `x`, `y`: top-left corner of the rectangle (in pixels).
550    /// - `width`, `height`: rectangle dimensions (must be > 0).
551    /// - `invert`: when `false`, keeps the interior; when `true`, keeps the
552    ///   exterior.
553    ///
554    /// `width` and `height` are validated in [`build`](FilterGraphBuilder::build);
555    /// zero values return [`crate::FilterError::InvalidConfig`].
556    ///
557    /// The output carries an alpha channel (`rgba`).
558    RectMask {
559        /// Left edge of the rectangle (pixels from the left).
560        x: u32,
561        /// Top edge of the rectangle (pixels from the top).
562        y: u32,
563        /// Width of the rectangle in pixels (must be > 0).
564        width: u32,
565        /// Height of the rectangle in pixels (must be > 0).
566        height: u32,
567        /// When `true`, the mask is inverted: outside is opaque, inside is transparent.
568        invert: bool,
569    },
570
571    /// Feather (soften) the alpha channel edges using a Gaussian blur.
572    ///
573    /// Splits the stream into a color copy and an alpha copy, blurs the alpha
574    /// plane with `gblur=sigma=<radius>`, then re-merges:
575    ///
576    /// ```text
577    /// [in]split=2[color][with_alpha];
578    /// [with_alpha]alphaextract[alpha_only];
579    /// [alpha_only]gblur=sigma=<radius>[alpha_blurred];
580    /// [color][alpha_blurred]alphamerge[out]
581    /// ```
582    ///
583    /// `radius` is the blur kernel half-size in pixels and must be > 0.
584    /// Validated in [`build`](FilterGraphBuilder::build); `radius == 0` returns
585    /// [`crate::FilterError::InvalidConfig`].
586    ///
587    /// Typically chained after a keying or masking step
588    /// (e.g. [`FilterStep::ChromaKey`], [`FilterStep::RectMask`],
589    /// [`FilterStep::PolygonMatte`]).  Applying this step to a fully-opaque
590    /// video (no prior alpha) is a no-op because a uniform alpha of 255 blurs
591    /// to 255 everywhere.
592    FeatherMask {
593        /// Gaussian blur kernel half-size in pixels (must be > 0).
594        radius: u32,
595    },
596
597    /// Simulate motion blur by blending consecutive frames via `FFmpeg`'s `tblend` filter.
598    ///
599    /// `shutter_angle_degrees` controls the blend ratio; 360° equals a full
600    /// frame-period exposure (maximum blur). `sub_frames` is the number of
601    /// frames blended and must be in [2, 16]; it is validated by
602    /// [`FilterGraph::motion_blur`](crate::FilterGraph::motion_blur).
603    MotionBlur {
604        /// Shutter angle in degrees (0° = no blur, 360° = full-period blur).
605        shutter_angle_degrees: f32,
606        /// Number of frames blended. Must be in [2, 16].
607        sub_frames: u8,
608    },
609
610    /// Correct radial lens distortion using two polynomial coefficients via
611    /// `FFmpeg`'s `lenscorrection` filter.
612    ///
613    /// Negative values correct barrel distortion; positive values correct
614    /// pincushion distortion. Both `k1` and `k2` must be in [−1.0, 1.0];
615    /// validated by [`FilterGraph::lens_correction`](crate::FilterGraph::lens_correction).
616    LensCorrection {
617        /// First-order radial distortion coefficient. Range: [−1.0, 1.0].
618        k1: f32,
619        /// Second-order radial distortion coefficient. Range: [−1.0, 1.0].
620        k2: f32,
621    },
622
623    /// Add synthetic per-frame random film grain to luma and chroma channels
624    /// via `FFmpeg`'s `noise` filter.
625    ///
626    /// `luma_strength` and `chroma_strength` are clamped to [0.0, 100.0].
627    /// The `allf=t` flag varies the seed each frame to simulate real film grain.
628    FilmGrain {
629        /// Grain strength applied to the luma (Y) plane. Clamped to [0.0, 100.0].
630        luma_strength: f32,
631        /// Grain strength applied to the Cb and Cr planes. Clamped to [0.0, 100.0].
632        chroma_strength: f32,
633    },
634
635    /// Uniform scale by a fractional multiplier via `FFmpeg`'s `scale` filter.
636    ///
637    /// Both width and height are multiplied by `factor`. Used to hide warped
638    /// border pixels left after lens distortion correction.
639    ScaleMultiplier {
640        /// Scale factor applied to both dimensions (e.g. `1.05` = 5 % zoom-in).
641        factor: f32,
642    },
643
644    /// Reduce lateral chromatic aberration by independently shifting the R and B
645    /// channels via `FFmpeg`'s `rgbashift` filter.
646    ///
647    /// `rh` and `bh` are the horizontal pixel shifts for the red and blue
648    /// channels respectively. Derived from scale deviations by
649    /// [`FilterGraph::fix_chromatic_aberration`](crate::FilterGraph::fix_chromatic_aberration).
650    ChromaticAberration {
651        /// Horizontal shift for the red channel in pixels (positive = right).
652        rh: i32,
653        /// Horizontal shift for the blue channel in pixels (positive = right).
654        bh: i32,
655    },
656
    /// Glow / bloom effect: blends blurred highlights back over the image via
    /// `split`, `curves`, `gblur`, and `blend` filters.
    ///
    /// This is a compound step — `filter_name()` returns `"split"` for
    /// `validate_filter_steps`; see
    /// [`FilterGraph::glow`](crate::FilterGraph::glow) for parameter semantics.
    Glow {
        /// Luminance threshold that triggers the glow (clamped to [0.0, 1.0]).
        threshold: f32,
        /// Gaussian blur radius in pixels (clamped to [0.5, 50.0]).
        radius: f32,
        /// Additive blend strength (clamped to [0.0, 2.0]).
        intensity: f32,
    },

    /// Convolution reverb using an impulse response (IR) audio file.
    ///
    /// The IR is loaded via `FFmpeg`'s `amovie` filter, optionally delayed by
    /// `pre_delay_ms` via `adelay`, then convolved with the main audio stream
    /// via `FFmpeg`'s `afir` filter.
    ///
    /// This is a compound step — `filter_name()` returns `"afir"` for
    /// `validate_filter_steps`; see
    /// [`FilterGraph::reverb_ir`](crate::FilterGraph::reverb_ir) for parameter
    /// semantics.
    ReverbIr {
        /// Absolute or relative path to the `.wav` or `.flac` IR file.
        ir_path: String,
        /// Wet (reverb) mix level in [0.0, 1.0].
        wet: f32,
        /// Dry (original) mix level in [0.0, 1.0].
        dry: f32,
        /// Pre-delay before the reverb tail in milliseconds (clamped to 0–500).
        pre_delay_ms: u32,
    },

    /// Algorithmic multi-tap echo/reverb via `FFmpeg`'s `aecho` filter.
    ///
    /// `in_gain` and `out_gain` are amplitude multipliers clamped to [0.0, 1.0].
    /// `delays` contains delay times in milliseconds (one per tap); `decays`
    /// contains the corresponding decay factors in [0.0, 1.0].  Both vecs must
    /// have equal length in the range 1–8; validated by
    /// [`FilterGraph::reverb_echo`](crate::FilterGraph::reverb_echo).
    ReverbEcho {
        /// Input gain (amplitude multiplier). Clamped to [0.0, 1.0].
        in_gain: f32,
        /// Output gain (amplitude multiplier). Clamped to [0.0, 1.0].
        out_gain: f32,
        /// Delay times in milliseconds (one per tap).
        delays: Vec<f32>,
        /// Decay factors per tap. Clamped to [0.0, 1.0].
        decays: Vec<f32>,
    },

    /// Pitch shift without tempo change.
    ///
    /// Shifts audio pitch by `semitones` semitones without altering playback
    /// duration.  Implemented as `asetrate` (changes the declared sample rate
    /// to shift pitch) followed by `atempo` (restores the original duration).
    ///
    /// Range: [−12.0, 12.0]; validated by
    /// [`FilterGraph::pitch_shift`](crate::FilterGraph::pitch_shift).
    ///
    /// This is a compound step — `filter_name()` returns `"asetrate"` for
    /// `validate_filter_steps`; the actual graph construction is handled by
    /// `filter_inner::build::build_audio_graph`.
    PitchShift {
        /// Pitch shift in semitones. Range: [−12.0, 12.0].
        semitones: f32,
    },

    /// Time-stretch audio without changing pitch via `FFmpeg`'s `atempo` filter.
    ///
    /// `factor < 1.0` = slower (longer duration); `factor > 1.0` = faster
    /// (shorter duration).  Range: [0.1, 10.0].  Values outside [0.5, 2.0]
    /// are realised by chaining multiple `atempo` instances (each in [0.5, 2.0]).
    ///
    /// Validated by [`FilterGraph::time_stretch`](crate::FilterGraph::time_stretch).
    TimeStretch {
        /// Speed / duration factor. 0.5 = 2× longer; 2.0 = 2× shorter. Range: [0.1, 10.0].
        factor: f32,
    },

    /// Simultaneously change audio speed and pitch by the same factor.
    ///
    /// Equivalent to playing a tape at a different speed: `factor > 1.0` makes
    /// audio faster and higher; `factor < 1.0` makes it slower and lower.
    ///
    /// Uses `FFmpeg`'s `asetrate` to multiply the declared sample rate by
    /// `factor` without resampling.  Range: [0.1, 10.0]; validated by
    /// [`FilterGraph::speed_change`](crate::FilterGraph::speed_change).
    SpeedChange {
        /// Speed/pitch multiplier. Range: [0.1, 10.0].
        factor: f64,
    },

    /// Spectral noise reduction using a statistical noise-type model.
    ///
    /// Uses `FFmpeg`'s `afftdn` filter.  `noise_type_flag` is the single-letter
    /// `nt` parameter (`"w"` = white, `"p"` = pink, `"b"` = brown).
    /// `nr_level` is the reduction amount in dB, clamped to [0.0, 97.0].
    ///
    /// Created by [`FilterGraph::noise_reduce`](crate::FilterGraph::noise_reduce).
    NoiseReduce {
        /// `afftdn` `nt` flag: `"w"`, `"p"`, or `"b"`.
        noise_type_flag: String,
        /// Noise reduction amount in dB. Clamped to [0.0, 97.0].
        nr_level: f32,
    },

    /// Spectral noise reduction using a captured noise profile.
    ///
    /// Uses `FFmpeg`'s `afftdn` with the `pl` (profile length) option: the
    /// filter learns the noise profile from the first `profile_duration_secs`
    /// seconds, then subtracts it from the rest of the stream.
    /// `nr_level` is the reduction amount in dB, clamped to [0.0, 97.0].
    ///
    /// Created by
    /// [`FilterGraph::noise_reduce_profile`](crate::FilterGraph::noise_reduce_profile).
    NoiseReduceProfile {
        /// Duration in seconds from which to capture the noise profile. Minimum 0.1.
        profile_duration_secs: f32,
        /// Noise reduction amount in dB. Clamped to [0.0, 97.0].
        nr_level: f32,
    },

    /// Sidechain compression for audio ducking via `FFmpeg`'s `sidechaincompress` filter.
    ///
    /// Reduces the background audio level when the foreground (sidechain) signal
    /// exceeds the threshold.  Push background audio to slot 0 and foreground
    /// audio to slot 1.
    ///
    /// `threshold_linear` is the trigger level as a linear amplitude (pre-converted
    /// from dBFS by [`FilterGraph::duck`](crate::FilterGraph::duck)).
    /// `ratio`, `attack_ms`, and `release_ms` are validated by
    /// [`FilterGraph::duck`](crate::FilterGraph::duck).
    Duck {
        /// Compression threshold as a linear amplitude ratio in (0.0, 1.0].
        threshold_linear: f32,
        /// Compression ratio (e.g. 20.0 for near hard-limiting). Must be >= 1.0.
        ratio: f32,
        /// Attack time in milliseconds. Must be >= 0.0.
        attack_ms: f32,
        /// Release time in milliseconds. Must be >= 0.0.
        release_ms: f32,
    },

    /// Apply a polygon alpha mask using `FFmpeg`'s `geq` filter with a
    /// crossing-number point-in-polygon test.
    ///
    /// Pixels inside the polygon are fully opaque (`alpha=255`); pixels outside
    /// are fully transparent (`alpha=0`).  When `invert` is `true` the roles
    /// are swapped.
    ///
    /// - `vertices`: polygon corners as `(x, y)` in `[0.0, 1.0]` (normalised
    ///   to frame size).  Minimum 3, maximum 16.
    /// - `invert`: when `false`, inside = opaque; when `true`, outside = opaque.
    ///
    /// Vertex count and coordinates are validated in
    /// [`build`](FilterGraphBuilder::build); out-of-range values return
    /// [`crate::FilterError::InvalidConfig`].
    ///
    /// The `geq` expression is constructed from the vertex list at graph
    /// build time.  Degenerate polygons (zero area) produce a fully-transparent
    /// mask.  The output carries an alpha channel (`rgba`).
    PolygonMatte {
        /// Polygon corners in normalised `[0.0, 1.0]` frame coordinates.
        vertices: Vec<(f32, f32)>,
        /// When `true`, the mask is inverted: outside is opaque, inside is transparent.
        invert: bool,
    },
826}
827
828/// Convert a color temperature in Kelvin to linear RGB multipliers using
829/// Tanner Helland's algorithm.
830///
831/// Returns `(r, g, b)` each in `[0.0, 1.0]`.
832fn kelvin_to_rgb(temp_k: u32) -> (f64, f64, f64) {
833    let t = (f64::from(temp_k) / 100.0).clamp(10.0, 400.0);
834    let r = if t <= 66.0 {
835        1.0
836    } else {
837        (329.698_727_446_4 * (t - 60.0).powf(-0.133_204_759_2) / 255.0).clamp(0.0, 1.0)
838    };
839    let g = if t <= 66.0 {
840        ((99.470_802_586_1 * t.ln() - 161.119_568_166_1) / 255.0).clamp(0.0, 1.0)
841    } else {
842        ((288.122_169_528_3 * (t - 60.0).powf(-0.075_514_849_2)) / 255.0).clamp(0.0, 1.0)
843    };
844    let b = if t >= 66.0 {
845        1.0
846    } else if t <= 19.0 {
847        0.0
848    } else {
849        ((138.517_731_223_1 * (t - 10.0).ln() - 305.044_792_730_7) / 255.0).clamp(0.0, 1.0)
850    };
851    (r, g, b)
852}
853
854impl FilterStep {
855    /// Returns the libavfilter filter name for this step.
856    pub(crate) fn filter_name(&self) -> &'static str {
857        match self {
858            Self::Trim { .. } => "trim",
859            Self::Scale { .. } => "scale",
860            Self::Crop { .. } => "crop",
861            Self::Overlay { .. } => "overlay",
862            Self::FadeIn { .. }
863            | Self::FadeOut { .. }
864            | Self::FadeInWhite { .. }
865            | Self::FadeOutWhite { .. } => "fade",
866            Self::AFadeIn { .. } | Self::AFadeOut { .. } => "afade",
867            Self::Rotate { .. } => "rotate",
868            Self::ToneMap(_) => "tonemap",
869            Self::Volume(_) => "volume",
870            Self::Amix(_) => "amix",
871            // ParametricEq is a compound step; "equalizer" is used only by
872            // validate_filter_steps as a best-effort existence check.  The
873            // actual nodes are built by `filter_inner::add_parametric_eq_chain`.
874            Self::ParametricEq { .. } => "equalizer",
875            Self::Lut3d { .. } => "lut3d",
876            Self::Eq { .. } => "eq",
877            Self::EqAnimated { .. } => "eq",
878            Self::ColorBalanceAnimated { .. } => "colorbalance",
879            Self::Curves { .. } => "curves",
880            Self::WhiteBalance { .. } => "colorchannelmixer",
881            Self::Hue { .. } => "hue",
882            Self::Gamma { .. } => "eq",
883            Self::ThreeWayCC { .. } => "curves",
884            Self::Vignette { .. } => "vignette",
885            Self::HFlip => "hflip",
886            Self::VFlip => "vflip",
887            Self::Reverse => "reverse",
888            Self::AReverse => "areverse",
889            Self::Pad { .. } => "pad",
890            // FitToAspect is implemented as scale + pad; "scale" is validated at
891            // build time.  The pad filter is inserted by filter_inner at graph
892            // construction time.
893            Self::FitToAspect { .. } => "scale",
894            Self::GBlur { .. } => "gblur",
895            Self::Unsharp { .. } => "unsharp",
896            Self::Hqdn3d { .. } => "hqdn3d",
897            Self::Nlmeans { .. } => "nlmeans",
898            Self::Yadif { .. } => "yadif",
899            Self::XFade { .. } => "xfade",
900            Self::DrawText { .. } | Self::Ticker { .. } => "drawtext",
901            // "setpts" is checked at build-time; the audio path uses "atempo"
902            // which is verified at graph-construction time in filter_inner.
903            Self::Speed { .. } => "setpts",
904            Self::FreezeFrame { .. } => "loop",
905            Self::LoudnessNormalize { .. } => "ebur128",
906            Self::NormalizePeak { .. } => "astats",
907            Self::ANoiseGate { .. } => "agate",
908            Self::ACompressor { .. } => "acompressor",
909            Self::StereoToMono => "pan",
910            Self::ChannelMap { .. } => "channelmap",
911            // AudioDelay dispatches to adelay (positive) or atrim (negative) at
912            // build time; "adelay" is returned here for validate_filter_steps only.
913            Self::AudioDelay { .. } => "adelay",
914            Self::ConcatVideo { .. } | Self::ConcatAudio { .. } => "concat",
915            // JoinWithDissolve is a compound step (trim+setpts → xfade ← setpts+trim);
916            // "xfade" is used by validate_filter_steps as the primary filter check.
917            Self::JoinWithDissolve { .. } => "xfade",
918            Self::SubtitlesSrt { .. } => "subtitles",
919            Self::SubtitlesAss { .. } => "ass",
920            // OverlayImage is a compound step (movie → lut → overlay); "overlay"
921            // is used only by validate_filter_steps as a best-effort existence
922            // check.  The actual graph construction is handled by
923            // `filter_inner::build::add_overlay_image_step`.
924            Self::OverlayImage { .. } => "overlay",
925            // Blend is a compound step; "overlay" is used as the primary filter
926            // for validate_filter_steps.  Unimplemented modes are caught by
927            // build() before validate_filter_steps is reached.
928            Self::Blend { .. } => "overlay",
929            Self::ChromaKey { .. } => "chromakey",
930            Self::ColorKey { .. } => "colorkey",
931            Self::SpillSuppress { .. } => "hue",
932            // AlphaMatte is a compound step (matte pipeline → alphamerge);
933            // "alphamerge" is used by validate_filter_steps as the primary check.
934            Self::AlphaMatte { .. } => "alphamerge",
935            // LumaKey is a compound step when invert=true (lumakey + geq);
936            // "lumakey" is used here for validate_filter_steps.
937            Self::LumaKey { .. } => "lumakey",
938            // RectMask uses geq to set alpha per-pixel based on rectangle bounds.
939            Self::RectMask { .. } => "geq",
940            // FeatherMask is a compound step (split → alphaextract → gblur → alphamerge);
941            // "alphaextract" is used by validate_filter_steps as the primary check.
942            Self::FeatherMask { .. } => "alphaextract",
943            // PolygonMatte uses geq with a crossing-number point-in-polygon expression.
944            Self::PolygonMatte { .. } => "geq",
945            Self::CropAnimated { .. } => "crop",
946            Self::GBlurAnimated { .. } => "gblur",
947            Self::MotionBlur { .. } => "tblend",
948            Self::LensCorrection { .. } => "lenscorrection",
949            Self::FilmGrain { .. } => "noise",
950            Self::ScaleMultiplier { .. } => "scale",
951            Self::ChromaticAberration { .. } => "rgbashift",
952            // Glow is a compound step (split → curves → gblur → blend);
953            // "split" is used by validate_filter_steps as the primary check.
954            Self::Glow { .. } => "split",
955            // ReverbIr is a compound step (amovie[+adelay] → afir);
956            // "afir" is used by validate_filter_steps as the primary check.
957            Self::ReverbIr { .. } => "afir",
958            Self::ReverbEcho { .. } => "aecho",
959            // PitchShift is a compound step (asetrate → atempo);
960            // "asetrate" is used by validate_filter_steps as the primary check.
961            Self::PitchShift { .. } => "asetrate",
962            // TimeStretch uses one or more chained atempo filters.
963            Self::TimeStretch { .. } => "atempo",
964            // SpeedChange uses asetrate to shift speed and pitch together.
965            Self::SpeedChange { .. } => "asetrate",
966            Self::NoiseReduce { .. } | Self::NoiseReduceProfile { .. } => "afftdn",
967            // Duck is a two-input compound step; "sidechaincompress" is checked at
968            // build time by validate_filter_steps.
969            Self::Duck { .. } => "sidechaincompress",
970        }
971    }
972
973    /// Returns the `args` string passed to `avfilter_graph_create_filter`.
974    pub(crate) fn args(&self) -> String {
975        match self {
976            Self::Trim { start, end } => format!("start={start}:end={end}"),
977            Self::Scale {
978                width,
979                height,
980                algorithm,
981            } => format!("w={width}:h={height}:flags={}", algorithm.as_flags_str()),
982            Self::Crop {
983                x,
984                y,
985                width,
986                height,
987            } => {
988                format!("x={x}:y={y}:w={width}:h={height}")
989            }
990            Self::Overlay { x, y } => format!("x={x}:y={y}"),
991            Self::FadeIn { start, duration } => {
992                format!("type=in:start_time={start}:duration={duration}")
993            }
994            Self::FadeOut { start, duration } => {
995                format!("type=out:start_time={start}:duration={duration}")
996            }
997            Self::FadeInWhite { start, duration } => {
998                format!("type=in:start_time={start}:duration={duration}:color=white")
999            }
1000            Self::FadeOutWhite { start, duration } => {
1001                format!("type=out:start_time={start}:duration={duration}:color=white")
1002            }
1003            Self::AFadeIn { start, duration } => {
1004                format!("type=in:start_time={start}:duration={duration}")
1005            }
1006            Self::AFadeOut { start, duration } => {
1007                format!("type=out:start_time={start}:duration={duration}")
1008            }
1009            Self::Rotate {
1010                angle_degrees,
1011                fill_color,
1012            } => {
1013                format!(
1014                    "angle={}:fillcolor={fill_color}",
1015                    angle_degrees.to_radians()
1016                )
1017            }
1018            Self::ToneMap(algorithm) => format!("tonemap={}", algorithm.as_str()),
1019            Self::Volume(db) => format!("volume={db}dB"),
1020            Self::Amix(inputs) => format!("inputs={inputs}"),
1021            // args() for ParametricEq is not used by the build loop (which is
1022            // bypassed in favour of add_parametric_eq_chain); provided here for
1023            // completeness using the first band's args.
1024            Self::ParametricEq { bands } => bands.first().map(EqBand::args).unwrap_or_default(),
1025            Self::Lut3d { path } => format!("file={path}:interp=trilinear"),
1026            Self::Eq {
1027                brightness,
1028                contrast,
1029                saturation,
1030            } => format!("brightness={brightness}:contrast={contrast}:saturation={saturation}"),
1031            Self::EqAnimated {
1032                brightness,
1033                contrast,
1034                saturation,
1035                gamma,
1036            } => {
1037                let b = brightness.value_at(Duration::ZERO);
1038                let c = contrast.value_at(Duration::ZERO);
1039                let s = saturation.value_at(Duration::ZERO);
1040                let g = gamma.value_at(Duration::ZERO);
1041                format!("brightness={b}:contrast={c}:saturation={s}:gamma={g}")
1042            }
1043            Self::ColorBalanceAnimated { lift, gamma, gain } => {
1044                let (rl, gl, bl) = lift.value_at(Duration::ZERO);
1045                let (rm, gm, bm) = gamma.value_at(Duration::ZERO);
1046                let (rh, gh, bh) = gain.value_at(Duration::ZERO);
1047                format!("rs={rl}:gs={gl}:bs={bl}:rm={rm}:gm={gm}:bm={bm}:rh={rh}:gh={gh}:bh={bh}")
1048            }
1049            Self::Curves { master, r, g, b } => {
1050                let fmt = |pts: &[(f32, f32)]| -> String {
1051                    pts.iter()
1052                        .map(|(x, y)| format!("{x}/{y}"))
1053                        .collect::<Vec<_>>()
1054                        .join(" ")
1055                };
1056                [("master", master.as_slice()), ("r", r), ("g", g), ("b", b)]
1057                    .iter()
1058                    .filter(|(_, pts)| !pts.is_empty())
1059                    .map(|(name, pts)| format!("{name}='{}'", fmt(pts)))
1060                    .collect::<Vec<_>>()
1061                    .join(":")
1062            }
1063            Self::WhiteBalance {
1064                temperature_k,
1065                tint,
1066            } => {
1067                let (r, g, b) = kelvin_to_rgb(*temperature_k);
1068                let g_adj = (g + f64::from(*tint)).clamp(0.0, 2.0);
1069                format!("rr={r}:gg={g_adj}:bb={b}")
1070            }
1071            Self::Hue { degrees } => format!("h={degrees}"),
1072            Self::Gamma { r, g, b } => format!("gamma_r={r}:gamma_g={g}:gamma_b={b}"),
1073            Self::Vignette { angle, x0, y0 } => {
1074                let cx = if *x0 == 0.0 {
1075                    "w/2".to_string()
1076                } else {
1077                    x0.to_string()
1078                };
1079                let cy = if *y0 == 0.0 {
1080                    "h/2".to_string()
1081                } else {
1082                    y0.to_string()
1083                };
1084                format!("angle={angle}:x0={cx}:y0={cy}")
1085            }
1086            Self::ThreeWayCC { lift, gamma, gain } => {
1087                // Convert lift/gamma/gain to a 3-point per-channel curves representation.
1088                // The formula maps:
1089                //   input 0.0 → (lift - 1.0) * gain  (black point)
1090                //   input 0.5 → (0.5 * lift)^(1/gamma) * gain  (midtone)
1091                //   input 1.0 → gain  (white point)
1092                // All neutral (1.0) produces the identity curve 0/0 0.5/0.5 1/1.
1093                let curve = |l: f32, gm: f32, gn: f32| -> String {
1094                    let l = f64::from(l);
1095                    let gm = f64::from(gm);
1096                    let gn = f64::from(gn);
1097                    let black = ((l - 1.0) * gn).clamp(0.0, 1.0);
1098                    let mid = ((0.5 * l).powf(1.0 / gm) * gn).clamp(0.0, 1.0);
1099                    let white = gn.clamp(0.0, 1.0);
1100                    format!("0/{black} 0.5/{mid} 1/{white}")
1101                };
1102                format!(
1103                    "r='{}':g='{}':b='{}'",
1104                    curve(lift.r, gamma.r, gain.r),
1105                    curve(lift.g, gamma.g, gain.g),
1106                    curve(lift.b, gamma.b, gain.b),
1107                )
1108            }
1109            Self::HFlip | Self::VFlip | Self::Reverse | Self::AReverse => String::new(),
1110            Self::GBlur { sigma } => format!("sigma={sigma}"),
1111            Self::Unsharp {
1112                luma_strength,
1113                chroma_strength,
1114            } => format!(
1115                "luma_msize_x=5:luma_msize_y=5:luma_amount={luma_strength}:\
1116                 chroma_msize_x=5:chroma_msize_y=5:chroma_amount={chroma_strength}"
1117            ),
1118            Self::Hqdn3d {
1119                luma_spatial,
1120                chroma_spatial,
1121                luma_tmp,
1122                chroma_tmp,
1123            } => format!("{luma_spatial}:{chroma_spatial}:{luma_tmp}:{chroma_tmp}"),
1124            Self::Nlmeans { strength } => format!("s={strength}"),
1125            Self::Yadif { mode } => format!("mode={}", *mode as i32),
1126            Self::XFade {
1127                transition,
1128                duration,
1129                offset,
1130            } => {
1131                let t = transition.as_str();
1132                format!("transition={t}:duration={duration}:offset={offset}")
1133            }
1134            Self::DrawText { opts } => {
1135                // Escape special characters recognised by the drawtext filter.
1136                let escaped = opts
1137                    .text
1138                    .replace('\\', "\\\\")
1139                    .replace(':', "\\:")
1140                    .replace('\'', "\\'");
1141                let mut parts = vec![
1142                    format!("text='{escaped}'"),
1143                    format!("x={}", opts.x),
1144                    format!("y={}", opts.y),
1145                    format!("fontsize={}", opts.font_size),
1146                    format!("fontcolor={}@{:.2}", opts.font_color, opts.opacity),
1147                ];
1148                if let Some(ref ff) = opts.font_file {
1149                    parts.push(format!("fontfile={ff}"));
1150                }
1151                if let Some(ref bc) = opts.box_color {
1152                    parts.push("box=1".to_string());
1153                    parts.push(format!("boxcolor={bc}"));
1154                    parts.push(format!("boxborderw={}", opts.box_border_width));
1155                }
1156                parts.join(":")
1157            }
1158            Self::Ticker {
1159                text,
1160                y,
1161                speed_px_per_sec,
1162                font_size,
1163                font_color,
1164            } => {
1165                // Use the same escaping as DrawText.
1166                let escaped = text
1167                    .replace('\\', "\\\\")
1168                    .replace(':', "\\:")
1169                    .replace('\'', "\\'");
1170                // x = w - t * speed: at t=0 the text starts fully off the right
1171                // edge (x = w) and scrolls left by `speed` pixels per second.
1172                format!(
1173                    "text='{escaped}':x=w-t*{speed_px_per_sec}:y={y}:\
1174                     fontsize={font_size}:fontcolor={font_color}"
1175                )
1176            }
1177            // Video path: divide PTS by factor to change playback speed.
1178            // Audio path args are built by filter_inner (chained atempo).
1179            Self::Speed { factor } => format!("PTS/{factor}"),
1180            // args() is not used by the build loop for LoudnessNormalize (two-pass
1181            // is handled entirely in filter_inner); provided here for completeness.
1182            Self::LoudnessNormalize { .. } => "peak=true:metadata=1".to_string(),
1183            // args() is not used by the build loop for NormalizePeak (two-pass
1184            // is handled entirely in filter_inner); provided here for completeness.
1185            Self::NormalizePeak { .. } => "metadata=1".to_string(),
1186            Self::FreezeFrame { pts, duration } => {
1187                // The `loop` filter needs a frame index and a loop count, not PTS or
1188                // wall-clock duration.  We approximate both using 25 fps; accuracy
1189                // depends on the source stream's actual frame rate.
1190                #[allow(clippy::cast_possible_truncation)]
1191                let start = (*pts * 25.0) as i64;
1192                #[allow(clippy::cast_possible_truncation)]
1193                let loop_count = (*duration * 25.0) as i64;
1194                format!("loop={loop_count}:size=1:start={start}")
1195            }
1196            Self::SubtitlesSrt { path } | Self::SubtitlesAss { path } => {
1197                format!("filename={path}")
1198            }
1199            // args() for OverlayImage returns the overlay positional args (x:y).
1200            // These are not consumed by add_and_link_step (which is bypassed for
1201            // this compound step); they exist here only for completeness.
1202            Self::OverlayImage { x, y, .. } => format!("{x}:{y}"),
1203            // args() for Blend is not consumed by add_and_link_step (which is
1204            // bypassed in favour of add_blend_normal_step).  Provided for
1205            // completeness using the Normal-mode overlay args.
1206            Self::Blend { .. } => "format=auto:shortest=1".to_string(),
1207            Self::ChromaKey {
1208                color,
1209                similarity,
1210                blend,
1211            } => format!("color={color}:similarity={similarity}:blend={blend}"),
1212            Self::ColorKey {
1213                color,
1214                similarity,
1215                blend,
1216            } => format!("color={color}:similarity={similarity}:blend={blend}"),
1217            Self::SpillSuppress { strength, .. } => format!("h=0:s={}", 1.0 - strength),
1218            // args() is not consumed by add_and_link_step (which is bypassed for
1219            // this compound step); provided here for completeness.
1220            Self::AlphaMatte { .. } => String::new(),
1221            Self::LumaKey {
1222                threshold,
1223                tolerance,
1224                softness,
1225                ..
1226            } => format!("threshold={threshold}:tolerance={tolerance}:softness={softness}"),
1227            // args() is not consumed by add_and_link_step (which is bypassed for
1228            // this compound step); provided here for completeness.
1229            Self::FeatherMask { .. } => String::new(),
1230            Self::RectMask {
1231                x,
1232                y,
1233                width,
1234                height,
1235                invert,
1236            } => {
1237                let xw = x + width - 1;
1238                let yh = y + height - 1;
1239                let (inside, outside) = if *invert { (0, 255) } else { (255, 0) };
1240                format!(
1241                    "r='r(X,Y)':g='g(X,Y)':b='b(X,Y)':\
1242                     a='if(between(X,{x},{xw})*between(Y,{y},{yh}),{inside},{outside})'"
1243                )
1244            }
1245            Self::PolygonMatte { vertices, invert } => {
1246                // Build a crossing-number point-in-polygon expression.
1247                // For each edge (ax,ay)→(bx,by), a horizontal ray from (X,Y) going
1248                // right crosses the edge when Y is in [min(ay,by), max(ay,by)) and
1249                // the intersection x > X.  Exact horizontal edges (dy==0) are skipped.
1250                let n = vertices.len();
1251                let mut edge_exprs = Vec::new();
1252                for i in 0..n {
1253                    let (ax, ay) = vertices[i];
1254                    let (bx, by) = vertices[(i + 1) % n];
1255                    let dy = by - ay;
1256                    if dy == 0.0 {
1257                        // Horizontal edge — never crosses a horizontal ray; skip.
1258                        continue;
1259                    }
1260                    let min_y = ay.min(by);
1261                    let max_y = ay.max(by);
1262                    let dx = bx - ax;
1263                    // x_intersect = ax*iw + (Y - ay*ih) * dx*iw / (dy*ih)
1264                    edge_exprs.push(format!(
1265                        "if(gte(Y,{min_y}*ih)*lt(Y,{max_y}*ih)*gt({ax}*iw+(Y-{ay}*ih)*{dx}*iw/({dy}*ih),X),1,0)"
1266                    ));
1267                }
1268                let sum = if edge_exprs.is_empty() {
1269                    "0".to_string()
1270                } else {
1271                    edge_exprs.join("+")
1272                };
1273                let (inside, outside) = if *invert { (0, 255) } else { (255, 0) };
1274                format!(
1275                    "r='r(X,Y)':g='g(X,Y)':b='b(X,Y)':\
1276                     a='if(gt(mod({sum},2),0),{inside},{outside})'"
1277                )
1278            }
1279            Self::FitToAspect { width, height, .. } => {
1280                // Scale to fit within the target dimensions, preserving the source
1281                // aspect ratio.  The accompanying pad filter (inserted by
1282                // filter_inner after this scale filter) centres the result on the
1283                // target canvas.
1284                format!("w={width}:h={height}:force_original_aspect_ratio=decrease")
1285            }
            // Pad out to `width`×`height`. Negative x/y are sentinels meaning
            // "center on that axis", expressed with pad's ow/iw (oh/ih) variables.
            Self::Pad {
                width,
                height,
                x,
                y,
                color,
            } => {
                let px = if *x < 0 {
                    "(ow-iw)/2".to_string()
                } else {
                    x.to_string()
                };
                let py = if *y < 0 {
                    "(oh-ih)/2".to_string()
                } else {
                    y.to_string()
                };
                format!("width={width}:height={height}:x={px}:y={py}:color={color}")
            }
            Self::ANoiseGate {
                threshold_db,
                attack_ms,
                release_ms,
            } => {
                // `agate` expects threshold as a linear amplitude ratio (0.0–1.0).
                let threshold_linear = 10f32.powf(threshold_db / 20.0);
                format!("threshold={threshold_linear:.6}:attack={attack_ms}:release={release_ms}")
            }
            // Unlike the gate above, the compressor args pass dB values through
            // with an explicit `dB` suffix rather than converting to linear.
            Self::ACompressor {
                threshold_db,
                ratio,
                attack_ms,
                release_ms,
                makeup_db,
            } => {
                format!(
                    "threshold={threshold_db}dB:ratio={ratio}:attack={attack_ms}:\
                     release={release_ms}:makeup={makeup_db}dB"
                )
            }
            // Equal-weight downmix of both input channels (pan-filter syntax).
            Self::StereoToMono => "mono|c0=0.5*c0+0.5*c1".to_string(),
            Self::ChannelMap { mapping } => format!("map={mapping}"),
            // args() is not used directly for AudioDelay — the audio build loop
            // dispatches to add_raw_filter_step with the correct filter name and
            // args based on the sign of ms.  These are provided for completeness.
            Self::AudioDelay { ms } => {
                if *ms >= 0.0 {
                    // Positive delay: adelay-style args, applied to all channels.
                    format!("delays={ms}:all=1")
                } else {
                    // Negative delay: trim-style start offset, converted ms → s.
                    format!("start={}", -ms / 1000.0)
                }
            }
            // concat-filter args: `n` segments, video-only vs. audio-only.
            Self::ConcatVideo { n } => format!("n={n}:v=1:a=0"),
            Self::ConcatAudio { n } => format!("n={n}:v=0:a=1"),
            // args() for JoinWithDissolve is not used by the build loop (which is
            // bypassed in favour of add_join_with_dissolve_step); provided here for
            // completeness using the xfade args.
            Self::JoinWithDissolve {
                clip_a_end,
                dissolve_dur,
                ..
            } => format!("transition=dissolve:duration={dissolve_dur}:offset={clip_a_end}"),
            // NOTE(review): only the t=0 sample of each animated value is emitted
            // here — presumably the full keyframe expression is generated elsewhere
            // in the build path; confirm against the builder.
            Self::CropAnimated {
                x,
                y,
                width,
                height,
            } => {
                let x0 = x.value_at(Duration::ZERO);
                let y0 = y.value_at(Duration::ZERO);
                let w0 = width.value_at(Duration::ZERO);
                let h0 = height.value_at(Duration::ZERO);
                format!("x={x0}:y={y0}:w={w0}:h={h0}")
            }
            // Same t=0-only caveat as CropAnimated above.
            Self::GBlurAnimated { sigma } => {
                let s0 = sigma.value_at(Duration::ZERO);
                format!("sigma={s0}")
            }
            // Approximates motion blur by blending each frame with its neighbour:
            // `alpha` is the shutter's open fraction of a full 360° rotation,
            // clamped to [0, 1]; the rest of the frame weight stays on A.
            Self::MotionBlur {
                shutter_angle_degrees,
                ..
            } => {
                let alpha = f64::from(*shutter_angle_degrees / 360.0).clamp(0.0, 1.0);
                let keep = 1.0 - alpha;
                let blend = alpha;
                format!("all_expr='A*{keep}+B*{blend}'")
            }
            // Radial distortion coefficients passed straight through.
            Self::LensCorrection { k1, k2 } => format!("k1={k1}:k2={k2}"),
            // noise-filter args: strengths clamped to the filter's 0–100 range;
            // the same chroma strength is applied to both chroma planes, and
            // `allf=t` requests temporal (per-frame varying) noise.
            Self::FilmGrain {
                luma_strength,
                chroma_strength,
            } => {
                #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
                let ls = luma_strength.clamp(0.0, 100.0) as u32;
                #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
                let cs = chroma_strength.clamp(0.0, 100.0) as u32;
                format!("alls={ls}:c0s={cs}:c1s={cs}:allf=t")
            }
            // Uniform scale relative to the input dimensions (iw/ih expressions).
            Self::ScaleMultiplier { factor } => {
                format!("w=iw*{factor}:h=ih*{factor}")
            }
            // Horizontal red/blue channel shift with smeared edges.
            Self::ChromaticAberration { rh, bh } => {
                format!("rh={rh}:bh={bh}:edge=smear")
            }
            // args() is not consumed by add_and_link_step (which is bypassed for
            // this compound step); provided here for completeness.
            Self::Glow {
                threshold,
                radius,
                intensity,
            } => {
                let t = threshold.clamp(0.0, 1.0);
                let r = radius.clamp(0.5, 50.0);
                let iv = intensity.clamp(0.0, 2.0);
                // Curves mapping that zeroes everything below the threshold,
                // isolating the highlights that will be blurred and re-added.
                let hi_lo = format!("0/0 {t}/0 1/1");
                format!(
                    "split=2[base][hl];[hl]curves=all='{hi_lo}'[glow_src];\
                     [glow_src]gblur=sigma={r}[glow];\
                     [base][glow]blend=all_mode=addition:all_opacity={iv}"
                )
            }
            // aecho-style args; the parallel delay/decay lists are joined with
            // `|`, the filter's multi-value separator.
            Self::ReverbEcho {
                in_gain,
                out_gain,
                delays,
                decays,
            } => {
                let delay_str = delays
                    .iter()
                    .map(|d| d.to_string())
                    .collect::<Vec<_>>()
                    .join("|");
                let decay_str = decays
                    .iter()
                    .map(|d| d.to_string())
                    .collect::<Vec<_>>()
                    .join("|");
                format!(
                    "in_gain={ig}:out_gain={og}:delays={ds}:decays={dec}",
                    ig = in_gain,
                    og = out_gain,
                    ds = delay_str,
                    dec = decay_str,
                )
            }
            // args() is not consumed by add_and_link_step (which is bypassed for
            // this compound step); provided here for completeness.
            Self::ReverbIr {
                ir_path,
                wet,
                dry,
                pre_delay_ms,
            } => {
                // Cap the pre-delay at 500 ms; only emit the adelay stage when
                // a positive pre-delay was requested.
                let delay = pre_delay_ms.min(&500);
                let delay_part = if *delay > 0 {
                    format!(",adelay={delay}:all=1")
                } else {
                    String::new()
                };
                // Loads the impulse response via amovie and convolves with afir.
                format!("amovie={ir_path}{delay_part}[ir];[0:a][ir]afir=dry={dry}:wet={wet}")
            }
            // args() is not consumed by add_and_link_step (which is bypassed for
            // this compound step); provided here for completeness.
            Self::PitchShift { semitones } => {
                // Resample to shift pitch by 2^(semitones/12), then compensate
                // the duration change with the reciprocal tempo factor.
                let rate = 2f64.powf(f64::from(*semitones) / 12.0);
                let atempo = 1.0 / rate;
                format!("asetrate=sr*{rate:.6},atempo={atempo:.6}")
            }
            // args() is not consumed by add_and_link_step (bypassed in favour of
            // add_atempo_chain); provided here for single-instance completeness.
            Self::TimeStretch { factor } => format!("{factor:.6}"),
            // args() is not consumed by add_and_link_step (bypassed; sample rate
            // is resolved from buffersrc_args at build time); provided for completeness.
            Self::SpeedChange { factor } => format!("asetrate=sr*{factor:.6}"),
            // Denoiser args with caller-supplied noise type and reduction level.
            Self::NoiseReduce {
                noise_type_flag,
                nr_level,
            } => format!("nt={noise_type_flag}:nr={nr_level}"),
            // Profile-based variant: fixed -25 dB noise floor, white-noise type,
            // sampling the first `profile_duration_secs` seconds for the profile.
            Self::NoiseReduceProfile {
                profile_duration_secs,
                nr_level,
            } => format!("nr={nr_level}:nf=-25:nt=w:pl={profile_duration_secs}"),
            // args() is not consumed by add_and_link_step (bypassed for this
            // compound two-input step); provided for completeness.
            Self::Duck {
                threshold_linear,
                ratio,
                attack_ms,
                release_ms,
            } => format!(
                "threshold={threshold_linear}:ratio={ratio}:attack={attack_ms}:release={release_ms}"
            ),
        }
    }
}