// flodl_cli/src/init.rs
1//! `fdl init <name>` -- scaffold a new floDl project.
2//!
3//! Two modes:
4//! - Default: mounted libtorch (same model as the main flodl repo)
5//! - `--docker`: standalone Docker scaffold (libtorch baked into images)
6
7use std::fs;
8use std::path::Path;
9use std::process::Command;
10
11pub fn run(name: Option<&str>, docker: bool) -> Result<(), String> {
12    let name = name.ok_or("usage: fdl init <project-name>")?;
13    validate_name(name)?;
14
15    if Path::new(name).exists() {
16        return Err(format!("'{}' already exists", name));
17    }
18
19    let crate_name = name.replace('-', "_");
20    let flodl_dep = resolve_flodl_dep();
21
22    fs::create_dir_all(format!("{}/src", name))
23        .map_err(|e| format!("cannot create directory: {}", e))?;
24
25    if docker {
26        scaffold_docker(name, &crate_name, &flodl_dep)?;
27    } else {
28        scaffold_mounted(name, &crate_name, &flodl_dep)?;
29    }
30
31    // Shared files
32    write_file(
33        &format!("{}/src/main.rs", name),
34        &main_rs_template(),
35    )?;
36    write_file(
37        &format!("{}/.gitignore", name),
38        &gitignore_template(docker),
39    )?;
40
41    println!();
42    println!("Project '{}' created. Next steps:", name);
43    println!();
44    println!("  cd {}", name);
45    if docker {
46        println!("  make build    # first build (downloads libtorch, ~5 min)");
47        println!("  make test     # run tests");
48        println!("  make run      # train the model");
49    } else {
50        println!("  ./fdl setup   # detect hardware + download libtorch");
51        println!("  make test     # run tests");
52        println!("  make run      # train the model");
53    }
54    println!("  make shell    # interactive shell");
55    println!();
56    println!("Edit src/main.rs to build your model.");
57    println!();
58    println!("Guides:");
59    println!("  Tutorials:         https://flodl.dev/guide/tensors");
60    println!("  Graph Tree:        https://flodl.dev/guide/graph-tree");
61    println!("  PyTorch migration: https://flodl.dev/guide/migration");
62    println!("  Troubleshooting:   https://flodl.dev/guide/troubleshooting");
63
64    Ok(())
65}
66
/// Validate a project name: non-empty, only ASCII letters/digits/`-`/`_`,
/// and not starting with a digit.
///
/// The name becomes the directory name and (with `-` -> `_`) the Cargo
/// package name; Cargo rejects package names that start with a digit, so
/// we reject them up front instead of scaffolding a broken Cargo.toml.
fn validate_name(name: &str) -> Result<(), String> {
    if name.is_empty() {
        return Err("project name cannot be empty".into());
    }
    if !name.chars().all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_') {
        return Err("project name must contain only letters, digits, hyphens, underscores".into());
    }
    // `9lives` would render `name = "9lives"`, which Cargo refuses to build.
    if name.starts_with(|c: char| c.is_ascii_digit()) {
        return Err("project name cannot start with a digit".into());
    }
    Ok(())
}
76
77fn resolve_flodl_dep() -> String {
78    // Try crates.io for the latest version
79    if let Some(version) = crates_io_version() {
80        format!("flodl = \"{}\"", version)
81    } else {
82        "flodl = { git = \"https://github.com/fab2s/floDl.git\" }".into()
83    }
84}
85
/// Query crates.io for the latest stable version of `flodl`.
///
/// Returns `None` on any failure (curl missing, network error, HTTP error,
/// unexpected body) so the caller can fall back to a git dependency.
///
/// `--max-time 10` bounds the request: without it, a stalled connection
/// would hang `fdl init` indefinitely. We also check curl's exit status
/// instead of parsing whatever partial output a failed run left behind.
fn crates_io_version() -> Option<String> {
    let output = Command::new("curl")
        .args(["-sL", "--max-time", "10", "https://crates.io/api/v1/crates/flodl"])
        .output()
        .ok()?;
    if !output.status.success() {
        return None;
    }
    let body = String::from_utf8_lossy(&output.stdout);
    // Cheap extraction of "max_stable_version":"X.Y.Z" from the JSON body.
    let marker = "\"max_stable_version\":\"";
    let start = body.find(marker)? + marker.len();
    let end = start + body[start..].find('"')?;
    let version = &body[start..end];
    if version.is_empty() { None } else { Some(version.to_string()) }
}
99
100// ---------------------------------------------------------------------------
101// Docker scaffold (standalone, libtorch baked into images)
102// ---------------------------------------------------------------------------
103
104fn scaffold_docker(name: &str, crate_name: &str, flodl_dep: &str) -> Result<(), String> {
105    write_file(
106        &format!("{}/Cargo.toml", name),
107        &cargo_toml_template(crate_name, flodl_dep),
108    )?;
109    write_file(
110        &format!("{}/Dockerfile.cpu", name),
111        DOCKERFILE_CPU,
112    )?;
113    write_file(
114        &format!("{}/Dockerfile.cuda", name),
115        DOCKERFILE_CUDA,
116    )?;
117    write_file(
118        &format!("{}/docker-compose.yml", name),
119        &docker_compose_template(crate_name, true),
120    )?;
121    write_file(
122        &format!("{}/Makefile", name),
123        MAKEFILE_DOCKER,
124    )?;
125    Ok(())
126}
127
128// ---------------------------------------------------------------------------
129// Mounted scaffold (libtorch from host, like the main repo)
130// ---------------------------------------------------------------------------
131
132fn scaffold_mounted(name: &str, crate_name: &str, flodl_dep: &str) -> Result<(), String> {
133    write_file(
134        &format!("{}/Cargo.toml", name),
135        &cargo_toml_template(crate_name, flodl_dep),
136    )?;
137    write_file(
138        &format!("{}/Dockerfile", name),
139        DOCKERFILE_MOUNTED,
140    )?;
141    write_file(
142        &format!("{}/Dockerfile.cuda", name),
143        DOCKERFILE_CUDA_MOUNTED,
144    )?;
145    write_file(
146        &format!("{}/docker-compose.yml", name),
147        &docker_compose_template(crate_name, false),
148    )?;
149    write_file(
150        &format!("{}/Makefile", name),
151        MAKEFILE_MOUNTED,
152    )?;
153
154    // Copy fdl bootstrap into the project for self-contained setup
155    let fdl_script = include_str!("../assets/fdl");
156    write_file(&format!("{}/fdl", name), fdl_script)?;
157    #[cfg(unix)]
158    {
159        use std::os::unix::fs::PermissionsExt;
160        let _ = fs::set_permissions(
161            format!("{}/fdl", name),
162            fs::Permissions::from_mode(0o755),
163        );
164    }
165
166    Ok(())
167}
168
169// ---------------------------------------------------------------------------
170// Templates
171// ---------------------------------------------------------------------------
172
/// Render the generated project's Cargo.toml.
///
/// `crate_name` is the `-`-to-`_` normalized project name; `flodl_dep` is
/// the full `[dependencies]` line (crates.io version or git fallback).
/// Dev profiles optimize only the flodl crates so the user's own code keeps
/// fast incremental compiles.
fn cargo_toml_template(crate_name: &str, flodl_dep: &str) -> String {
    format!(
        r#"[package]
name = "{crate_name}"
version = "0.1.0"
edition = "2024"

[dependencies]
{flodl_dep}

# Optimize floDl in dev builds -- your code stays fast to compile.
# After the first build, only your graph code recompiles (~2s).
[profile.dev.package.flodl]
opt-level = 3

[profile.dev.package.flodl-sys]
opt-level = 3

# Release: cross-crate optimization for maximum throughput.
[profile.release]
lto = "thin"
codegen-units = 1
"#,
        crate_name = crate_name,
        flodl_dep = flodl_dep,
    )
}
198
/// Source text for the generated project's `src/main.rs`: a self-contained
/// training example (FlowBuilder model, Adam with a cosine schedule,
/// synthetic random data, epoch loop with Monitor logging) that the user
/// is expected to edit. Returned as an owned `String` to match the other
/// template helpers; the content itself is static.
fn main_rs_template() -> String {
    r#"//! floDl training template.
//!
//! This is a starting point for your model. Edit the architecture,
//! data loading, and training loop to fit your task.
//!
//! New to Rust? Read: https://flodl.dev/guide/rust-primer
//! Stuck?       Read: https://flodl.dev/guide/troubleshooting

use flodl::*;
use flodl::monitor::Monitor;

fn main() -> Result<()> {
    // --- Model ---
    let model = FlowBuilder::from(Linear::new(4, 32)?)
        .through(GELU)
        .through(LayerNorm::new(32)?)
        .also(Linear::new(32, 32)?)       // residual connection
        .through(Linear::new(32, 1)?)
        .build()?;

    // --- Optimizer ---
    let params = model.parameters();
    let mut optimizer = Adam::new(&params, 0.001);
    let scheduler = CosineScheduler::new(0.001, 1e-6, 100);
    model.train();

    // --- Data ---
    // Replace this with your data loading.
    let opts = TensorOptions::default();
    let batches: Vec<(Tensor, Tensor)> = (0..32)
        .map(|_| {
            let x = Tensor::randn(&[16, 4], opts).unwrap();
            let y = Tensor::randn(&[16, 1], opts).unwrap();
            (x, y)
        })
        .collect();

    // --- Training loop ---
    let num_epochs = 100usize;
    let mut monitor = Monitor::new(num_epochs);
    // monitor.serve(3000)?;              // uncomment for live dashboard
    // monitor.watch(&model);             // uncomment to show graph SVG
    // monitor.save_html("report.html");  // uncomment to save HTML report

    for epoch in 0..num_epochs {
        let t = std::time::Instant::now();
        let mut epoch_loss = 0.0;

        for (input_t, target_t) in &batches {
            let input = Variable::new(input_t.clone(), true);
            let target = Variable::new(target_t.clone(), false);

            optimizer.zero_grad();
            let pred = model.forward(&input)?;
            let loss = mse_loss(&pred, &target)?;
            loss.backward()?;
            clip_grad_norm(&params, 1.0)?;
            optimizer.step()?;

            epoch_loss += loss.item()?;
        }

        let avg_loss = epoch_loss / batches.len() as f64;
        let lr = scheduler.lr(epoch);
        optimizer.set_lr(lr);
        monitor.log(epoch, t.elapsed(), &[("loss", avg_loss), ("lr", lr)]);
    }

    monitor.finish();
    Ok(())
}
"#
    .into()
}
274
/// Render the generated project's .gitignore.
///
/// Both modes ignore build artifacts and the cargo cache directories that
/// the compose volumes create; mounted mode additionally ignores the
/// locally downloaded `libtorch/` tree (Docker mode bakes libtorch into
/// the images, so that directory never exists).
fn gitignore_template(docker: bool) -> String {
    let mut entries = String::from(
        "/target
*.fdl
*.log
*.csv
*.html
.cargo-cache/
.cargo-git/
.cargo-cache-cuda/
.cargo-git-cuda/
",
    );
    if !docker {
        entries.push_str("libtorch/\n");
    }
    entries
}
304
/// Render docker-compose.yml for the generated project.
///
/// `baked == true` (Docker mode): the dev/cuda images carry their own
/// libtorch, so no libtorch volume is mounted. `baked == false` (mounted
/// mode): libtorch comes from the host via the `LIBTORCH_CPU_PATH` /
/// `LIBTORCH_HOST_PATH` env vars, and the CUDA base image version is
/// parameterized via `CUDA_VERSION` / `CUDA_TAG` (all exported by the
/// generated Makefile). `${{...}}` is format!-escaping for a literal
/// `${...}` compose variable reference.
fn docker_compose_template(crate_name: &str, baked: bool) -> String {
    if baked {
        format!(
            r#"services:
  dev:
    build:
      context: .
      dockerfile: Dockerfile.cpu
    image: {crate_name}-dev
    user: "${{UID:-1000}}:${{GID:-1000}}"
    volumes:
      - .:/workspace
      - ./.cargo-cache:/usr/local/cargo/registry
      - ./.cargo-git:/usr/local/cargo/git
    working_dir: /workspace
    stdin_open: true
    tty: true

  cuda:
    build:
      context: .
      dockerfile: Dockerfile.cuda
    image: {crate_name}-cuda
    user: "${{UID:-1000}}:${{GID:-1000}}"
    volumes:
      - .:/workspace
      - ./.cargo-cache-cuda:/usr/local/cargo/registry
      - ./.cargo-git-cuda:/usr/local/cargo/git
    working_dir: /workspace
    stdin_open: true
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
"#
        )
    } else {
        format!(
            r#"services:
  dev:
    build:
      context: .
      dockerfile: Dockerfile
    image: {crate_name}-dev
    user: "${{UID:-1000}}:${{GID:-1000}}"
    volumes:
      - .:/workspace
      - ./.cargo-cache:/usr/local/cargo/registry
      - ./.cargo-git:/usr/local/cargo/git
      - ${{LIBTORCH_CPU_PATH:-./libtorch/precompiled/cpu}}:/usr/local/libtorch:ro
    working_dir: /workspace
    stdin_open: true
    tty: true

  cuda:
    build:
      context: .
      dockerfile: Dockerfile.cuda
      args:
        CUDA_VERSION: ${{CUDA_VERSION:-12.8.0}}
    image: {crate_name}-cuda:${{CUDA_TAG:-12.8}}
    user: "${{UID:-1000}}:${{GID:-1000}}"
    volumes:
      - .:/workspace
      - ./.cargo-cache-cuda:/usr/local/cargo/registry
      - ./.cargo-git-cuda:/usr/local/cargo/git
      - ${{LIBTORCH_HOST_PATH:-./libtorch/precompiled/cu128}}:/usr/local/libtorch:ro
    working_dir: /workspace
    stdin_open: true
    tty: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
"#
        )
    }
}
390
391// ---------------------------------------------------------------------------
392// Dockerfile templates
393// ---------------------------------------------------------------------------
394
// Docker mode: libtorch baked into images

/// CPU dev image (Docker mode): Ubuntu base, build tools, stable Rust under
/// /usr/local (world-writable so the compose `user:` remap can use it), and
/// a CPU-only libtorch downloaded at image-build time. Note wget percent-
/// decodes `%2B` to `+` when naming the downloaded file.
const DOCKERFILE_CPU: &str = r#"# CPU-only dev image for floDl projects.
FROM ubuntu:24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
    wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
    && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
    && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

# libtorch (CPU-only, ~200MB)
ARG LIBTORCH_VERSION=2.10.0
RUN wget -q https://download.pytorch.org/libtorch/cpu/libtorch-shared-with-deps-${LIBTORCH_VERSION}%2Bcpu.zip \
    && unzip -q libtorch-shared-with-deps-${LIBTORCH_VERSION}+cpu.zip -d /usr/local \
    && rm libtorch-shared-with-deps-${LIBTORCH_VERSION}+cpu.zip

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib"

WORKDIR /workspace
"#;
424
/// CUDA dev image (Docker mode): nvidia/cuda devel base plus the same Rust
/// setup as the CPU image, with a cu128 libtorch baked in at build time.
/// Containers must be started with GPU access (`--gpus all` / compose
/// device reservation).
const DOCKERFILE_CUDA: &str = r#"# CUDA dev image for floDl projects.
# Requires: docker run --gpus all ...
FROM nvidia/cuda:12.8.0-devel-ubuntu24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
    wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
    && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
    && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

# libtorch (CUDA 12.8)
ARG LIBTORCH_VERSION=2.10.0
RUN wget -q "https://download.pytorch.org/libtorch/cu128/libtorch-shared-with-deps-${LIBTORCH_VERSION}%2Bcu128.zip" \
    && unzip -q "libtorch-shared-with-deps-${LIBTORCH_VERSION}+cu128.zip" -d /usr/local \
    && rm "libtorch-shared-with-deps-${LIBTORCH_VERSION}+cu128.zip"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV CUDA_HOME="/usr/local/cuda"

WORKDIR /workspace
"#;
455
// Mounted mode: libtorch provided at runtime via volume mount

/// CPU dev image (mounted mode): identical tooling to DOCKERFILE_CPU but
/// no libtorch download — compose mounts the host's libtorch read-only at
/// /usr/local/libtorch, which the LIBTORCH_* env vars point at.
const DOCKERFILE_MOUNTED: &str = r#"# CPU dev image for floDl projects (libtorch mounted at runtime).
FROM ubuntu:24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
    wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
    && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
    && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib"

WORKDIR /workspace
"#;
479
/// CUDA dev image (mounted mode): CUDA base version is a build arg so the
/// generated Makefile can match it to the host's libtorch build; libtorch
/// itself is mounted at runtime rather than baked in.
const DOCKERFILE_CUDA_MOUNTED: &str = r#"# CUDA dev image for floDl projects (libtorch mounted at runtime).
# Requires: docker run --gpus all ...
ARG CUDA_VERSION=12.8.0
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
    wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
    && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
    && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV CUDA_HOME="/usr/local/cuda"

WORKDIR /workspace
"#;
505
506// ---------------------------------------------------------------------------
507// Makefile templates
508// ---------------------------------------------------------------------------
509
/// Makefile for Docker mode: every target runs cargo inside the `dev` or
/// `cuda` compose service; the `.cargo-cache*` host directories persist the
/// registry/git caches across container runs. Recipe lines use hard tabs
/// (make requires them).
const MAKEFILE_DOCKER: &str = r#"# Development commands -- all builds run inside Docker.
#
# Quick start:
#   make build   -- compile (CPU)
#   make test    -- run tests
#   make run     -- cargo run
#   make shell   -- interactive shell in container
#
# GPU (requires NVIDIA Container Toolkit):
#   make cuda-build / cuda-test / cuda-run / cuda-shell

COMPOSE = docker compose
RUN     = $(COMPOSE) run --rm dev
RUN_GPU = $(COMPOSE) run --rm cuda

.PHONY: build test run check clippy shell image clean \
        cuda-image cuda-build cuda-test cuda-run cuda-shell

# --- CPU targets ---

image:
	@mkdir -p .cargo-cache .cargo-git
	$(COMPOSE) build dev

build: image
	$(RUN) cargo build

test: image
	$(RUN) cargo test -- --nocapture

run: image
	$(RUN) cargo run

check: image
	$(RUN) cargo check

clippy: image
	$(RUN) cargo clippy -- -W clippy::all

shell: image
	$(COMPOSE) run --rm dev bash

# --- CUDA targets ---

cuda-image:
	@mkdir -p .cargo-cache-cuda .cargo-git-cuda
	$(COMPOSE) build cuda

cuda-build: cuda-image
	$(RUN_GPU) cargo build --features cuda

cuda-test: cuda-image
	$(RUN_GPU) cargo test --features cuda -- --nocapture

cuda-run: cuda-image
	$(RUN_GPU) cargo run --features cuda

cuda-shell: cuda-image
	$(COMPOSE) run --rm cuda bash

# --- Cleanup ---

clean:
	$(COMPOSE) down -v --rmi local
"#;
575
/// Makefile for mounted mode: detects the active host libtorch from
/// `libtorch/.active`, derives `CUDA_VERSION`/`CUDA_TAG` from its `.arch`
/// file (exported for docker-compose), and guards build targets behind
/// libtorch-presence checks so users get an actionable error instead of a
/// linker failure. Recipe lines use hard tabs (make requires them).
const MAKEFILE_MOUNTED: &str = r#"# Development commands -- all builds run inside Docker.
# libtorch is mounted from the host libtorch/ directory.
#
# Quick start:
#   make setup   -- detect hardware, download libtorch, build image
#   make build   -- compile (CPU)
#   make test    -- run tests
#   make run     -- cargo run
#   make shell   -- interactive shell
#
# GPU (requires NVIDIA Container Toolkit):
#   make cuda-build / cuda-test / cuda-run / cuda-shell

COMPOSE = docker compose

# --- libtorch auto-detection ---
LIBTORCH_ACTIVE := $(shell cat libtorch/.active 2>/dev/null | tr -d '[:space:]')
LIBTORCH_HOST_PATH := $(if $(LIBTORCH_ACTIVE),./libtorch/$(LIBTORCH_ACTIVE),)
ARCH_FILE := $(if $(LIBTORCH_HOST_PATH),$(LIBTORCH_HOST_PATH)/.arch,)
ARCH_CUDA := $(shell grep '^cuda=' $(ARCH_FILE) 2>/dev/null | cut -d= -f2)

ifeq ($(ARCH_CUDA),none)
  _CUDA_VER :=
else ifneq ($(ARCH_CUDA),)
  _CUDA_VER := $(ARCH_CUDA).0
else
  _CUDA_VER := 12.8.0
endif
CUDA_VERSION ?= $(_CUDA_VER)
CUDA_TAG     ?= $(shell echo "$(CUDA_VERSION)" | cut -d. -f1,2)

LIBTORCH_CPU_PATH := ./libtorch/precompiled/cpu

export LIBTORCH_HOST_PATH
export LIBTORCH_CPU_PATH
export CUDA_VERSION
export CUDA_TAG

RUN     = $(COMPOSE) run --rm dev
RUN_GPU = $(COMPOSE) run --rm cuda

.PHONY: build test run check clippy shell image clean \
        cuda-image cuda-build cuda-test cuda-run cuda-shell \
        setup _require-libtorch _require-libtorch-cuda

# --- libtorch guards ---

_require-libtorch:
	@if [ ! -d "$(LIBTORCH_CPU_PATH)/lib" ]; then \
		echo ""; \
		echo "ERROR: No CPU libtorch found."; \
		echo "  Run: make setup"; \
		echo "  Or:  ./fdl libtorch download --cpu"; \
		echo ""; \
		exit 1; \
	fi

_require-libtorch-cuda:
	@if [ -z "$(LIBTORCH_HOST_PATH)" ] || [ ! -d "$(LIBTORCH_HOST_PATH)/lib" ]; then \
		echo ""; \
		echo "ERROR: No active CUDA libtorch found."; \
		echo "  Run: make setup"; \
		echo "  Or:  ./fdl libtorch download --cuda 12.8"; \
		echo ""; \
		exit 1; \
	fi

# --- CPU targets ---

image:
	@mkdir -p .cargo-cache .cargo-git
	@if ! docker image inspect $$(basename $$(pwd))-dev:latest >/dev/null 2>&1; then \
		$(COMPOSE) build dev; \
	fi

build: image _require-libtorch
	$(RUN) cargo build

test: image _require-libtorch
	$(RUN) cargo test -- --nocapture

run: image _require-libtorch
	$(RUN) cargo run

check: image _require-libtorch
	$(RUN) cargo check

clippy: image _require-libtorch
	$(RUN) cargo clippy -- -W clippy::all

shell: image
	$(COMPOSE) run --rm dev bash

# --- CUDA targets ---

cuda-image:
	@mkdir -p .cargo-cache-cuda .cargo-git-cuda
	$(COMPOSE) build cuda

cuda-build: cuda-image _require-libtorch-cuda
	$(RUN_GPU) cargo build --features cuda

cuda-test: cuda-image _require-libtorch-cuda
	$(RUN_GPU) cargo test --features cuda -- --nocapture

cuda-run: cuda-image _require-libtorch-cuda
	$(RUN_GPU) cargo run --features cuda

cuda-shell: cuda-image
	$(COMPOSE) run --rm cuda bash

# --- Setup ---

setup:
	./fdl setup --non-interactive

# --- Cleanup ---

clean:
	$(COMPOSE) down -v --rmi local
"#;
697
698// ---------------------------------------------------------------------------
699// File writing helper
700// ---------------------------------------------------------------------------
701
/// Write `content` to `path`, mapping any I/O failure to a human-readable
/// error string that names the offending path.
fn write_file(path: &str, content: &str) -> Result<(), String> {
    match fs::write(path, content) {
        Ok(()) => Ok(()),
        Err(e) => Err(format!("cannot write {}: {}", path, e)),
    }
}