1use std::fs;
9use std::path::Path;
10use std::process::Command;
11
12use crate::util::prompt;
13
/// How a scaffolded project builds and where libtorch comes from.
/// Chosen via `--docker` / `--native` flags or interactively in `run`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
 /// Docker builds; libtorch is mounted from the host at runtime.
 Mounted,
 /// Docker builds; libtorch is baked into the image.
 Docker,
 /// Builds run directly on the host toolchain, no containers.
 Native,
}
20
21pub fn run(name: Option<&str>, docker: bool, native: bool) -> Result<(), String> {
22 let name = name.ok_or("usage: fdl init <project-name>")?;
23 validate_name(name)?;
24
25 if Path::new(name).exists() {
26 return Err(format!("'{}' already exists", name));
27 }
28
29 if docker && native {
30 return Err("--docker and --native are mutually exclusive".into());
31 }
32 let mode = if docker {
33 Mode::Docker
34 } else if native {
35 Mode::Native
36 } else {
37 pick_mode_interactively()
38 };
39
40 let crate_name = name.replace('-', "_");
41 let flodl_dep = resolve_flodl_dep();
42
43 fs::create_dir_all(format!("{}/src", name))
44 .map_err(|e| format!("cannot create directory: {}", e))?;
45
46 match mode {
47 Mode::Mounted => scaffold_mounted(name, &crate_name, &flodl_dep)?,
48 Mode::Docker => scaffold_docker(name, &crate_name, &flodl_dep)?,
49 Mode::Native => scaffold_native(name, &crate_name, &flodl_dep)?,
50 }
51
52 write_file(
54 &format!("{}/src/main.rs", name),
55 &main_rs_template(),
56 )?;
57 write_file(
58 &format!("{}/.gitignore", name),
59 &gitignore_template(mode),
60 )?;
61 write_file(
62 &format!("{}/fdl.yml.example", name),
63 &fdl_yml_example_template(name, mode),
64 )?;
65 write_fdl_bootstrap(name)?;
66
67 print_next_steps(name, mode);
68 crate::util::install_prompt::offer_global_install();
69 Ok(())
70}
71
72fn pick_mode_interactively() -> Mode {
76 println!();
77 if !prompt::ask_yn("Use Docker for builds?", true) {
78 return Mode::Native;
79 }
80 let choice = prompt::ask_choice(
82 "libtorch location",
83 &[
84 "Mounted from host (recommended: lighter image, swap CUDA variants)",
85 "Baked into the Docker image (zero host dependencies)",
86 ],
87 1,
88 );
89 match choice {
90 2 => Mode::Docker,
91 _ => Mode::Mounted,
92 }
93}
94
95fn print_next_steps(name: &str, mode: Mode) {
96 println!();
97 println!("Project '{}' created. Next steps:", name);
98 println!();
99 println!(" cd {}", name);
100 match mode {
101 Mode::Mounted => {
102 println!(" ./fdl setup # detect hardware + download libtorch");
103 println!(" ./fdl build # build the project");
104 }
105 Mode::Docker => {
106 println!(" ./fdl build # first build (downloads libtorch, ~5 min)");
107 }
108 Mode::Native => {
109 println!(" ./fdl libtorch download --cpu # or --cuda 12.8");
110 println!(" ./fdl build # cargo build on the host");
111 }
112 }
113 println!(" ./fdl test # run tests");
114 println!(" ./fdl run # train the model");
115 if mode != Mode::Native {
116 println!(" ./fdl shell # interactive shell");
117 }
118 println!();
119 println!("`./fdl --help` lists every command defined in fdl.yml.");
120 println!("Edit src/main.rs to build your model.");
121 println!();
122 println!("Guides:");
123 println!(" Tutorials: https://flodl.dev/guide/tensors");
124 println!(" Graph Tree: https://flodl.dev/guide/graph-tree");
125 println!(" PyTorch migration: https://flodl.dev/guide/migration");
126 println!(" Troubleshooting: https://flodl.dev/guide/troubleshooting");
127}
128
129fn write_fdl_bootstrap(name: &str) -> Result<(), String> {
130 let fdl_script = include_str!("../assets/fdl");
131 write_file(&format!("{}/fdl", name), fdl_script)?;
132 #[cfg(unix)]
133 {
134 use std::os::unix::fs::PermissionsExt;
135 let _ = fs::set_permissions(
136 format!("{}/fdl", name),
137 fs::Permissions::from_mode(0o755),
138 );
139 }
140 Ok(())
141}
142
/// Validate a project name before any filesystem work.
///
/// Allowed characters: ASCII letters, digits, hyphens, underscores.
/// The first character must additionally be a letter or underscore:
/// `run` derives the Cargo package name from this (hyphens → underscores),
/// and Cargo rejects package names that start with a digit, so a name like
/// `9lives` would scaffold a project that cannot build. A leading hyphen
/// is rejected for the same reason (and would also look like a CLI flag).
fn validate_name(name: &str) -> Result<(), String> {
    let mut chars = name.chars();
    let Some(first) = chars.next() else {
        return Err("project name cannot be empty".into());
    };
    if !(first.is_ascii_alphabetic() || first == '_') {
        return Err("project name must start with a letter or underscore".into());
    }
    if !chars.all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_') {
        return Err("project name must contain only letters, digits, hyphens, underscores".into());
    }
    Ok(())
}
152
153fn resolve_flodl_dep() -> String {
154 if let Some(version) = crates_io_version() {
156 format!("flodl = \"{}\"", version)
157 } else {
158 "flodl = { git = \"https://github.com/fab2s/floDl.git\" }".into()
159 }
160}
161
/// Query crates.io for the latest stable version of `flodl`.
///
/// Shells out to `curl` (avoids pulling an HTTP client dependency into the
/// CLI) and extracts `max_stable_version` with a plain string search rather
/// than a JSON parser. Returns `None` on any failure — curl missing, network
/// down, non-zero exit, marker absent — so the caller can fall back to a
/// git dependency.
fn crates_io_version() -> Option<String> {
    let output = Command::new("curl")
        .args(["-sL", "https://crates.io/api/v1/crates/flodl"])
        .output()
        .ok()?;
    // A failed request must not be mistaken for a usable response: without
    // this check, error output on stdout could be scraped for the marker.
    if !output.status.success() {
        return None;
    }
    let body = String::from_utf8_lossy(&output.stdout);
    let marker = "\"max_stable_version\":\"";
    let start = body.find(marker)? + marker.len();
    let end = start + body[start..].find('"')?;
    let version = &body[start..end];
    if version.is_empty() { None } else { Some(version.to_string()) }
}
175
176fn scaffold_docker(name: &str, crate_name: &str, flodl_dep: &str) -> Result<(), String> {
181 write_file(
182 &format!("{}/Cargo.toml", name),
183 &cargo_toml_template(crate_name, flodl_dep),
184 )?;
185 write_file(
186 &format!("{}/Dockerfile.cpu", name),
187 DOCKERFILE_CPU,
188 )?;
189 write_file(
190 &format!("{}/Dockerfile.cuda", name),
191 DOCKERFILE_CUDA,
192 )?;
193 write_file(
194 &format!("{}/docker-compose.yml", name),
195 &docker_compose_template(crate_name, true),
196 )?;
197 Ok(())
198}
199
200fn scaffold_mounted(name: &str, crate_name: &str, flodl_dep: &str) -> Result<(), String> {
205 write_file(
206 &format!("{}/Cargo.toml", name),
207 &cargo_toml_template(crate_name, flodl_dep),
208 )?;
209 write_file(
210 &format!("{}/Dockerfile", name),
211 DOCKERFILE_MOUNTED,
212 )?;
213 write_file(
214 &format!("{}/Dockerfile.cuda", name),
215 DOCKERFILE_CUDA_MOUNTED,
216 )?;
217 write_file(
218 &format!("{}/docker-compose.yml", name),
219 &docker_compose_template(crate_name, false),
220 )?;
221 Ok(())
222}
223
224fn scaffold_native(name: &str, crate_name: &str, flodl_dep: &str) -> Result<(), String> {
229 write_file(
230 &format!("{}/Cargo.toml", name),
231 &cargo_toml_template(crate_name, flodl_dep),
232 )?;
233 Ok(())
236}
237
/// Render the project's `Cargo.toml`.
///
/// Dev builds optimize only the flodl crates, so the user's own code keeps
/// fast incremental compiles; release builds add thin LTO and a single
/// codegen unit for throughput.
fn cargo_toml_template(crate_name: &str, flodl_dep: &str) -> String {
    format!(
        r#"[package]
name = "{pkg}"
version = "0.1.0"
edition = "2024"

[dependencies]
{dep}

# Optimize floDl in dev builds -- your code stays fast to compile.
# After the first build, only your graph code recompiles (~2s).
[profile.dev.package.flodl]
opt-level = 3

[profile.dev.package.flodl-sys]
opt-level = 3

# Release: cross-crate optimization for maximum throughput.
[profile.release]
lto = "thin"
codegen-units = 1
"#,
        pkg = crate_name,
        dep = flodl_dep,
    )
}
267
/// Source for the generated `src/main.rs`: a small end-to-end training
/// loop (model, Adam + cosine schedule, synthetic data, monitor) that the
/// user edits for their own task.
///
/// Bug fixed here: the template previously emitted `¶ms` — HTML-entity
/// mojibake for `&params` — at the `Adam::new` and `clip_grad_norm` call
/// sites, so every freshly generated project failed to compile.
fn main_rs_template() -> String {
    r#"//! floDl training template.
//!
//! This is a starting point for your model. Edit the architecture,
//! data loading, and training loop to fit your task.
//!
//! New to Rust? Read: https://flodl.dev/guide/rust-primer
//! Stuck? Read: https://flodl.dev/guide/troubleshooting

use flodl::*;
use flodl::monitor::Monitor;

fn main() -> Result<()> {
    // --- Model ---
    let model = FlowBuilder::from(Linear::new(4, 32)?)
        .through(GELU)
        .through(LayerNorm::new(32)?)
        .also(Linear::new(32, 32)?) // residual connection
        .through(Linear::new(32, 1)?)
        .build()?;

    // --- Optimizer ---
    let params = model.parameters();
    let mut optimizer = Adam::new(&params, 0.001);
    let scheduler = CosineScheduler::new(0.001, 1e-6, 100);
    model.train();

    // --- Data ---
    // Replace this with your data loading.
    let opts = TensorOptions::default();
    let batches: Vec<(Tensor, Tensor)> = (0..32)
        .map(|_| {
            let x = Tensor::randn(&[16, 4], opts).unwrap();
            let y = Tensor::randn(&[16, 1], opts).unwrap();
            (x, y)
        })
        .collect();

    // --- Training loop ---
    let num_epochs = 100usize;
    let mut monitor = Monitor::new(num_epochs);
    // monitor.serve(3000)?; // uncomment for live dashboard
    // monitor.watch(&model); // uncomment to show graph SVG
    // monitor.save_html("report.html"); // uncomment to save HTML report

    for epoch in 0..num_epochs {
        let t = std::time::Instant::now();
        let mut epoch_loss = 0.0;

        for (input_t, target_t) in &batches {
            let input = Variable::new(input_t.clone(), true);
            let target = Variable::new(target_t.clone(), false);

            optimizer.zero_grad();
            let pred = model.forward(&input)?;
            let loss = mse_loss(&pred, &target)?;
            loss.backward()?;
            clip_grad_norm(&params, 1.0)?;
            optimizer.step()?;

            epoch_loss += loss.item()?;
        }

        let avg_loss = epoch_loss / batches.len() as f64;
        let lr = scheduler.lr(epoch);
        optimizer.set_lr(lr);
        monitor.log(epoch, t.elapsed(), &[("loss", avg_loss), ("lr", lr)]);
    }

    monitor.finish();
    Ok(())
}
"#
    .into()
}
343
344fn gitignore_template(mode: Mode) -> String {
345 let mut s = String::from(
346 "/target
347*.fdl
348*.log
349*.csv
350*.html
351
352# Local fdl config (fdl.yml.example is committed; fdl copies it on first run)
353fdl.yml
354fdl.yaml
355",
356 );
357 match mode {
358 Mode::Docker => {
359 s.push_str(
361 ".cargo-cache/
362.cargo-git/
363.cargo-cache-cuda/
364.cargo-git-cuda/
365",
366 );
367 }
368 Mode::Mounted => {
369 s.push_str(
371 ".cargo-cache/
372.cargo-git/
373.cargo-cache-cuda/
374.cargo-git-cuda/
375libtorch/
376",
377 );
378 }
379 Mode::Native => {
380 s.push_str("libtorch/\n");
383 }
384 }
385 s
386}
387
/// Render `docker-compose.yml` with a CPU `dev` service and a GPU `cuda`
/// service (NVIDIA device reservation on the latter).
///
/// `baked = true` pairs with `scaffold_docker` (libtorch inside the image);
/// `baked = false` pairs with `scaffold_mounted` and additionally bind-mounts
/// the host libtorch read-only and parameterizes the CUDA base image version.
/// Cargo registry/git caches are kept per-variant so CPU and CUDA builds do
/// not evict each other. The container user is mapped to the host UID/GID so
/// build artifacts in the bind mount stay owned by the invoking user.
fn docker_compose_template(crate_name: &str, baked: bool) -> String {
 if baked {
 // Baked variant: image already contains libtorch, no extra mounts.
 format!(
 r#"services:
 dev:
 build:
 context: .
 dockerfile: Dockerfile.cpu
 image: {crate_name}-dev
 user: "${{UID:-1000}}:${{GID:-1000}}"
 volumes:
 - .:/workspace
 - ./.cargo-cache:/usr/local/cargo/registry
 - ./.cargo-git:/usr/local/cargo/git
 working_dir: /workspace
 stdin_open: true
 tty: true

 cuda:
 build:
 context: .
 dockerfile: Dockerfile.cuda
 image: {crate_name}-cuda
 user: "${{UID:-1000}}:${{GID:-1000}}"
 volumes:
 - .:/workspace
 - ./.cargo-cache-cuda:/usr/local/cargo/registry
 - ./.cargo-git-cuda:/usr/local/cargo/git
 working_dir: /workspace
 stdin_open: true
 tty: true
 deploy:
 resources:
 reservations:
 devices:
 - driver: nvidia
 count: all
 capabilities: [gpu]
"#
 )
 } else {
 // Mounted variant: host libtorch is bind-mounted read-only; the path
 // and CUDA version are overridable via environment variables.
 format!(
 r#"services:
 dev:
 build:
 context: .
 dockerfile: Dockerfile
 image: {crate_name}-dev
 user: "${{UID:-1000}}:${{GID:-1000}}"
 volumes:
 - .:/workspace
 - ./.cargo-cache:/usr/local/cargo/registry
 - ./.cargo-git:/usr/local/cargo/git
 - ${{LIBTORCH_CPU_PATH:-./libtorch/precompiled/cpu}}:/usr/local/libtorch:ro
 working_dir: /workspace
 stdin_open: true
 tty: true

 cuda:
 build:
 context: .
 dockerfile: Dockerfile.cuda
 args:
 CUDA_VERSION: ${{CUDA_VERSION:-12.8.0}}
 image: {crate_name}-cuda:${{CUDA_TAG:-12.8}}
 user: "${{UID:-1000}}:${{GID:-1000}}"
 volumes:
 - .:/workspace
 - ./.cargo-cache-cuda:/usr/local/cargo/registry
 - ./.cargo-git-cuda:/usr/local/cargo/git
 - ${{LIBTORCH_HOST_PATH:-./libtorch/precompiled/cu128}}:/usr/local/libtorch:ro
 working_dir: /workspace
 stdin_open: true
 tty: true
 deploy:
 resources:
 reservations:
 devices:
 - driver: nvidia
 count: all
 capabilities: [gpu]
"#
 )
 }
}
473
/// Dockerfile for baked-libtorch CPU mode: Ubuntu base, stable Rust
/// toolchain with world-writable cargo/rustup dirs (the container runs as
/// the host UID), and CPU-only libtorch unpacked to /usr/local.
// NOTE(review): the wget URL encodes `+` as `%2B` while unzip/rm use the
// decoded `+` name — this relies on wget decoding percent-escapes when
// saving; confirm against the wget version shipped in the base image.
const DOCKERFILE_CPU: &str = r#"# CPU-only dev image for floDl projects.
FROM ubuntu:24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
 wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
 && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
 && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

# libtorch (CPU-only, ~200MB)
ARG LIBTORCH_VERSION=2.10.0
RUN wget -q https://download.pytorch.org/libtorch/cpu/libtorch-shared-with-deps-${LIBTORCH_VERSION}%2Bcpu.zip \
 && unzip -q libtorch-shared-with-deps-${LIBTORCH_VERSION}+cpu.zip -d /usr/local \
 && rm libtorch-shared-with-deps-${LIBTORCH_VERSION}+cpu.zip

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib"

WORKDIR /workspace
"#;
507
/// Dockerfile for baked-libtorch CUDA mode: NVIDIA CUDA devel base image,
/// stable Rust toolchain, and the cu128 libtorch build unpacked to
/// /usr/local. Library paths include the CUDA runtime alongside libtorch.
const DOCKERFILE_CUDA: &str = r#"# CUDA dev image for floDl projects.
# Requires: docker run --gpus all ...
FROM nvidia/cuda:12.8.0-devel-ubuntu24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
 wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
 && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
 && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

# libtorch (CUDA 12.8)
ARG LIBTORCH_VERSION=2.10.0
RUN wget -q "https://download.pytorch.org/libtorch/cu128/libtorch-shared-with-deps-${LIBTORCH_VERSION}%2Bcu128.zip" \
 && unzip -q "libtorch-shared-with-deps-${LIBTORCH_VERSION}+cu128.zip" -d /usr/local \
 && rm "libtorch-shared-with-deps-${LIBTORCH_VERSION}+cu128.zip"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV CUDA_HOME="/usr/local/cuda"

WORKDIR /workspace
"#;
538
/// Dockerfile for mounted-libtorch CPU mode: same toolchain setup as the
/// baked CPU image but without downloading libtorch — the compose file
/// bind-mounts it from the host to the path the env vars point at.
const DOCKERFILE_MOUNTED: &str = r#"# CPU dev image for floDl projects (libtorch mounted at runtime).
FROM ubuntu:24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
 wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
 && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
 && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib"

WORKDIR /workspace
"#;
562
/// Dockerfile for mounted-libtorch CUDA mode: CUDA devel base (version
/// overridable via the `CUDA_VERSION` build arg set in docker-compose),
/// Rust toolchain, and env vars pointing at the runtime-mounted libtorch.
const DOCKERFILE_CUDA_MOUNTED: &str = r#"# CUDA dev image for floDl projects (libtorch mounted at runtime).
# Requires: docker run --gpus all ...
ARG CUDA_VERSION=12.8.0
FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu24.04

ENV DEBIAN_FRONTEND=noninteractive

RUN apt-get update && apt-get install -y --no-install-recommends \
 wget curl unzip ca-certificates git gcc g++ pkg-config graphviz \
 && rm -rf /var/lib/apt/lists/*

# Rust
ENV CARGO_HOME="/usr/local/cargo"
ENV RUSTUP_HOME="/usr/local/rustup"
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable \
 && chmod -R a+rwx "$CARGO_HOME" "$RUSTUP_HOME"
ENV PATH="${CARGO_HOME}/bin:${PATH}"

ENV LIBTORCH_PATH="/usr/local/libtorch"
ENV LD_LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV LIBRARY_PATH="${LIBTORCH_PATH}/lib:/usr/local/cuda/lib64"
ENV CUDA_HOME="/usr/local/cuda"

WORKDIR /workspace
"#;
588
/// Render `fdl.yml.example`: the command palette `./fdl` exposes.
///
/// For Docker-backed modes (`Mounted` and `Docker`) every command is tagged
/// with the compose service it runs in (`dev` for CPU, `cuda` for GPU) and
/// extra `shell` / `cuda-shell` commands are emitted; in `Native` mode the
/// service tags and shell commands are omitted and commands run on the host.
/// The fragments are interpolated into one whitespace-sensitive YAML
/// document, so their exact leading/trailing newlines matter.
fn fdl_yml_example_template(project_name: &str, mode: Mode) -> String {
 // Both Docker-backed modes share the same compose services.
 let use_docker = matches!(mode, Mode::Mounted | Mode::Docker);
 // Per-command service suffixes; empty strings drop the key entirely.
 let (cpu_svc, cuda_svc) = if use_docker {
 ("\n docker: dev", "\n docker: cuda")
 } else {
 ("", "")
 };
 let cuda_note = if use_docker {
 "(requires NVIDIA Container Toolkit)"
 } else {
 "(requires a matching CUDA toolkit on the host)"
 };
 // Mode-specific header comment explaining how commands execute.
 let preamble = if use_docker {
 "# Run any of these with `./fdl <cmd>` (or `fdl <cmd>` once installed\n\
 # globally via `./fdl install`). Libtorch env vars are derived from\n\
 # `libtorch/.active` automatically; missing libtorch surfaces as a\n\
 # clean linker error, with `./fdl setup` one call away."
 } else {
 "# Native mode: commands run on the host. Make sure libtorch is\n\
 # installed (`./fdl libtorch download --cpu` or `--cuda 12.8`)\n\
 # and that `$LIBTORCH` / `$LD_LIBRARY_PATH` are exported so\n\
 # cargo can link. `./fdl libtorch info` prints the commands you\n\
 # need after a download."
 };

 // Optional shell commands: only meaningful when containers exist.
 let shell_block = if use_docker {
 format!(
 r#" shell:
 description: Interactive shell (CPU container)
 run: bash{cpu_svc}

"#
 )
 } else {
 String::new()
 };

 let cuda_shell_block = if use_docker {
 format!(
 r#" cuda-shell:
 description: Interactive shell (CUDA container)
 run: bash{cuda_svc}
"#
 )
 } else {
 String::new()
 };

 format!(
 r#"description: {project_name}

{preamble}

commands:
 # --- CPU ---
 build:
 description: Build (debug)
 run: cargo build{cpu_svc}
 test:
 description: Run CPU tests
 run: cargo test -- --nocapture{cpu_svc}
 run:
 description: cargo run
 run: cargo run{cpu_svc}
 check:
 description: Type-check without building
 run: cargo check{cpu_svc}
 clippy:
 description: Lint
 run: cargo clippy -- -W clippy::all{cpu_svc}
{shell_block} # --- CUDA {cuda_note} ---
 cuda-build:
 description: Build with CUDA feature
 run: cargo build --features cuda{cuda_svc}
 cuda-test:
 description: Run CUDA tests
 run: cargo test --features cuda -- --nocapture{cuda_svc}
 cuda-run:
 description: cargo run --features cuda
 run: cargo run --features cuda{cuda_svc}
{cuda_shell_block}"#
 )
}
684
/// Write `content` to `path`, mapping any I/O failure to a human-readable
/// message that names the offending path.
fn write_file(path: &str, content: &str) -> Result<(), String> {
    match fs::write(path, content) {
        Ok(()) => Ok(()),
        Err(e) => Err(format!("cannot write {}: {}", path, e)),
    }
}