ferrotorch-core 0.1.9

Core tensor and autograd engine for ferrotorch — PyTorch in Rust
use rustc_hash::FxHashMap as HashMap;
use std::collections::VecDeque;

use crate::autograd::hooks::{run_grad_hooks, run_post_accumulate_hooks};
use crate::device::Device;
use crate::dtype::Float;
use crate::error::{FerrotorchError, FerrotorchResult};
use crate::tensor::{Tensor, TensorId};

/// Compute gradients of all leaf tensors that contribute to `root`.
///
/// Implements reverse-mode automatic differentiation:
/// 1. Collect all nodes reachable from `root` that have a `grad_fn`.
/// 2. Topological sort via Kahn's algorithm (iterative, no stack overflow).
/// 3. Walk in reverse topological order, calling each node's `GradFn::backward()`.
/// 4. Accumulate gradients additively on leaf tensors.
///
/// `root` must be a scalar tensor (0-dim or single element). After this call,
/// leaf tensors with `requires_grad = true` will have their `.grad()` populated.
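///
/// # Example
///
/// A minimal sketch (an `ignore`d doctest) mirroring `test_backward_simple_add`
/// at the bottom of this file; `AddBackward` stands in for a user-supplied
/// `GradFn` that routes the incoming gradient to both inputs:
///
/// ```ignore
/// let a = Tensor::from_storage(TensorStorage::cpu(vec![2.0f32]), vec![], true)?;
/// let b = Tensor::from_storage(TensorStorage::cpu(vec![3.0f32]), vec![], true)?;
/// let c = Tensor::from_operation(
///     TensorStorage::cpu(vec![5.0f32]),
///     vec![],
///     Arc::new(AddBackward { a: a.clone(), b: b.clone() }),
/// )?;
/// backward(&c)?; // seeds dc/dc = 1 and walks the graph in reverse
/// assert!((a.grad()?.unwrap().item()? - 1.0).abs() < 1e-6);
/// assert!((b.grad()?.unwrap().item()? - 1.0).abs() < 1e-6);
/// ```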
pub fn backward<T: Float>(root: &Tensor<T>) -> FerrotorchResult<()> {
    backward_with_grad(root, None)
}

/// Run the backward pass through the computation graph.
///
/// If `gradient` is `None`, the root must be scalar and an implicit seed of 1.0 is used.
/// If `gradient` is `Some`, it is used as the initial gradient for the root tensor,
/// allowing backward on non-scalar tensors (needed for multi-head outputs, Jacobian
/// computation, and custom loss functions).
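///
/// # Example
///
/// A minimal sketch (an `ignore`d doctest); `y` is a hypothetical
/// differentiable output of shape `[3]`:
///
/// ```ignore
/// // Weight each output element differently in the backward pass.
/// let seed = Tensor::from_storage(
///     TensorStorage::cpu(vec![1.0f32, 0.5, 0.0]),
///     vec![3],
///     false,
/// )?;
/// backward_with_grad(&y, Some(&seed))?;
/// ```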
pub fn backward_with_grad<T: Float>(
    root: &Tensor<T>,
    gradient: Option<&Tensor<T>>,
) -> FerrotorchResult<()> {
    let seed = if let Some(ext_grad) = gradient {
        // Validate that the external gradient shape matches the root shape.
        if ext_grad.shape() != root.shape() {
            return Err(FerrotorchError::ShapeMismatch {
                message: format!(
                    "gradient shape {:?} does not match root shape {:?}",
                    ext_grad.shape(),
                    root.shape(),
                ),
            });
        }
        ext_grad.clone()
    } else {
        // No external gradient: root must be scalar.
        if !root.is_scalar() && root.numel() != 1 {
            return Err(FerrotorchError::BackwardNonScalar {
                shape: root.shape().to_vec(),
            });
        }

        // Seed gradient: d(root)/d(root) = 1, on the same device as root.
        let ones_storage = crate::storage::TensorStorage::cpu(vec![<T as num_traits::One>::one()]);
        let seed_cpu = Tensor::from_storage(ones_storage, vec![], false)?;
        seed_cpu.to(root.device())?
    };

    // Phase 1: Collect all nodes and compute in-degree via BFS.
    //
    // We traverse the graph from `root` backward through `grad_fn().inputs()`.
    // `in_degree[id]` counts how many times a tensor is used as an input to
    // an operation — this is needed for Kahn's algorithm.
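    //
    // Worked example: for `d = (a * b) + b` (see `test_backward_chain`
    // below), the backward edges are d -> {c, b} and c -> {a, b}, so the
    // resulting in-degrees are d: 0, c: 1, a: 1, b: 2 (b feeds two ops).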
    let mut in_degree: HashMap<TensorId, usize> = HashMap::default();
    let mut node_map: HashMap<TensorId, &Tensor<T>> = HashMap::default();
    let mut queue: VecDeque<&Tensor<T>> = VecDeque::new();

    // Start from root.
    queue.push_back(root);
    in_degree.entry(root.id()).or_insert(0);
    node_map.insert(root.id(), root);

    while let Some(node) = queue.pop_front() {
        if let Some(grad_fn) = node.grad_fn() {
            for input in grad_fn.inputs() {
                let input_id = input.id();
                let count = in_degree.entry(input_id).or_insert(0);
                *count += 1;
                if let std::collections::hash_map::Entry::Vacant(e) = node_map.entry(input_id) {
                    e.insert(input);
                    queue.push_back(input);
                }
            }
        }
    }

    // Phase 2: Topological sort (Kahn's algorithm).
    //
    // Start with nodes that have in_degree == 0. The root always has in_degree 0
    // (nothing depends on it in the backward direction). Process nodes in
    // topological order, decrementing in_degree of their inputs.
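    //
    // Continuing the worked example: d (in-degree 0) is processed first,
    // unlocking c; processing c then unlocks a and b. Note that b is not
    // scheduled until BOTH of its consumers (d and c) have been handled.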
    let mut topo_order: Vec<TensorId> = Vec::new();
    let mut bfs_queue: VecDeque<TensorId> = VecDeque::new();

    // Find all nodes with in_degree 0 (just the root in a standard graph).
    for (&id, &deg) in &in_degree {
        if deg == 0 {
            bfs_queue.push_back(id);
        }
    }

    while let Some(id) = bfs_queue.pop_front() {
        topo_order.push(id);
        if let Some(node) = node_map.get(&id) {
            if let Some(grad_fn) = node.grad_fn() {
                for input in grad_fn.inputs() {
                    if let Some(deg) = in_degree.get_mut(&input.id()) {
                        *deg -= 1;
                        if *deg == 0 {
                            bfs_queue.push_back(input.id());
                        }
                    }
                }
            }
        }
    }

    // Phase 3: Backward pass in topological order.
    //
    // We maintain a map of accumulated output gradients for each node.
    // For the root, the gradient is the seed (1.0).
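    //
    // Continuing the worked example (a = 2, b = 3): `grads` starts as
    // { d: 1 }. Processing d accumulates 1 onto leaf b and stores 1 for
    // non-leaf c; processing c then adds b = 3 onto leaf a and a = 2 onto
    // leaf b, for a total of 3 on each leaf.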
    let mut grads: HashMap<TensorId, Tensor<T>> = HashMap::default();
    grads.insert(root.id(), seed);

    for &id in &topo_order {
        let node = match node_map.get(&id) {
            Some(n) => *n,
            None => continue,
        };

        let grad_output = match grads.remove(&id) {
            Some(g) => g,
            None => continue,
        };

        if let Some(grad_fn) = node.grad_fn() {
            // Materialize non-contiguous gradients before backward.
            // Stride-based views (from permute/transpose/narrow) may be
            // non-contiguous — backward functions expect contiguous data.
            let grad_output = if !grad_output.is_contiguous() {
                crate::methods::contiguous_t(&grad_output)?
            } else {
                grad_output
            };
            let input_grads = grad_fn.backward(&grad_output)?;
            let inputs = grad_fn.inputs();

            // B3 fix: validate that backward returned the correct number
            // of gradients. Without this, `zip` silently drops trailing
            // gradients when the backward function returns fewer than
            // expected, causing silent incorrect results.
            if input_grads.len() != inputs.len() {
                return Err(FerrotorchError::InvalidArgument {
                    message: format!(
                        "backward returned {} gradients but expected {}",
                        input_grads.len(),
                        inputs.len(),
                    ),
                });
            }

            for (input, maybe_grad) in inputs.iter().zip(input_grads.into_iter()) {
                if let Some(grad) = maybe_grad {
                    if input.requires_grad() {
                        // Run gradient hooks (if any), which may modify the gradient.
                        let hooks = input.hooks();
                        let has_hooks = {
                            let guard =
                                hooks.lock().map_err(|e| FerrotorchError::LockPoisoned {
                                    message: format!("hook storage mutex: {e}"),
                                })?;
                            (guard.has_grad_hooks(), guard.has_post_accumulate_hooks())
                        };
                        let grad = if has_hooks.0 {
                            run_grad_hooks(hooks, grad)?
                        } else {
                            grad
                        };

                        if input.is_leaf() {
                            // Leaf tensor: accumulate gradient on the tensor itself.
                            input.accumulate_grad(&grad)?;
                            // Run post-accumulate-grad hooks on the leaf (if any).
                            if has_hooks.1 {
                                run_post_accumulate_hooks(hooks, input)?;
                            }
                        } else {
                            // Non-leaf: accumulate into the grads map for the next iteration.
                            accumulate_non_leaf_grad(&mut grads, input, grad)?;
                        }
                    }
                }
            }
        }
    }

    Ok(())
}

/// Multi-threaded backward engine.
///
/// Same correctness as [`backward_with_grad`], but processes independent
/// backward nodes in parallel using a ready-queue pattern:
///
/// 1. Nodes with in-degree 0 are placed in a shared queue.
/// 2. Worker threads pull nodes, call `grad_fn.backward()`, accumulate grads.
/// 3. After processing, workers decrement in-degrees of the node's inputs.
///    When an input's in-degree reaches 0, it is pushed to the queue.
/// 4. Workers exit when the queue is empty and all nodes are processed.
///
/// Falls back to the single-threaded engine for graphs with fewer than 8
/// nodes or when `num_workers <= 1`.
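///
/// # Example
///
/// A minimal sketch (an `ignore`d doctest); `loss` is a hypothetical scalar
/// output of a large graph:
///
/// ```ignore
/// // Equivalent to `backward(&loss)`, but up to 4 worker threads process
/// // independent branches of the graph concurrently.
/// backward_parallel(&loss, None, 4)?;
/// ```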
pub fn backward_parallel<T: Float>(
    root: &Tensor<T>,
    gradient: Option<&Tensor<T>>,
    num_workers: usize,
) -> FerrotorchResult<()> {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Condvar, Mutex};

    let seed = if let Some(ext_grad) = gradient {
        if ext_grad.shape() != root.shape() {
            return Err(FerrotorchError::ShapeMismatch {
                message: format!(
                    "gradient shape {:?} does not match root shape {:?}",
                    ext_grad.shape(),
                    root.shape(),
                ),
            });
        }
        ext_grad.clone()
    } else {
        if !root.is_scalar() && root.numel() != 1 {
            return Err(FerrotorchError::BackwardNonScalar {
                shape: root.shape().to_vec(),
            });
        }
        let ones_storage = crate::storage::TensorStorage::cpu(vec![<T as num_traits::One>::one()]);
        let seed_cpu = Tensor::from_storage(ones_storage, vec![], false)?;
        seed_cpu.to(root.device())?
    };

    // Phase 1: Collect nodes and compute in-degree (same as sequential).
    let mut in_degree_map: HashMap<TensorId, usize> = HashMap::default();
    let mut node_map: HashMap<TensorId, &Tensor<T>> = HashMap::default();
    let mut queue: VecDeque<&Tensor<T>> = VecDeque::new();

    queue.push_back(root);
    in_degree_map.entry(root.id()).or_insert(0);
    node_map.insert(root.id(), root);

    while let Some(node) = queue.pop_front() {
        if let Some(grad_fn) = node.grad_fn() {
            for input in grad_fn.inputs() {
                let input_id = input.id();
                let count = in_degree_map.entry(input_id).or_insert(0);
                *count += 1;
                if let std::collections::hash_map::Entry::Vacant(e) = node_map.entry(input_id) {
                    e.insert(input);
                    queue.push_back(input);
                }
            }
        }
    }

    let total_nodes = node_map.len();

    // For small graphs, fall back to sequential.
    if total_nodes < 8 || num_workers <= 1 {
        return backward_with_grad(root, gradient);
    }

    // Phase 2: Build shared state for parallel processing.

    // Atomic in-degrees for lock-free decrement.
    let in_degrees: HashMap<TensorId, AtomicUsize> = in_degree_map
        .iter()
        .map(|(&id, &deg)| (id, AtomicUsize::new(deg)))
        .collect();
    let in_degrees = Arc::new(in_degrees);

    // Shared gradient accumulator.
    let grads: Arc<Mutex<HashMap<TensorId, Tensor<T>>>> = Arc::new(Mutex::new({
        let mut m = HashMap::default();
        m.insert(root.id(), seed);
        m
    }));

    // Ready queue + condvar for waking workers.
    let ready: Arc<Mutex<VecDeque<TensorId>>> = Arc::new(Mutex::new(VecDeque::new()));
    let condvar = Arc::new(Condvar::new());

    // Seed the ready queue with all in-degree 0 nodes.
    {
        let mut rq = ready.lock().unwrap();
        for (&id, deg) in in_degrees.iter() {
            if deg.load(Ordering::Relaxed) == 0 {
                rq.push_back(id);
            }
        }
    }

    // Counter of processed nodes — workers exit when this reaches total.
    let processed = Arc::new(AtomicUsize::new(0));

    // Error collector.
    let errors: Arc<Mutex<Vec<FerrotorchError>>> = Arc::new(Mutex::new(Vec::new()));

    // Phase 3: Parallel backward.
    let node_map_ref = &node_map;
    std::thread::scope(|s| {
        let workers = num_workers.min(total_nodes);
        for _ in 0..workers {
            let in_degrees = Arc::clone(&in_degrees);
            let grads = Arc::clone(&grads);
            let ready = Arc::clone(&ready);
            let condvar = Arc::clone(&condvar);
            let processed = Arc::clone(&processed);
            let errors = Arc::clone(&errors);

            s.spawn(move || {
                loop {
                    // Pull a ready node.
                    let id = {
                        let mut rq = ready.lock().unwrap();
                        loop {
                            if let Some(id) = rq.pop_front() {
                                break id;
                            }
                            if processed.load(Ordering::Relaxed) >= total_nodes {
                                return;
                            }
                            rq = condvar.wait(rq).unwrap();
                            if processed.load(Ordering::Relaxed) >= total_nodes {
                                return;
                            }
                        }
                    };

                    // Process this node. The fallible gradient work lives in
                    // the closure below; the in-degree bookkeeping after it
                    // must run unconditionally, even when this node has no
                    // gradient or its backward fails.
                    let result = (|| -> FerrotorchResult<()> {
                        let node = match node_map_ref.get(&id) {
                            Some(n) => *n,
                            None => return Ok(()),
                        };

                        // A node may have no accumulated gradient (an upstream
                        // backward can return `None` for an input). Skip the
                        // gradient work; in-degrees are still decremented below.
                        let grad_output = {
                            let mut g = grads.lock().unwrap();
                            match g.remove(&id) {
                                Some(go) => go,
                                None => return Ok(()),
                            }
                        };

                        if let Some(grad_fn) = node.grad_fn() {
                            let grad_output = if !grad_output.is_contiguous() {
                                crate::methods::contiguous_t(&grad_output)?
                            } else {
                                grad_output
                            };

                            let input_grads = grad_fn.backward(&grad_output)?;
                            let inputs = grad_fn.inputs();

                            if input_grads.len() != inputs.len() {
                                return Err(FerrotorchError::InvalidArgument {
                                    message: format!(
                                        "backward returned {} gradients but expected {}",
                                        input_grads.len(),
                                        inputs.len(),
                                    ),
                                });
                            }

                            for (input, maybe_grad) in
                                inputs.iter().zip(input_grads.into_iter())
                            {
                                if let Some(grad) = maybe_grad {
                                    if input.requires_grad() {
                                        let hooks = input.hooks();
                                        let has_hooks = {
                                            let guard = hooks.lock().map_err(|e| {
                                                FerrotorchError::LockPoisoned {
                                                    message: format!(
                                                        "hook storage mutex: {e}"
                                                    ),
                                                }
                                            })?;
                                            (
                                                guard.has_grad_hooks(),
                                                guard.has_post_accumulate_hooks(),
                                            )
                                        };
                                        let grad = if has_hooks.0 {
                                            run_grad_hooks(hooks, grad)?
                                        } else {
                                            grad
                                        };

                                        if input.is_leaf() {
                                            input.accumulate_grad(&grad)?;
                                            if has_hooks.1 {
                                                run_post_accumulate_hooks(hooks, input)?;
                                            }
                                        } else {
                                            let mut g = grads.lock().unwrap();
                                            accumulate_non_leaf_grad_locked(
                                                &mut g, input, grad,
                                            )?;
                                        }
                                    }
                                }
                            }
                        }

                        Ok(())
                    })();

                    if let Err(e) = result {
                        errors.lock().unwrap().push(e);
                    }

                    // Decrement in-degrees of this node's inputs and push any
                    // that become ready. This runs even when the node had no
                    // gradient or its backward failed; skipping it would leave
                    // downstream in-degrees stuck above zero and deadlock the
                    // workers waiting on the condvar.
                    if let Some(node) = node_map_ref.get(&id) {
                        if let Some(grad_fn) = node.grad_fn() {
                            for input in grad_fn.inputs() {
                                if let Some(deg) = in_degrees.get(&input.id()) {
                                    let prev = deg.fetch_sub(1, Ordering::AcqRel);
                                    if prev == 1 {
                                        let mut rq = ready.lock().unwrap();
                                        rq.push_back(input.id());
                                        condvar.notify_one();
                                    }
                                }
                            }
                        }
                    }

                    let prev = processed.fetch_add(1, Ordering::AcqRel);
                    if prev + 1 >= total_nodes {
                        condvar.notify_all();
                    }
                }
            });
        }
    });

    let errs = match Arc::try_unwrap(errors) {
        Ok(mutex) => mutex.into_inner().unwrap(),
        Err(arc) => {
            let mut guard = arc.lock().unwrap();
            std::mem::take(&mut *guard)
        }
    };
    if let Some(e) = errs.into_iter().next() {
        return Err(e);
    }

    Ok(())
}

/// Like [`accumulate_non_leaf_grad`], but for callers that already hold the
/// grads mutex (the parallel engine). This variant takes a simpler path:
/// GPU-native accumulation is attempted for `f32` only, and the CPU path
/// always allocates a fresh tensor rather than using the in-place (B1)
/// fast path.
fn accumulate_non_leaf_grad_locked<T: Float>(
    grads: &mut HashMap<TensorId, Tensor<T>>,
    input: &Tensor<T>,
    grad: Tensor<T>,
) -> FerrotorchResult<()> {
    let Some(existing) = grads.remove(&input.id()) else {
        grads.insert(input.id(), grad);
        return Ok(());
    };

    if existing.shape() != grad.shape() {
        return Err(FerrotorchError::ShapeMismatch {
            message: format!(
                "gradient shape mismatch during accumulation: {:?} vs {:?}",
                existing.shape(),
                grad.shape(),
            ),
        });
    }

    // GPU-native accumulation when both on same GPU.
    if let (Device::Cuda(_), Device::Cuda(_)) = (existing.device(), grad.device()) {
        if existing.device() == grad.device() {
            if let Some(backend) = crate::gpu_dispatch::gpu_backend() {
                if std::any::TypeId::of::<T>() == std::any::TypeId::of::<f32>() {
                    let sum_handle = backend.add_f32(
                        existing.gpu_handle()?,
                        grad.gpu_handle()?,
                    )?;
                    let combined = Tensor::from_storage(
                        crate::storage::TensorStorage::gpu(sum_handle),
                        existing.shape().to_vec(),
                        false,
                    )?;
                    grads.insert(input.id(), combined);
                    return Ok(());
                }
            }
        }
    }

    // CPU path.
    let existing_data = existing.data_vec()?;
    let grad_data = grad.data_vec()?;
    let combined_data: Vec<T> = existing_data
        .iter()
        .zip(grad_data.iter())
        .map(|(&a, &b)| a + b)
        .collect();
    let device = existing.device();
    let combined = Tensor::from_storage(
        crate::storage::TensorStorage::on_device(combined_data, device)?,
        existing.shape().to_vec(),
        false,
    )?;
    grads.insert(input.id(), combined);
    Ok(())
}

/// Accumulate a gradient for a non-leaf tensor in the backward grads map.
///
/// This is separated from the main backward loop for clarity and to
/// encapsulate the B1 / B6 fixes:
///
/// - **B1**: In-place accumulation is only attempted when both the outer
///   `Arc<TensorInner>` and the inner `Arc<TensorStorage>` have a strong
///   count of 1, the tensor is contiguous, and it is NOT on GPU. Without
///   the storage refcount check, shared-storage views could be corrupted.
///
/// - **B6**: When both the existing gradient and the incoming gradient are
///   on the same GPU device, we use `backend.add_f32()` / `add_f64()`
///   directly instead of round-tripping through CPU. This eliminates two
///   unnecessary PCIe transfers per accumulation.
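///
/// A sketch of the call pattern (hypothetical gradients `g1` and `g2`
/// arriving for the same non-leaf `c`, e.g. because `c` feeds two ops):
///
/// ```ignore
/// accumulate_non_leaf_grad(&mut grads, &c, g1)?; // first arrival: insert
/// accumulate_non_leaf_grad(&mut grads, &c, g2)?; // second: sum into g1
/// ```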
fn accumulate_non_leaf_grad<T: Float>(
    grads: &mut HashMap<TensorId, Tensor<T>>,
    input: &Tensor<T>,
    grad: Tensor<T>,
) -> FerrotorchResult<()> {
    let Some(existing) = grads.remove(&input.id()) else {
        grads.insert(input.id(), grad);
        return Ok(());
    };

    // Shape validation.
    if existing.shape() != grad.shape() {
        return Err(FerrotorchError::ShapeMismatch {
            message: format!(
                "gradient shape mismatch during accumulation: {:?} vs {:?}",
                existing.shape(),
                grad.shape(),
            ),
        });
    }

    // B6 fix: GPU-native accumulation when both tensors are on the same GPU.
    if let (Device::Cuda(_), Device::Cuda(_)) = (existing.device(), grad.device()) {
        if existing.device() == grad.device() {
            if let Some(backend) = crate::gpu_dispatch::gpu_backend() {
                let a_handle = existing.gpu_handle()?;
                let b_handle = grad.gpu_handle()?;
                // Dispatch by element size to pick add_f32 or add_f64.
                let result_handle = if std::mem::size_of::<T>() == 4 {
                    backend.add_f32(a_handle, b_handle)?
                } else {
                    backend.add_f64(a_handle, b_handle)?
                };
                let storage = crate::storage::TensorStorage::gpu(result_handle);
                let combined = Tensor::from_storage(storage, existing.shape().to_vec(), false)?;
                grads.insert(input.id(), combined);
                return Ok(());
            }
        }
    }

    // B1 fix: in-place accumulation is only safe when we have exclusive
    // ownership of BOTH the TensorInner Arc AND the TensorStorage Arc,
    // the tensor is contiguous, and it is on CPU. Without the storage
    // refcount check, views sharing the same storage would be corrupted.
    if existing.inner_refcount() == 1
        && existing.storage_refcount() == 1
        && existing.is_contiguous()
        && !existing.is_cuda()
    {
        // SAFETY: inner_refcount == 1 && storage_refcount == 1 guarantees
        // exclusive ownership. No other references exist.
        let existing_slice = unsafe { existing.data_mut()? };
        let grad_cpu = if grad.is_cuda() { grad.cpu()? } else { grad };
        let grad_data = grad_cpu.data()?;
        if existing_slice.len() != grad_data.len() {
            return Err(FerrotorchError::ShapeMismatch {
                message: format!(
                    "gradient length mismatch during accumulation: {} vs {}",
                    existing_slice.len(),
                    grad_data.len(),
                ),
            });
        }
        for (e, &g) in existing_slice.iter_mut().zip(grad_data.iter()) {
            *e += g;
        }
        grads.insert(input.id(), existing);
        return Ok(());
    }

    // Fallback: allocate a new tensor for the sum (CPU path).
    let device = existing.device();
    let existing_cpu = if existing.is_cuda() {
        existing.cpu()?
    } else {
        existing
    };
    let grad_cpu = if grad.is_cuda() { grad.cpu()? } else { grad };
    let mut existing_data = existing_cpu.data()?.to_vec();
    let grad_data = grad_cpu.data()?;
    if existing_data.len() != grad_data.len() {
        return Err(FerrotorchError::ShapeMismatch {
            message: format!(
                "gradient length mismatch during accumulation: {} vs {}",
                existing_data.len(),
                grad_data.len(),
            ),
        });
    }
    for (e, &g) in existing_data.iter_mut().zip(grad_data.iter()) {
        *e += g;
    }
    let storage = crate::storage::TensorStorage::cpu(existing_data);
    let combined = Tensor::from_storage(storage, existing_cpu.shape().to_vec(), false)?;
    grads.insert(input.id(), combined.to(device)?);
    Ok(())
}

/// Convenience methods on Tensor for calling backward.
impl<T: Float> Tensor<T> {
    /// Compute gradients of all leaf tensors that contribute to this tensor.
    ///
    /// This tensor must be scalar (0-dim or single-element). After this call,
    /// leaf tensors with `requires_grad = true` will have their `.grad()` set.
    pub fn backward(&self) -> FerrotorchResult<()> {
        backward(self)
    }

    /// Run backward with an external gradient.
    ///
    /// This allows backward on non-scalar tensors by providing the initial
    /// gradient explicitly. The gradient shape must match this tensor's shape.
    /// Used for multi-head outputs, Jacobian computation, and custom loss
    /// functions.
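    ///
    /// A minimal sketch (an `ignore`d doctest); `y` is a hypothetical
    /// output of shape `[2]`:
    ///
    /// ```ignore
    /// let seed = Tensor::from_storage(TensorStorage::cpu(vec![1.0f32, 1.0]), vec![2], false)?;
    /// y.backward_with_gradient(&seed)?;
    /// ```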
    pub fn backward_with_gradient(&self, gradient: &Tensor<T>) -> FerrotorchResult<()> {
        backward_with_grad(self, Some(gradient))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::TensorStorage;
    use crate::tensor::GradFn;
    use std::sync::Arc;

    /// A simple grad_fn for testing: output = a + b.
    /// backward: d(a+b)/da = 1, d(a+b)/db = 1.
    #[derive(Debug)]
    struct AddBackward<T: Float> {
        a: Tensor<T>,
        b: Tensor<T>,
    }

    impl<T: Float> GradFn<T> for AddBackward<T> {
        fn backward(&self, grad_output: &Tensor<T>) -> FerrotorchResult<Vec<Option<Tensor<T>>>> {
            Ok(vec![Some(grad_output.clone()), Some(grad_output.clone())])
        }
        fn inputs(&self) -> Vec<&Tensor<T>> {
            vec![&self.a, &self.b]
        }
        fn name(&self) -> &'static str {
            "AddBackward"
        }
    }

    /// A simple grad_fn: output = a * b (elementwise).
    /// backward: d(a*b)/da = b * grad, d(a*b)/db = a * grad.
    #[derive(Debug)]
    struct MulBackward<T: Float> {
        a: Tensor<T>,
        b: Tensor<T>,
    }

    impl<T: Float> GradFn<T> for MulBackward<T> {
        fn backward(&self, grad_output: &Tensor<T>) -> FerrotorchResult<Vec<Option<Tensor<T>>>> {
            let go = grad_output.data()?;
            let a_data = self.a.data()?;
            let b_data = self.b.data()?;

            let grad_a: Vec<T> = go.iter().zip(b_data.iter()).map(|(&g, &b)| g * b).collect();
            let grad_b: Vec<T> = go.iter().zip(a_data.iter()).map(|(&g, &a)| g * a).collect();

            let ta =
                Tensor::from_storage(TensorStorage::cpu(grad_a), self.a.shape().to_vec(), false)?;
            let tb =
                Tensor::from_storage(TensorStorage::cpu(grad_b), self.b.shape().to_vec(), false)?;
            Ok(vec![Some(ta), Some(tb)])
        }
        fn inputs(&self) -> Vec<&Tensor<T>> {
            vec![&self.a, &self.b]
        }
        fn name(&self) -> &'static str {
            "MulBackward"
        }
    }

    /// Helper to make a leaf scalar tensor.
    fn leaf_scalar(val: f32, requires_grad: bool) -> Tensor<f32> {
        Tensor::from_storage(TensorStorage::cpu(vec![val]), vec![], requires_grad).unwrap()
    }

    #[test]
    fn test_backward_simple_add() {
        // c = a + b, backward from c.
        // dc/da = 1, dc/db = 1.
        let a = leaf_scalar(2.0, true);
        let b = leaf_scalar(3.0, true);

        let sum_val = a.data().unwrap()[0] + b.data().unwrap()[0];
        let c = Tensor::from_operation(
            TensorStorage::cpu(vec![sum_val]),
            vec![],
            Arc::new(AddBackward {
                a: a.clone(),
                b: b.clone(),
            }),
        )
        .unwrap();

        c.backward().unwrap();

        let a_grad = a.grad().unwrap().unwrap();
        let b_grad = b.grad().unwrap().unwrap();
        assert!((a_grad.item().unwrap() - 1.0).abs() < 1e-6);
        assert!((b_grad.item().unwrap() - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_backward_mul() {
        // c = a * b, backward from c.
        // dc/da = b = 3.0, dc/db = a = 2.0.
        let a = leaf_scalar(2.0, true);
        let b = leaf_scalar(3.0, true);

        let prod_val = a.data().unwrap()[0] * b.data().unwrap()[0];
        let c = Tensor::from_operation(
            TensorStorage::cpu(vec![prod_val]),
            vec![],
            Arc::new(MulBackward {
                a: a.clone(),
                b: b.clone(),
            }),
        )
        .unwrap();

        c.backward().unwrap();

        let a_grad = a.grad().unwrap().unwrap();
        let b_grad = b.grad().unwrap().unwrap();
        assert!((a_grad.item().unwrap() - 3.0).abs() < 1e-6);
        assert!((b_grad.item().unwrap() - 2.0).abs() < 1e-6);
    }

    #[test]
    fn test_backward_shared_input() {
        // c = a + a, backward from c.
        // dc/da = 1 + 1 = 2.
        let a = leaf_scalar(5.0, true);

        let sum_val = a.data().unwrap()[0] + a.data().unwrap()[0];
        let c = Tensor::from_operation(
            TensorStorage::cpu(vec![sum_val]),
            vec![],
            Arc::new(AddBackward {
                a: a.clone(),
                b: a.clone(),
            }),
        )
        .unwrap();

        c.backward().unwrap();

        let a_grad = a.grad().unwrap().unwrap();
        assert!((a_grad.item().unwrap() - 2.0).abs() < 1e-6);
    }

    #[test]
    fn test_backward_chain() {
        // d = (a * b) + b
        // dd/da = b = 3.0
        // dd/db = a + 1 = 2.0 + 1.0 = 3.0
        let a = leaf_scalar(2.0, true);
        let b = leaf_scalar(3.0, true);

        // c = a * b
        let c_val = 2.0 * 3.0;
        let c = Tensor::from_operation(
            TensorStorage::cpu(vec![c_val]),
            vec![],
            Arc::new(MulBackward {
                a: a.clone(),
                b: b.clone(),
            }),
        )
        .unwrap();

        // d = c + b
        let d_val = c_val + 3.0;
        let d = Tensor::from_operation(
            TensorStorage::cpu(vec![d_val]),
            vec![],
            Arc::new(AddBackward {
                a: c.clone(),
                b: b.clone(),
            }),
        )
        .unwrap();

        d.backward().unwrap();

        let a_grad = a.grad().unwrap().unwrap();
        let b_grad = b.grad().unwrap().unwrap();
        assert!(
            (a_grad.item().unwrap() - 3.0).abs() < 1e-6,
            "expected dd/da = 3.0, got {}",
            a_grad.item().unwrap()
        );
        assert!(
            (b_grad.item().unwrap() - 3.0).abs() < 1e-6,
            "expected dd/db = 3.0, got {}",
            b_grad.item().unwrap()
        );
    }

    #[test]
    fn test_backward_non_scalar_error() {
        let t =
            Tensor::<f32>::from_storage(TensorStorage::cpu(vec![1.0, 2.0, 3.0]), vec![3], false)
                .unwrap();
        assert!(t.backward().is_err());
    }
}