//! Git repository cache management with worktree-based parallel operations
//!
//! This module provides a sophisticated caching system for Git repositories that enables
//! safe parallel resource installation through Git worktrees. The cache system has been
//! redesigned for optimal concurrency, simplified architecture, and enhanced performance
//! in AGPM v0.3.0.
//!
//! # Architecture Overview
//!
//! The cache system implements a multi-layered architecture:
//! - [`Cache`] struct: Core repository management and worktree orchestration
//! - [`CacheLock`]: File-based locking for process-safe concurrent access
//! - `WorktreeState`: Instance-level caching for worktree lifecycle management
//! - Bare repositories: Optimized Git storage for efficient worktree creation
//!
//! # Platform-Specific Cache Locations
//!
//! The cache follows platform conventions for optimal performance:
//! - **Linux/macOS**: `~/.agpm/cache/` (a dotted directory under the user's home)
//! - **Windows**: `%LOCALAPPDATA%\agpm\cache\` (using Windows cache directory)
//! - **Environment Override**: Set `AGPM_CACHE_DIR` for custom locations
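//!
//! For example, to redirect the cache for a single shell session (path is
//! illustrative):
//!
//! ```text
//! export AGPM_CACHE_DIR=/mnt/fast/agpm-cache
//! ```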
//!
//! # Cache Directory Structure
//!
//! The cache is organized for optimal parallel access patterns:
//! ```text
//! ~/.agpm/cache/
//! ├── sources/                    # Bare repositories optimized for worktrees
//! │   ├── github_owner_repo.git/  # Bare repo with all Git objects
//! │   └── gitlab_org_project.git/ # URL-parsed directory naming
//! ├── worktrees/                  # SHA-based worktrees for maximum deduplication
//! │   ├── github_owner_repo_abc12345/ # First 8 chars of commit SHA
//! │   ├── github_owner_repo_def67890/ # Each unique commit gets one worktree
//! │   ├── .state.json             # Persistent worktree registry
//! │   └── github_owner_repo_456789ab/ # Multiple refs to same SHA share worktree
//! └── .locks/                     # Fine-grained locking infrastructure
//!     ├── github_owner_repo.lock      # Repository-level locks
//!     └── worktree-owner_repo-v1.lock # Worktree creation locks
//! ```
//!
//! # Enhanced Concurrency Architecture
//!
//! The v0.3.2+ cache implements SHA-based worktree optimization with advanced concurrency:
//! - **SHA-based deduplication**: Worktrees keyed by commit SHA, not version reference
//! - **Centralized resolution**: `VersionResolver` handles batch SHA resolution upfront
//! - **Maximum reuse**: Multiple tags/branches pointing to same commit share one worktree
//! - **Instance-level caching**: `WorktreeState` tracks creation across threads
//! - **Per-worktree file locking**: Fine-grained locks prevent creation conflicts
//! - **Direct parallelism control**: `--max-parallel` flag controls concurrency
//! - **Command-instance fetch caching**: Single fetch per repository per command
//! - **Atomic state transitions**: Pending → Ready state coordination
//!
//! ## Locking Strategy
//!
//! ```text
//! Process A: acquire("source1") ───┐
//!                                   ├─── BLOCKS: same source
//! Process B: acquire("source1") ───┘
//!
//! Process C: acquire("source2") ───── CONCURRENT: different source
//! ```
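//!
//! A minimal sketch of this pattern using [`CacheLock`] (re-exported from the
//! `lock` module below); the source name is illustrative:
//!
//! ```rust,no_run
//! use agpm_cli::cache::{Cache, CacheLock};
//!
//! # async fn example() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//! // A second process acquiring "source1" blocks here until the guard drops;
//! // locks for other source names proceed concurrently.
//! let _lock = CacheLock::acquire(cache.cache_dir(), "source1").await?;
//! // ... clone or fetch while holding the lock ...
//! # Ok(())
//! # }
//! ```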
//!
//! # Cache Operations
//!
//! ## Repository Management
//! - **Clone**: Initial repository cloning from remote URLs
//! - **Update**: Fetch latest changes from remote (git fetch)
//! - **Checkout**: Switch to specific versions (tags, branches, commits)
//! - **Cleanup**: Remove unused repositories to reclaim disk space
//!
//! ## Resource Installation
//! - **Copy-based**: Files copied from cache to project directories
//! - **Path resolution**: Handles relative paths within repositories
//! - **Directory creation**: Automatically creates parent directories
//! - **Overwrite safety**: Replaces existing files atomically
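//!
//! A common way to get atomic overwrite semantics is to copy to a temporary
//! file and rename it into place; this is a sketch of the technique, not
//! necessarily the exact implementation:
//!
//! ```rust,no_run
//! use std::path::Path;
//!
//! fn atomic_copy(src: &Path, dest: &Path) -> std::io::Result<()> {
//!     if let Some(parent) = dest.parent() {
//!         std::fs::create_dir_all(parent)?; // parent directory creation
//!     }
//!     let tmp = dest.with_extension("tmp");
//!     std::fs::copy(src, &tmp)?;
//!     // `rename` replaces the destination atomically on the same filesystem.
//!     std::fs::rename(&tmp, dest)
//! }
//! ```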
//!
//! # Performance Characteristics
//!
//! The cache is optimized for common AGPM workflows:
//! - **First install**: Clone repository once, reuse for all resources
//! - **Subsequent installs**: Copy from local cache (fast file operations)
//! - **Version switching**: Git checkout within cached repository
//! - **Parallel operations**: Multiple sources can be processed concurrently
//!
//! ## Disk Space Management
//!
//! - **Size calculation**: Recursive directory size calculation
//! - **Unused cleanup**: Remove repositories no longer referenced
//! - **Complete cleanup**: Clear entire cache when needed
//! - **Selective removal**: Keep active sources, remove only unused ones
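//!
//! Size calculation walks the cache tree recursively; a synchronous sketch of
//! the idea (the cache's own `get_cache_size` is async):
//!
//! ```rust,no_run
//! use std::path::Path;
//!
//! fn dir_size(path: &Path) -> std::io::Result<u64> {
//!     let mut total = 0;
//!     for entry in std::fs::read_dir(path)? {
//!         let entry = entry?;
//!         let meta = entry.metadata()?;
//!         total += if meta.is_dir() { dir_size(&entry.path())? } else { meta.len() };
//!     }
//!     Ok(total)
//! }
//! ```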
//!
//! # Error Handling and Recovery
//!
//! The cache provides comprehensive error handling:
//! - **Lock timeouts**: Graceful handling of concurrent access
//! - **Clone failures**: Network and authentication error reporting
//! - **Version errors**: Clear messages for invalid tags/branches/commits
//! - **File system errors**: Detailed context for permission and space issues
//!
//! # Security Considerations
//!
//! - **Path validation**: Prevents directory traversal attacks
//! - **Lock file isolation**: Prevents lock file manipulation
//! - **Safe file operations**: Atomic operations prevent corruption
//! - **Permission handling**: Respects file system permissions
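//!
//! A typical traversal guard canonicalizes both sides before comparing, so
//! `..` segments and symlinks cannot escape the root. This is a sketch of the
//! idea; the real checks live in `crate::utils::security::validate_path_security`:
//!
//! ```rust,no_run
//! use std::path::Path;
//!
//! fn is_within(root: &Path, candidate: &Path) -> bool {
//!     match (root.canonicalize(), candidate.canonicalize()) {
//!         (Ok(root), Ok(candidate)) => candidate.starts_with(root),
//!         _ => false, // unresolvable paths are rejected
//!     }
//! }
//! ```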
//!
//! # Usage Examples
//!
//! ## Basic Cache Operations
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//! use std::path::PathBuf;
//!
//! # async fn example() -> anyhow::Result<()> {
//! // Initialize cache with default location
//! let cache = Cache::new()?;
//!
//! // Get or clone a source repository
//! let repo_path = cache.get_or_clone_source(
//!     "community",
//!     "https://github.com/example/agpm-community.git",
//!     Some("v1.0.0")  // Specific version
//! ).await?;
//!
//! // Copy a resource from cache to project
//! cache.copy_resource(
//!     &repo_path,
//!     "agents/helper.md",  // Source path in repository
//!     &PathBuf::from("./agents/helper.md")  // Destination in project
//! ).await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Cache Maintenance
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//!
//! # #[tokio::main]
//! # async fn main() -> anyhow::Result<()> {
//! let cache = Cache::new()?;
//!
//! // Check cache size
//! let size_bytes = cache.get_cache_size().await?;
//! println!("Cache size: {} MB", size_bytes / 1024 / 1024);
//!
//! // Clean unused repositories
//! let active_sources = vec!["community".to_string(), "work".to_string()];
//! let removed_count = cache.clean_unused(&active_sources).await?;
//! println!("Removed {} unused repositories", removed_count);
//!
//! // Complete cache cleanup
//! cache.clear_all().await?;
//! # Ok(())
//! # }
//! ```
//!
//! ## Custom Cache Location
//!
//! ```rust,no_run
//! use agpm_cli::cache::Cache;
//! use std::path::PathBuf;
//!
//! # fn custom_location() -> anyhow::Result<()> {
//! // Use custom cache directory (useful for testing or special setups)
//! let custom_dir = PathBuf::from("/tmp/my-agpm-cache");
//! let cache = Cache::with_dir(custom_dir)?;
//!
//! println!("Using cache at: {}", cache.get_cache_location().display());
//! # Ok(())
//! # }
//! ```
//!
//! # Integration with AGPM Workflow
//!
//! The cache module integrates seamlessly with AGPM's dependency management:
//! 1. **Manifest parsing**: Source URLs extracted from `agpm.toml`
//! 2. **Dependency resolution**: Version constraints resolved to specific commits
//! 3. **Cache population**: Repositories cloned and checked out as needed
//! 4. **Resource installation**: Files copied from cache to project directories
//! 5. **Lockfile generation**: Installed resources tracked in `agpm.lock`
//!
//! See [`crate::manifest`] for manifest handling and [`crate::lockfile`] for
//! lockfile management.

use crate::core::error::AgpmError;
use crate::git::GitRepo;
use crate::git::command_builder::GitCommand;
use crate::utils::fs;
use crate::utils::security::validate_path_security;
use anyhow::{Context, Result};
use dashmap::DashMap;
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tokio::fs as async_fs;
use tokio::sync::{Mutex, RwLock};

// Concurrency Architecture:
// - Direct control approach: Command parallelism (--max-parallel) + per-worktree file locking
// - Instance-level caching: Worktrees and fetch operations cached per Cache instance
// - Command-level control: --max-parallel flag controls dependency processing parallelism
// - Fetch caching: Network operations cached for 5 minutes to reduce redundancy
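//
// A sketch of the per-repository fetch serialization described above
// (illustrative; see the `fetch_locks` and `fetched_repos` fields on `Cache`):
//
//     let lock = fetch_locks.entry(repo.clone()).or_default().clone();
//     let _guard = lock.lock().await;       // one fetch at a time per repo
//     if fetched_repos.read().await.contains(&repo) {
//         return Ok(());                    // already fetched in this command
//     }
//     // ... run `git fetch` once, then insert `repo` into `fetched_repos` ...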

/// State of a worktree in the instance-level cache for concurrent coordination.
///
/// This enum implements a sophisticated state machine for worktree lifecycle management
/// that enables safe concurrent access across multiple threads without race conditions.
/// The cache uses this state to coordinate between threads that might request the same
/// worktree simultaneously, eliminating the need for global synchronization bottlenecks.
///
/// # State Transitions
///
/// - **Initial**: No entry exists in cache (implicit state)
/// - [`Pending`](WorktreeState::Pending): One thread is creating the worktree
/// - [`Ready`](WorktreeState::Ready): Worktree exists and is ready for all threads
///
/// # Concurrency Coordination Pattern
///
/// The worktree creation process follows this coordinated pattern:
/// 1. **Reservation**: First thread reserves slot by setting state to `Pending`
/// 2. **Creation**: Reserved thread performs actual worktree creation with file lock
/// 3. **Notification**: Creator updates state to `Ready(path)` when complete
/// 4. **Reuse**: Subsequent threads immediately use the ready worktree path
/// 5. **Validation**: All threads verify worktree still exists before use
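///
/// A condensed sketch of that loop (the full version, with retries and file
/// locks, lives in `get_or_create_worktree_for_sha`):
///
/// ```rust,ignore
/// match cache.read().await.get(&key) {
///     Some(WorktreeState::Ready(path)) if path.exists() => return Ok(path.clone()),
///     Some(WorktreeState::Pending) => tokio::time::sleep(wait).await, // retry
///     _ => { /* reserve the slot as Pending, then create the worktree */ }
/// }
/// ```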
///
/// # Cache Key Format
///
/// Worktrees are uniquely identified by composite keys:
/// ```text
/// "{cache_dir_hash}:{owner}_{repo}:{version}"
/// ```
///
/// Components:
/// - `cache_dir_hash`: First 8 hex chars of cache directory path hash
/// - `owner_repo`: Parsed from Git URL (e.g., "`github_owner_project`")
/// - `version`: Git reference (tag, branch, commit, or "HEAD")
///
/// This format ensures isolation between:
/// - Different cache instances (via hash)
/// - Different repositories (via owner/repo)
/// - Different versions (via version string)
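///
/// For example, a worktree for `owner/repo` at `v1.0.0` in a cache directory
/// hashing to `1a2b3c4d` would be keyed `"1a2b3c4d:owner_repo:v1.0.0"`
/// (illustrative values).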
///
/// # Memory Management
///
/// The instance-level cache persists for the lifetime of the `Cache` instance,
/// but worktrees are validated on each access to handle external deletion.
#[derive(Debug, Clone)]
enum WorktreeState {
    /// Another thread is currently creating this worktree.
    ///
    /// When threads encounter this state, they should wait briefly and retry
    /// rather than attempting concurrent worktree creation which would fail.
    Pending,

    /// Worktree is fully created and ready to use.
    ///
    /// The `PathBuf` contains the filesystem path to the working directory.
    /// This path should be validated before use as the worktree may have been
    /// externally deleted.
    Ready(PathBuf),
}

#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct WorktreeRegistry {
    entries: HashMap<String, WorktreeRecord>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
struct WorktreeRecord {
    source: String,
    version: String,
    path: PathBuf,
    last_used: u64,
}
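
// Illustrative shape of the persisted registry (`worktrees/.state.json`),
// derived from the serde definitions above; values are examples only:
//
// {
//   "entries": {
//     "<cache_key>": {
//       "source": "community",
//       "version": "abc12345",
//       "path": "/home/user/.agpm/cache/worktrees/github_owner_repo_abc12345",
//       "last_used": 1700000000
//     }
//   }
// }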

impl WorktreeRegistry {
    fn load(path: &Path) -> Self {
        match std::fs::read(path) {
            Ok(data) => serde_json::from_slice(&data).unwrap_or_default(),
            Err(err) if err.kind() == std::io::ErrorKind::NotFound => Self::default(),
            Err(err) => {
                tracing::warn!("Failed to load worktree registry from {}: {}", path.display(), err);
                Self::default()
            }
        }
    }

    fn update(&mut self, key: String, source: String, version: String, path: PathBuf) {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_else(|_| Duration::from_secs(0))
            .as_secs();

        self.entries.insert(
            key,
            WorktreeRecord {
                source,
                version,
                path,
                last_used: timestamp,
            },
        );
    }

    fn remove_by_path(&mut self, target: &Path) -> bool {
        if let Some(key) = self.entries.iter().find_map(|(k, record)| {
            if record.path == target {
                Some(k.clone())
            } else {
                None
            }
        }) {
            self.entries.remove(&key);
            true
        } else {
            false
        }
    }

    async fn persist(&self, path: &Path) -> Result<()> {
        if let Some(parent) = path.parent() {
            async_fs::create_dir_all(parent).await?;
        }

        let data = serde_json::to_vec_pretty(self)?;
        async_fs::write(path, data).await?;
        Ok(())
    }
}

/// File-based locking mechanism for cache operations
///
/// This module provides thread-safe and process-safe locking for cache
/// operations through OS-level file locks, ensuring data consistency
/// when multiple AGPM processes access the same cache directory.
pub mod lock;
pub use lock::CacheLock;

/// Git repository cache for efficient resource management
///
/// The `Cache` struct provides the primary interface for managing Git repository
/// caching in AGPM. It handles repository cloning, updating, version management,
/// and resource file copying operations.
///
/// # Thread Safety
///
/// `Cache` is cheap to clone: all shared state (worktree cache, fetch locks,
/// fetched-repo set, and worktree registry) lives behind `Arc`s, so clones of
/// one instance coordinate through the same in-memory structures. Across
/// processes, instances operating on the same cache directory stay consistent
/// through the file-based locking mechanism provided by [`CacheLock`].
///
/// # Platform Compatibility
///
/// The cache automatically handles platform-specific differences:
/// - **Path separators**: Uses [`std::path`] for cross-platform compatibility
/// - **Cache location**: Follows platform conventions for app data storage
/// - **File locking**: Uses [`fs4`] crate for cross-platform file locking
/// - **Directory creation**: Handles permissions and long paths on Windows
///
/// # Examples
///
/// Create a cache with default platform-specific location:
///
/// ```rust,no_run
/// use agpm_cli::cache::Cache;
///
/// # fn example() -> anyhow::Result<()> {
/// let cache = Cache::new()?;
/// println!("Cache location: {}", cache.get_cache_location().display());
/// # Ok(())
/// # }
/// ```
///
/// Create a cache with custom location (useful for testing):
///
/// ```rust,no_run
/// use agpm_cli::cache::Cache;
/// use std::path::PathBuf;
///
/// # fn example() -> anyhow::Result<()> {
/// let custom_dir = PathBuf::from("/tmp/test-cache");
/// let cache = Cache::with_dir(custom_dir)?;
/// # Ok(())
/// # }
/// ```
pub struct Cache {
    /// The root directory where all cached repositories are stored
    cache_dir: PathBuf,

    /// Instance-level cache for worktrees to avoid redundant checkouts.
    ///
    /// This cache maps worktree identifiers to their creation state, enabling
    /// safe concurrent access. Multiple threads can request the same worktree
    /// without conflicts - the first thread creates it while others wait.
    ///
    /// **Key format**: `"{cache_dir_hash}:{owner}_{repo}:{version}"`
    ///
    /// The cache directory hash ensures isolation between different Cache instances,
    /// preventing conflicts when multiple instances operate on different cache roots.
    worktree_cache: Arc<RwLock<HashMap<String, WorktreeState>>>,

    /// Per-repository async locks that serialize fetch operations across
    /// concurrent tasks. This prevents redundant `git fetch` runs when
    /// multiple dependencies target the same repository simultaneously.
    fetch_locks: Arc<DashMap<PathBuf, Arc<Mutex<()>>>>,

    /// Command-instance fetch cache to track which repositories have been fetched
    /// during this command execution. This ensures we only fetch once per repository
    /// per command instance, dramatically reducing network operations for multi-dependency
    /// installations.
    ///
    /// Contains bare repository paths that have been fetched in this command instance.
    /// Works in conjunction with `VersionResolver` to minimize Git network operations.
    fetched_repos: Arc<RwLock<HashSet<PathBuf>>>,

    /// Persistent registry of worktrees stored on disk for reuse across
    /// AGPM runs. Tracks last-used timestamps and paths so we can validate
    /// and clean up cached worktrees without recreating them unnecessarily.
    worktree_registry: Arc<Mutex<WorktreeRegistry>>,
}

impl Clone for Cache {
    fn clone(&self) -> Self {
        Self {
            cache_dir: self.cache_dir.clone(),
            worktree_cache: Arc::clone(&self.worktree_cache),
            fetch_locks: Arc::clone(&self.fetch_locks),
            fetched_repos: Arc::clone(&self.fetched_repos),
            worktree_registry: Arc::clone(&self.worktree_registry),
        }
    }
}

impl Cache {
    fn registry_path_for(cache_dir: &Path) -> PathBuf {
        cache_dir.join("worktrees").join(".state.json")
    }

    fn registry_path(&self) -> PathBuf {
        Self::registry_path_for(&self.cache_dir)
    }

    /// Verify that a worktree directory is fully accessible with actual content.
    ///
    /// This function ensures that a newly created worktree is fully accessible
    /// before it's marked as ready. This prevents race conditions in parallel
    /// operations where `git worktree add` returns but the filesystem hasn't
    /// finished writing all files yet.
    ///
    /// # Implementation
    ///
    /// Uses tokio-retry with exponential backoff to handle filesystem sync delays.
    ///
    /// Verification uses `git diff-index --quiet HEAD` which provides a comprehensive
    /// check that:
    /// - The worktree directory and .git marker exist
    /// - The git index is readable
    /// - ALL files from the commit are present and match HEAD
    /// - Git recognizes the worktree as valid
    ///
    /// This single command provides stronger guarantees than multi-level checks,
    /// as it verifies complete checkout rather than partial availability.
    ///
    /// # Parameters
    ///
    /// * `worktree_path` - Path to the worktree directory to verify
    /// * `sha` - The commit SHA being checked out (for logging)
    ///
    /// # Errors
    ///
    /// Returns an error if the worktree is not accessible after all retries.
    async fn verify_worktree_accessible(worktree_path: &Path, sha: &str) -> Result<()> {
        use tokio_retry::Retry;
        use tokio_retry::strategy::{ExponentialBackoff, jitter};

        // Retry strategy with jitter for concurrent operations
        let retry_strategy = ExponentialBackoff::from_millis(50)
            .max_delay(std::time::Duration::from_secs(2))
            .take(10)
            .map(jitter);

        let worktree_path = worktree_path.to_path_buf();
        // Callers pass a validated 40-character SHA, but guard the slice anyway
        // (matching `get_worktree_path`) so short inputs cannot panic.
        let sha_short = &sha[..8.min(sha.len())];

        tracing::debug!(
            target: "git::worktree",
            "Verifying worktree at {} for SHA {}",
            worktree_path.display(),
            sha_short
        );

        Retry::spawn(retry_strategy, || async {
            // Verify working tree matches HEAD (all files checked out)
            // This verifies the worktree structure is valid and all files are present.
            // Cache coherency (making files visible to the parent process) is now
            // handled at the point of actual file read in installer.rs and resolver/mod.rs
            // via read_with_cache_retry functions.
            crate::git::command_builder::GitCommand::new()
                .args(["diff-index", "--quiet", "HEAD"])
                .current_dir(&worktree_path)
                .execute_success()
                .await
                .map_err(|_| "Working tree doesn't match HEAD (checkout incomplete)".to_string())?;

            tracing::debug!(
                target: "git::worktree",
                "Worktree verification passed for {}",
                worktree_path.display()
            );

            Ok::<(), String>(())
        })
        .await
        .map_err(|e| {
            anyhow::anyhow!(
                "Worktree not fully initialized after retries: {} @ {} - {}",
                worktree_path.display(),
                sha_short,
                e
            )
        })
    }

    async fn record_worktree_usage(
        &self,
        registry_key: &str,
        source_name: &str,
        version_key: &str,
        worktree_path: &Path,
    ) -> Result<()> {
        let mut registry = self.worktree_registry.lock().await;
        registry.update(
            registry_key.to_string(),
            source_name.to_string(),
            version_key.to_string(),
            worktree_path.to_path_buf(),
        );
        registry.persist(&self.registry_path()).await?;
        Ok(())
    }

    async fn remove_worktree_record_by_path(&self, worktree_path: &Path) -> Result<()> {
        let mut registry = self.worktree_registry.lock().await;
        if registry.remove_by_path(worktree_path) {
            registry.persist(&self.registry_path()).await?;
        }
        Ok(())
    }

    /// Apply Git transport tuning (HTTP/2, a larger post buffer, and disabled
    /// compression) to a repository's local config.
    ///
    /// Failures are deliberately ignored via `.ok()`: these settings are
    /// performance hints, not correctness requirements, and some Git versions
    /// may not support every key.
    async fn configure_connection_pooling(path: &Path) -> Result<()> {
        let commands = [
            ("http.version", "HTTP/2"),
            ("http.postBuffer", "524288000"),
            ("core.compression", "0"),
        ];

        for (key, value) in commands {
            GitCommand::new()
                .args(["config", key, value])
                .current_dir(path)
                .execute_success()
                .await
                .ok();
        }

        Ok(())
    }

    /// Creates a new `Cache` instance using the default platform-specific cache directory.
    ///
    /// The cache directory is determined based on the current platform:
    /// - **Linux/macOS**: `~/.agpm/cache/`
    /// - **Windows**: `%LOCALAPPDATA%\agpm\cache\`
    ///
    /// # Environment Variable Override
    ///
    /// The cache location can be overridden by setting the `AGPM_CACHE_DIR`
    /// environment variable. This is particularly useful for:
    /// - Testing with isolated cache directories
    /// - CI/CD environments with specific cache locations
    /// - Custom deployment scenarios
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Unable to determine the home/local data directory
    /// - The resolved path is invalid or inaccessible
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// println!("Using cache at: {}", cache.get_cache_location().display());
    /// # Ok(())
    /// # }
    /// ```
    pub fn new() -> Result<Self> {
        let cache_dir = crate::config::get_cache_dir()?;
        let registry_path = Self::registry_path_for(&cache_dir);
        let registry = WorktreeRegistry::load(&registry_path);
        Ok(Self {
            cache_dir,
            worktree_cache: Arc::new(RwLock::new(HashMap::new())),
            fetch_locks: Arc::new(DashMap::new()),
            fetched_repos: Arc::new(RwLock::new(HashSet::new())),
            worktree_registry: Arc::new(Mutex::new(registry)),
        })
    }

    /// Creates a new `Cache` instance using a custom cache directory.
    ///
    /// This constructor allows you to specify exactly where the cache should be
    /// stored, overriding platform defaults. The directory will be created if
    /// it doesn't exist when cache operations are performed.
    ///
    /// # Use Cases
    ///
    /// - **Testing**: Use temporary directories for isolated test environments
    /// - **Development**: Use project-local cache directories
    /// - **Deployment**: Use specific paths in containerized environments
    /// - **Multi-user systems**: Use user-specific cache locations
    ///
    /// # Parameters
    ///
    /// * `cache_dir` - The absolute path where cache data should be stored
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// // Use a project-local cache
    /// let project_cache = Cache::with_dir(PathBuf::from("./cache"))?;
    ///
    /// // Use a system-wide cache
    /// let system_cache = Cache::with_dir(PathBuf::from("/var/cache/agpm"))?;
    ///
    /// // Use a temporary cache for testing
    /// let temp_cache = Cache::with_dir(std::env::temp_dir().join("agpm-test"))?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn with_dir(cache_dir: PathBuf) -> Result<Self> {
        let registry_path = Self::registry_path_for(&cache_dir);
        let registry = WorktreeRegistry::load(&registry_path);
        Ok(Self {
            cache_dir,
            worktree_cache: Arc::new(RwLock::new(HashMap::new())),
            fetch_locks: Arc::new(DashMap::new()),
            fetched_repos: Arc::new(RwLock::new(HashSet::new())),
            worktree_registry: Arc::new(Mutex::new(registry)),
        })
    }

    /// Ensures the cache directory exists, creating it if necessary.
    ///
    /// This method creates the cache directory and all necessary parent directories
    /// if they don't already exist. It's safe to call multiple times - it will
    /// not error if the directory already exists.
    ///
    /// # Platform Considerations
    ///
    /// - **Windows**: Handles long path names (>260 characters) correctly
    /// - **Unix**: Respects umask settings for directory permissions
    /// - **All platforms**: Creates intermediate directories as needed
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Insufficient permissions to create the directory
    /// - Disk space is exhausted
    /// - Path contains invalid characters for the platform
    /// - A file exists at the target path (not a directory)
    ///
    /// # Examples
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Ensure cache directory exists before operations
    /// cache.ensure_cache_dir().await?;
    ///
    /// // Safe to call multiple times
    /// cache.ensure_cache_dir().await?; // No error
    /// # Ok(())
    /// # }
    /// ```
    pub async fn ensure_cache_dir(&self) -> Result<()> {
        if !self.cache_dir.exists() {
            async_fs::create_dir_all(&self.cache_dir).await.with_context(|| {
                format!("Failed to create cache directory at {}", self.cache_dir.display())
            })?;
        }
        Ok(())
    }

    /// Returns the path to the cache directory.
    ///
    /// This is useful for operations that need direct access to the cache directory,
    /// such as lock file cleanup or cache size calculations.
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let cache_dir = cache.cache_dir();
    /// println!("Cache directory: {}", cache_dir.display());
    /// # Ok(())
    /// # }
    /// ```
    pub fn cache_dir(&self) -> &Path {
        &self.cache_dir
    }

    /// Get the worktree path for a specific URL and commit SHA.
    ///
    /// This method constructs the expected worktree directory path based on the cache's
    /// naming scheme. It does NOT check if the worktree exists or create it - use
    /// `get_or_create_worktree_for_sha` for that.
    ///
    /// # Arguments
    ///
    /// * `url` - Git repository URL
    /// * `sha` - Full commit SHA (will be shortened to first 8 characters)
    ///
    /// # Returns
    ///
    /// Path to the worktree directory (may not exist yet)
    ///
    /// # Example
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let path = cache.get_worktree_path(
    ///     "https://github.com/owner/repo.git",
    ///     "abc1234567890def"
    /// )?;
    /// println!("Worktree path: {}", path.display());
    /// # Ok(())
    /// # }
    /// ```
    pub fn get_worktree_path(&self, url: &str, sha: &str) -> Result<PathBuf> {
        let (owner, repo) = crate::git::parse_git_url(url)
            .map_err(|e| anyhow::anyhow!("Invalid Git URL: {}", e))?;
        let sha_short = &sha[..8.min(sha.len())];
        Ok(self.cache_dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}")))
    }

    /// Gets or clones a source repository, ensuring it's available in the cache.
    ///
    /// This is the primary method for source repository management. It handles both
    /// initial cloning of new repositories and updating existing cached repositories.
    /// The operation is atomic and thread-safe through file-based locking.
    ///
    /// # Operation Flow
    ///
    /// 1. **Lock acquisition**: Acquires exclusive lock for the source name
    /// 2. **Directory check**: Determines if repository already exists in cache
    /// 3. **Clone or update**: Either clones new repository or fetches updates
    /// 4. **Version checkout**: Switches to requested version if specified
    /// 5. **Path return**: Returns path to cached repository
    ///
    /// # Concurrency Behavior
    ///
    /// - **Same source**: Concurrent calls with the same `name` will block
    /// - **Different sources**: Concurrent calls with different `name` run in parallel
    /// - **Process safety**: Safe across multiple AGPM processes
    ///
    /// # Version Handling
    ///
    /// The `version` parameter accepts various Git reference types:
    /// - **Tags**: `"v1.0.0"`, `"release-2023"` (most common for releases)
    /// - **Branches**: `"main"`, `"develop"`, `"feature/new-agents"`
    /// - **Commits**: `"abc123def"` (full or short SHA hashes)
    /// - **None**: Uses repository's default branch (typically `main` or `master`)
    ///
    /// # Parameters
    ///
    /// * `name` - Unique source identifier (used for cache directory and locking)
    /// * `url` - Git repository URL (HTTPS, SSH, or local paths)
    /// * `version` - Optional version constraint (tag, branch, or commit)
    ///
    /// # Returns
    ///
    /// Returns the [`PathBuf`] to the cached repository directory, which contains
    /// the full Git repository structure and can be used for resource file access.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - **Network issues**: Unable to clone or fetch from remote repository
    /// - **Authentication**: Invalid credentials for private repositories
    /// - **Version issues**: Specified version doesn't exist in repository
    /// - **Lock timeout**: Unable to acquire exclusive lock (rare)
    /// - **File system**: Permission or disk space issues
    /// - **Git errors**: Repository corruption or invalid Git operations
    ///
    /// # Performance Notes
    ///
    /// - **First call**: Performs full repository clone (slower)
    /// - **Subsequent calls**: Only fetches updates (faster)
    /// - **Version switching**: Uses Git checkout (very fast)
    /// - **Parallel sources**: Multiple sources processed concurrently
    ///
    /// # Examples
    ///
    /// Clone a public repository with specific version:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "community",
    ///     "https://github.com/example/agpm-community.git",
    ///     Some("v1.2.0")
    /// ).await?;
    ///
    /// println!("Repository cached at: {}", repo_path.display());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Use latest version from default branch:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "dev-tools",
    ///     "https://github.com/myorg/dev-tools.git",
    ///     None  // Use default branch
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Work with development branch:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let repo_path = cache.get_or_clone_source(
    ///     "experimental",
    ///     "https://github.com/myorg/experimental.git",
    ///     Some("develop")
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_or_clone_source(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        self.get_or_clone_source_impl(name, url, version).await
    }

    /// Clean up a worktree after use (fast version).
    ///
    /// This just removes the worktree directory without calling git.
    /// Git will clean up its internal references when `git worktree prune` is called.
    ///
    /// # Parameters
    ///
    /// * `worktree_path` - The path to the worktree to clean up
    pub async fn cleanup_worktree(&self, worktree_path: &Path) -> Result<()> {
        // Just remove the directory - don't call git worktree remove
        // This is much faster and git will clean up its references later
        if worktree_path.exists() {
            tokio::fs::remove_dir_all(worktree_path).await.with_context(|| {
                format!("Failed to remove worktree directory: {worktree_path:?}")
            })?;
            self.remove_worktree_record_by_path(worktree_path).await?;
        }
        Ok(())
    }

    /// Clean up all worktrees in the cache.
    ///
    /// This is useful for cleaning up after batch operations or on cache clear.
    pub async fn cleanup_all_worktrees(&self) -> Result<()> {
        let worktrees_dir = self.cache_dir.join("worktrees");

        if !worktrees_dir.exists() {
            return Ok(());
        }

        // Remove the entire worktrees directory
        tokio::fs::remove_dir_all(&worktrees_dir)
            .await
            .with_context(|| "Failed to clean up worktrees")?;

        // Also prune worktree references from all bare repos
        let sources_dir = self.cache_dir.join("sources");
        if sources_dir.exists() {
            let mut entries = tokio::fs::read_dir(&sources_dir).await?;
            while let Some(entry) = entries.next_entry().await? {
                let path = entry.path();
                if path.extension().and_then(|s| s.to_str()) == Some("git") {
                    let bare_repo = GitRepo::new(&path);
                    bare_repo.prune_worktrees().await.ok();
                }
            }
        }

        {
            let mut registry = self.worktree_registry.lock().await;
            if !registry.entries.is_empty() {
                registry.entries.clear();
                registry.persist(&self.registry_path()).await?;
            }
        }

        Ok(())
    }

    /// Get or create a worktree for a specific commit SHA.
    ///
    /// This method is the cornerstone of AGPM's optimized dependency resolution.
    /// By using commit SHAs as the primary key for worktrees, we ensure:
    /// - Maximum worktree reuse (same SHA = same worktree)
    /// - Deterministic installations (SHA uniquely identifies content)
    /// - Reduced disk usage (no duplicate worktrees for same commit)
    ///
    /// # SHA-Based Caching Strategy
    ///
    /// Unlike version-based worktrees that create separate directories for
    /// "v1.0.0" and "release-1.0" even if they point to the same commit,
    /// SHA-based worktrees ensure a single worktree per unique commit.
    ///
    /// # Parameters
    ///
    /// * `name` - Source name from manifest
    /// * `url` - Git repository URL
    /// * `sha` - Full 40-character commit SHA (must be pre-resolved)
    /// * `context` - Optional context for logging
    ///
    /// # Returns
    ///
    /// Path to the worktree containing the exact commit specified by SHA.
    ///
    /// # Example
    ///
    /// ```no_run
    /// # use agpm_cli::cache::Cache;
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // First resolve version to SHA
    /// let sha = "abc1234567890def1234567890abcdef12345678";
    ///
    /// // Get worktree for that specific commit
    /// let worktree = cache.get_or_create_worktree_for_sha(
    ///     "community",
    ///     "https://github.com/example/repo.git",
    ///     sha,
    ///     Some("my-agent")
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_or_create_worktree_for_sha(
        &self,
        name: &str,
        url: &str,
        sha: &str,
        context: Option<&str>,
    ) -> Result<PathBuf> {
        // Validate SHA format
        if sha.len() != 40 || !sha.chars().all(|c| c.is_ascii_hexdigit()) {
            return Err(anyhow::anyhow!(
                "Invalid SHA format: expected 40 hex characters, got '{sha}'"
            ));
        }

        // Check if this is a local path
        let is_local_path = crate::utils::is_local_path(url);
        if is_local_path {
            // Local paths don't use worktrees
            return self.get_or_clone_source(name, url, None).await;
        }

        self.ensure_cache_dir().await?;

        // Parse URL for cache structure
        let (owner, repo) = crate::git::parse_git_url(url)
            .unwrap_or_else(|_| ("direct".to_string(), "repo".to_string()));

        // Create SHA-based cache key
        // Using first 8 chars of SHA for directory name (like Git does)
        let sha_short = &sha[..8];
        let cache_dir_hash = {
            use std::collections::hash_map::DefaultHasher;
            use std::hash::{Hash, Hasher};
            let mut hasher = DefaultHasher::new();
            self.cache_dir.hash(&mut hasher);
            // Zero-pad to 16 hex digits so the 8-character slice below cannot
            // panic on hash values that format to fewer than 8 digits.
            format!("{:016x}", hasher.finish())[..8].to_string()
        };
        let cache_key = format!("{cache_dir_hash}:{owner}_{repo}:{sha}");

        // Check if we already have a worktree for this SHA
        let mut should_create_worktree = false;
        while !should_create_worktree {
            {
                let cache_read = self.worktree_cache.read().await;
                match cache_read.get(&cache_key) {
                    Some(WorktreeState::Ready(cached_path)) => {
                        if cached_path.exists() {
                            let cached_path = cached_path.clone();
                            drop(cache_read);
                            self.record_worktree_usage(&cache_key, name, sha_short, &cached_path)
                                .await?;

                            if let Some(ctx) = context {
                                tracing::debug!(
                                    target: "git",
                                    "({}) Reusing SHA-based worktree for {} @ {}",
                                    ctx,
                                    url.split('/').next_back().unwrap_or(url),
                                    sha_short
                                );
                            }
                            return Ok(cached_path);
                        }
                        should_create_worktree = true;
                    }
                    Some(WorktreeState::Pending) => {
                        if let Some(ctx) = context {
                            tracing::debug!(
                                target: "git",
                                "({}) Waiting for SHA worktree creation for {} @ {}",
                                ctx,
                                url.split('/').next_back().unwrap_or(url),
                                sha_short
                            );
                        }
                        drop(cache_read);
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                    None => {
                        should_create_worktree = true;
                    }
                }
            }
        }

        // Reserve the cache slot
        let mut reservation_successful = false;
        while !reservation_successful {
            let mut cache_write = self.worktree_cache.write().await;
            match cache_write.get(&cache_key) {
                Some(WorktreeState::Ready(cached_path)) if cached_path.exists() => {
                    return Ok(cached_path.clone());
                }
                Some(WorktreeState::Pending) => {
                    drop(cache_write);
                    tokio::time::sleep(Duration::from_millis(50)).await;
                }
                _ => {
                    cache_write.insert(cache_key.clone(), WorktreeState::Pending);
                    reservation_successful = true;
                }
            }
        }

        // Get bare repository (fetches if needed)
        let bare_repo_dir = self.cache_dir.join("sources").join(format!("{owner}_{repo}.git"));

        if bare_repo_dir.exists() {
            // Fetch to ensure we have the SHA
            self.fetch_with_hybrid_lock(&bare_repo_dir, context).await?;
        } else {
            let lock_name = format!("{owner}_{repo}");
            let _lock = CacheLock::acquire(&self.cache_dir, &lock_name).await?;

            if let Some(parent) = bare_repo_dir.parent() {
                tokio::fs::create_dir_all(parent).await?;
            }

            if !bare_repo_dir.exists() {
                if let Some(ctx) = context {
                    tracing::debug!("📦 ({ctx}) Cloning repository {url}...");
                } else {
                    tracing::debug!("📦 Cloning repository {url} to cache...");
                }

                GitRepo::clone_bare_with_context(url, &bare_repo_dir, context).await?;
                Self::configure_connection_pooling(&bare_repo_dir).await.ok();
            }
        }

        let bare_repo = GitRepo::new(&bare_repo_dir);

        // Create worktree path using SHA
        let worktree_path =
            self.cache_dir.join("worktrees").join(format!("{owner}_{repo}_{sha_short}"));

        // Acquire worktree creation lock
        let worktree_lock_name = format!("worktree-{owner}-{repo}-{sha_short}");
        let _worktree_lock = CacheLock::acquire(&self.cache_dir, &worktree_lock_name).await?;

        // Re-check after lock
        if worktree_path.exists() {
            let mut cache_write = self.worktree_cache.write().await;
            cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
            self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
            return Ok(worktree_path);
        }

        // The worktree does not exist at this point (the check above returned
        // early if it did), so prune any stale administrative entries before
        // re-creating it.
        let _ = bare_repo.prune_worktrees().await;

        // Create worktree at specific SHA
        if let Some(ctx) = context {
            tracing::debug!(
                target: "git",
                "({}) Creating SHA-based worktree: {} @ {}",
                ctx,
                url.split('/').next_back().unwrap_or(url),
                sha_short
            );
        }

        // Lock bare repo for worktree creation
        // Hold the lock through cache update to prevent git state corruption
        // when multiple worktrees are created concurrently for the same repo
        let bare_repo_lock_name = format!("bare-repo-{owner}_{repo}");
        let _bare_repo_lock = CacheLock::acquire(&self.cache_dir, &bare_repo_lock_name).await?;

        // Create worktree using SHA directly
        let worktree_result =
            bare_repo.create_worktree_with_context(&worktree_path, Some(sha), context).await;

        // Keep lock held until cache is updated to ensure git state is fully settled
        match worktree_result {
            Ok(_) => {
                // Verify worktree is fully accessible before marking as Ready
                // This prevents race conditions where git worktree add returns
                // but filesystem hasn't finished writing all files yet
                Self::verify_worktree_accessible(&worktree_path, sha).await?;

                let mut cache_write = self.worktree_cache.write().await;
                cache_write.insert(cache_key.clone(), WorktreeState::Ready(worktree_path.clone()));
                self.record_worktree_usage(&cache_key, name, sha_short, &worktree_path).await?;
                // Lock automatically dropped here
                Ok(worktree_path)
            }
            Err(e) => {
                let mut cache_write = self.worktree_cache.write().await;
                cache_write.remove(&cache_key);
                // Lock automatically dropped here
                Err(e)
            }
        }
    }

    /// Gets or clones a source repository, returning the cached repository path.
    ///
    /// This method provides the core functionality for repository access.
    /// It creates bare repositories that can be shared by all operations
    /// (resolution, installation, etc). Local paths are validated and
    /// returned directly, with no cloning or version management.
    ///
    /// # Parameters
    ///
    /// * `name` - The name of the source (used for cache directory naming)
    /// * `url` - The Git repository URL or local path
    /// * `version` - Optional specific version/tag/branch to checkout
    ///
    /// # Returns
    ///
    /// Returns the path to the cached bare repository directory
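    ///
    /// # Examples
    ///
    /// A sketch via the public wrapper `get_or_clone_source`; note that a
    /// local path skips cloning and is returned directly:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Git URLs are cloned (or fetched) into the cache as bare repos
    /// let repo = cache.get_or_clone_source(
    ///     "community",
    ///     "https://github.com/example/repo.git",
    ///     Some("v1.0.0")
    /// ).await?;
    ///
    /// // Local paths are validated and returned as-is; versions are ignored
    /// let local = cache.get_or_clone_source("local-tools", "../shared", None).await?;
    /// # Ok(())
    /// # }
    /// ```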
    async fn get_or_clone_source_impl(
        &self,
        name: &str,
        url: &str,
        version: Option<&str>,
    ) -> Result<PathBuf> {
        // Check if this is a local path (not a git repository URL)
        let is_local_path = crate::utils::is_local_path(url);

        if is_local_path {
            // For local paths (directories), validate and return the secure path
            // No cloning or version management needed

            // Resolve path securely with validation
            let resolved_path = crate::utils::platform::resolve_path(url)?;

            // Canonicalize to get the real path and prevent symlink attacks
            let canonical_path = crate::utils::safe_canonicalize(&resolved_path)
                .map_err(|_| anyhow::anyhow!("Local path is not accessible or does not exist"))?;

            // Security check: Validate path against blacklist and symlinks
            validate_path_security(&canonical_path, true)?;

            // For local paths, versions don't apply. Suppress warning for internal sentinel values.
            if let Some(ver) = version
                && ver != "local"
            {
                eprintln!("Warning: Version constraints are ignored for local paths");
            }

            return Ok(canonical_path);
        }

        self.ensure_cache_dir().await?;

        // Acquire lock for this source to prevent concurrent access
        let _lock = CacheLock::acquire(&self.cache_dir, name)
            .await
            .with_context(|| format!("Failed to acquire lock for source: {name}"))?;

        // Use the same cache directory structure as worktrees - bare repos with .git suffix
        // This ensures we have ONE repository that's shared by all operations
        let (owner, repo) =
            crate::git::parse_git_url(url).unwrap_or(("direct".to_string(), "repo".to_string()));
        let source_dir = self.cache_dir.join("sources").join(format!("{owner}_{repo}.git")); // Always use .git suffix for bare repos

        // Ensure parent directory exists
        if let Some(parent) = source_dir.parent() {
            tokio::fs::create_dir_all(parent)
                .await
                .with_context(|| format!("Failed to create cache directory: {parent:?}"))?;
        }

        if source_dir.exists() {
            // Use existing cache - fetch to ensure we have the latest refs.
            // Only Git URLs have a remote to fetch from (local paths already
            // returned early above); fetching matters especially for branches
            if crate::utils::is_git_url(url) {
                // Check if we've already fetched this repo in this command instance
                let already_fetched = {
                    let fetched = self.fetched_repos.read().await;
                    fetched.contains(&source_dir)
                };

                if already_fetched {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Skipping fetch for {} (already fetched in this command)",
                        name
                    );
                } else {
                    tracing::debug!(
                        target: "agpm::cache",
                        "Fetching updates for {} from {}",
                        name,
                        url
                    );
                    let repo = crate::git::GitRepo::new(&source_dir);
                    if let Err(e) = repo.fetch(None).await {
                        tracing::warn!(
                            target: "agpm::cache",
                            "Failed to fetch updates for {}: {}",
                            name,
                            e
                        );
                    } else {
                        // Mark this repo as fetched for this command execution
                        let mut fetched = self.fetched_repos.write().await;
                        fetched.insert(source_dir.clone());
                        tracing::debug!(
                            target: "agpm::cache",
                            "Successfully fetched updates for {}",
                            name
                        );
                    }
                }
            } else {
                tracing::debug!(
                    target: "agpm::cache",
                    "Skipping fetch for local path: {}",
                    url
                );
            }
        } else {
            // Directory doesn't exist - clone fresh as bare repo
            self.clone_source(url, &source_dir).await?;
        }

        Ok(source_dir)
    }

    /// Clones a Git repository to the specified target directory as a bare repository.
    ///
    /// This internal method performs the initial clone operation for repositories
    /// that are not yet present in the cache. It creates a bare repository which
    /// is optimal for serving and allows multiple worktrees to be created from it.
    ///
    /// # Why Bare Repositories
    ///
    /// Bare repositories are used because:
    /// - **No working directory conflicts**: Multiple worktrees can be created safely
    /// - **Optimized for serving**: Like GitHub/GitLab, designed for fetch operations
    /// - **Space efficient**: No checkout of files in the main repository
    /// - **Concurrent access**: Multiple processes can fetch from it simultaneously
    ///
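    /// Conceptually the clone behaves like the following command (a
    /// simplification; the actual invocation is handled by
    /// `GitRepo::clone_bare`):
    ///
    /// ```text
    /// git clone --bare <url> <target>
    /// ```
    ///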
    /// # Authentication
    ///
    /// Repository authentication is handled through:
    /// - **SSH keys**: For `git@github.com:` URLs (user's SSH configuration)
    /// - **HTTPS tokens**: For private repositories (from global config)
    /// - **Public repos**: No authentication required
    ///
    /// # Parameters
    ///
    /// * `url` - Git repository URL to clone from
    /// * `target` - Local directory path where bare repository should be created
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Repository URL is invalid or unreachable
    /// - Authentication fails for private repositories
    /// - Target directory cannot be created or written to
    /// - Network connectivity issues
    /// - Git command is not available in PATH
    async fn clone_source(&self, url: &str, target: &Path) -> Result<()> {
        tracing::debug!("📦 Cloning {} to cache...", url);

        // Clone as a bare repository for better concurrency and worktree support
        GitRepo::clone_bare(url, target)
            .await
            .with_context(|| format!("Failed to clone repository from {url}"))?;

        // Debug: List what was cloned
        if cfg!(test)
            && let Ok(entries) = std::fs::read_dir(target)
        {
            tracing::debug!(
                target: "agpm::cache",
                "Cloned bare repo to {}, contents:",
                target.display()
            );
            for entry in entries.flatten() {
                tracing::debug!(
                    target: "agpm::cache",
                    "  - {}",
                    entry.path().display()
                );
            }
        }

        Ok(())
    }

    /// Copies a resource file from cached repository to project directory.
    ///
    /// This method performs the core resource installation operation by copying
    /// files from the cached Git repository to the project's local directory.
    /// It provides a simple interface for resource installation without output.
    ///
    /// # Copy Strategy
    ///
    /// The method uses a copy-based approach rather than symlinks for:
    /// - **Cross-platform compatibility**: Works identically on all platforms
    /// - **Git integration**: Real files can be tracked and committed
    /// - **Editor support**: No symlink confusion in IDEs and editors
    /// - **User flexibility**: Local files can be modified if needed
    ///
    /// # Path Resolution
    ///
    /// - **Source path**: Relative to the repository root directory
    /// - **Target path**: Absolute path where file should be installed
    /// - **Directory creation**: Parent directories created automatically
    /// - **Path normalization**: Handles platform-specific path separators
    ///
    /// # Parameters
    ///
    /// * `source_dir` - Path to the cached repository directory
    /// * `source_path` - Relative path to the resource file within the repository
    /// * `target_path` - Absolute path where the resource should be installed
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Source file doesn't exist in the repository
    /// - Target directory cannot be created (permissions)
    /// - File copy operation fails (disk space, permissions)
    ///
    /// # Examples
    ///
    /// Copy a single resource file:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Get cached repository
    /// let repo_path = cache.get_or_clone_source(
    ///     "community",
    ///     "https://github.com/example/repo.git",
    ///     Some("v1.0.0")
    /// ).await?;
    ///
    /// // Copy resource to project
    /// cache.copy_resource(
    ///     &repo_path,
    ///     "agents/helper.md",  // Source: agents/helper.md in repository
    ///     &PathBuf::from("./my-agents/helper.md")  // Target: project location
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Copy nested resource:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource(
    ///     &repo_path,
    ///     "tools/generators/api-client.md",  // Nested source path
    ///     &PathBuf::from("./tools/api-client.md")  // Flattened target
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy_resource(
        &self,
        source_dir: &Path,
        source_path: &str,
        target_path: &Path,
    ) -> Result<()> {
        self.copy_resource_with_output(source_dir, source_path, target_path, false).await
    }

    /// Copies a resource file with optional installation output messages.
    ///
    /// This is the full-featured resource copying method that provides control
    /// over whether installation progress is displayed to the user. It handles
    /// all the details of safe file copying including directory creation,
    /// error handling, and atomic operations.
    ///
    /// # Operation Details
    ///
    /// 1. **Source validation**: Verifies the source file exists in repository
    /// 2. **Directory creation**: Creates target parent directories if needed
    /// 3. **File copy**: Copies the file, overwriting any existing target
    /// 4. **Progress output**: Optionally displays installation confirmation
    ///
    /// # File Safety
    ///
    /// - **Overwrite behavior**: Existing target files are replaced without warning
    /// - **Copy semantics**: Uses the platform copy operation; not guaranteed atomic
    /// - **Permission preservation**: Maintains reasonable file permissions
    /// - **Directory creation**: Missing parent directories are created first
    ///
    /// # Output Control
    ///
    /// When `show_output` is `true`, displays user-friendly installation messages:
    /// ```text
    /// ✅ Installed ./agents/helper.md
    /// ✅ Installed ./snippets/docker-compose.md
    /// ```
    ///
    /// # Parameters
    ///
    /// * `source_dir` - Path to the cached repository directory
    /// * `source_path` - Relative path to resource file within repository
    /// * `target_path` - Absolute path where resource should be installed
    /// * `show_output` - Whether to display installation progress messages
    ///
    /// # Errors
    ///
    /// Returns specific error types for different failure modes:
    /// - [`AgpmError::ResourceFileNotFound`]: Source file doesn't exist
    /// - File system errors: Permission, disk space, invalid paths
    /// - Directory creation errors: Parent directory creation failures
    ///
    /// # Examples
    ///
    /// Silent installation (for batch operations):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource_with_output(
    ///     &repo_path,
    ///     "agents/helper.md",
    ///     &PathBuf::from("./agents/helper.md"),
    ///     false  // No output
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Interactive installation (with progress):
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    /// use std::path::PathBuf;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let repo_path = PathBuf::from("/cache/community");
    ///
    /// cache.copy_resource_with_output(
    ///     &repo_path,
    ///     "snippets/deployment.md",
    ///     &PathBuf::from("./snippets/deployment.md"),
    ///     true  // Show "✅ Installed" message
    /// ).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn copy_resource_with_output(
        &self,
        source_dir: &Path,
        source_path: &str,
        target_path: &Path,
        show_output: bool,
    ) -> Result<()> {
        let source_file = source_dir.join(source_path);

        if !source_file.exists() {
            return Err(AgpmError::ResourceFileNotFound {
                path: source_path.to_string(),
                source_name: source_dir
                    .file_name()
                    .and_then(|n| n.to_str())
                    .unwrap_or("unknown")
                    .to_string(),
            }
            .into());
        }

        if let Some(parent) = target_path.parent() {
            async_fs::create_dir_all(parent)
                .await
                .with_context(|| format!("Failed to create directory: {}", parent.display()))?;
        }

        async_fs::copy(&source_file, target_path).await.with_context(|| {
            format!("Failed to copy {} to {}", source_file.display(), target_path.display())
        })?;

        if show_output {
            println!("  ✅ Installed {}", target_path.display());
        }

        Ok(())
    }

    /// Removes unused cached repositories to reclaim disk space.
    ///
    /// This method performs selective cache cleanup by removing repositories
    /// that are no longer referenced by any active source configurations.
    /// It's a safe operation that preserves repositories currently in use.
    ///
    /// # Cleanup Strategy
    ///
    /// 1. **Directory scanning**: Enumerates all cached repository directories
    /// 2. **Active comparison**: Checks each directory against active sources list
    /// 3. **Safe removal**: Removes only unused directories, preserving files
    /// 4. **Progress reporting**: Displays removal progress for user feedback
    ///
    /// # Safety Guarantees
    ///
    /// - **Active protection**: Never removes repositories listed in active sources
    /// - **Directory-only**: Only removes directories, preserving any loose files
    /// - **Best-effort removal**: A failed removal aborts the scan with an error
    /// - **Lock handling**: Doesn't acquire locks; removal can fail if files are in use
    ///
    /// # Performance Considerations
    ///
    /// - **I/O intensive**: Scans entire cache directory structure
    /// - **Disk space recovery**: Can free significant space for large repositories
    /// - **Network cost**: Removed repositories must be re-cloned if used again
    /// - **Concurrent safe**: Can run while other cache operations are in progress
    ///
    /// # Parameters
    ///
    /// * `active_sources` - List of source names that should be preserved in cache
    ///
    /// # Returns
    ///
    /// Returns the number of repository directories that were successfully removed.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Cache directory cannot be read (permissions)
    /// - Unable to remove a directory (file locks, permissions)
    /// - File system errors during directory traversal
    ///
    /// # Output Messages
    ///
    /// Displays progress messages for each removed repository:
    /// ```text
    /// 🗑️  Removing unused cache: old-project
    /// 🗑️  Removing unused cache: deprecated-tools
    /// ```
    ///
    /// # Examples
    ///
    /// Clean cache based on current manifest sources:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Active sources from current agpm.toml
    /// let active_sources = vec![
    ///     "community".to_string(),
    ///     "work-tools".to_string(),
    ///     "personal".to_string(),
    /// ];
    ///
    /// let removed = cache.clean_unused(&active_sources).await?;
    /// println!("Cleaned {} unused repositories", removed);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Clean all cached repositories:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Empty active list removes everything
    /// let removed = cache.clean_unused(&[]).await?;
    /// println!("Removed all {} cached repositories", removed);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn clean_unused(&self, active_sources: &[String]) -> Result<usize> {
        self.ensure_cache_dir().await?;

        let mut removed_count = 0;
        let mut entries = async_fs::read_dir(&self.cache_dir)
            .await
            .with_context(|| "Failed to read cache directory")?;

        while let Some(entry) =
            entries.next_entry().await.with_context(|| "Failed to read directory entry")?
        {
            let path = entry.path();
            if path.is_dir() {
                let dir_name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");

                if !active_sources.iter().any(|s| s == dir_name) {
                    println!("🗑️  Removing unused cache: {dir_name}");
                    async_fs::remove_dir_all(&path).await.with_context(|| {
                        format!("Failed to remove cache directory: {}", path.display())
                    })?;
                    removed_count += 1;
                }
            }
        }

        Ok(removed_count)
    }

    /// Calculates the total size of the cache directory in bytes.
    ///
    /// This method recursively calculates the disk space used by all cached
    /// repositories and supporting files. It's useful for cache size monitoring,
    /// cleanup decisions, and storage management.
    ///
    /// # Calculation Method
    ///
    /// - **Recursive traversal**: Includes all subdirectories and files
    /// - **Actual file sizes**: Reports real disk usage, not allocated blocks
    /// - **All file types**: Includes Git objects, working files, and lock files
    /// - **Cross-platform**: Consistent behavior across different file systems
    ///
    /// # Performance Notes
    ///
    /// - **I/O intensive**: May be slow for very large caches
    /// - **File system dependent**: Performance varies by underlying storage
    /// - **Concurrent safe**: Can run during other cache operations
    /// - **Memory efficient**: Streams directory traversal without loading all paths
    ///
    /// # Returns
    ///
    /// Returns the total size in bytes. For a non-existent cache directory,
    /// returns `0` without error.
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Permission denied reading cache directory or subdirectories
    /// - File system errors during directory traversal
    /// - Symbolic link cycles (rare, but possible)
    ///
    /// # Examples
    ///
    /// Check current cache size:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// let size_bytes = cache.get_cache_size().await?;
    /// let size_mb = size_bytes / 1024 / 1024;
    ///
    /// println!("Cache size: {} MB ({} bytes)", size_mb, size_bytes);
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Display human-readable sizes:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let size_bytes = cache.get_cache_size().await?;
    ///
    /// let (size, unit) = match size_bytes {
    ///     s if s < 1024 => (s, "B"),
    ///     s if s < 1024 * 1024 => (s / 1024, "KB"),
    ///     s if s < 1024 * 1024 * 1024 => (s / 1024 / 1024, "MB"),
    ///     s => (s / 1024 / 1024 / 1024, "GB"),
    /// };
    ///
    /// println!("Cache size: {}{}", size, unit);
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_cache_size(&self) -> Result<u64> {
        if !self.cache_dir.exists() {
            return Ok(0);
        }

        let size = fs::get_directory_size(&self.cache_dir).await?;
        Ok(size)
    }

    /// Returns the path to the cache directory.
    ///
    /// This method provides access to the cache directory path for inspection,
    /// logging, or integration with other tools. The path represents where
    /// all cached repositories and supporting files are stored.
    ///
    /// # Return Value
    ///
    /// Returns a reference to the [`Path`] representing the cache directory.
    /// The path may or may not exist on the file system - use [`ensure_cache_dir`]
    /// to create it if needed.
    ///
    /// # Thread Safety
    ///
    /// This method is safe to call from multiple threads as it only returns
    /// a reference to the immutable path stored in the `Cache` instance.
    ///
    /// # Examples
    ///
    /// Display cache location:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// println!("Cache stored at: {}", cache.get_cache_location().display());
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Check if cache exists:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    /// let location = cache.get_cache_location();
    ///
    /// if location.exists() {
    ///     println!("Cache directory exists at: {}", location.display());
    /// } else {
    ///     println!("Cache directory not yet created: {}", location.display());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`ensure_cache_dir`]: Cache::ensure_cache_dir
    #[must_use]
    pub fn get_cache_location(&self) -> &Path {
        &self.cache_dir
    }

    /// Completely removes the entire cache directory and all its contents.
    ///
    /// This is a destructive operation that removes all cached repositories,
    /// lock files, and any other cache-related data. Use with caution as
    /// this will require re-cloning all repositories on the next operation.
    ///
    /// # Operation Details
    ///
    /// - **Complete removal**: Deletes the entire cache directory tree
    /// - **Recursive deletion**: Removes all subdirectories and files
    /// - **Lock files**: Also removes .locks directory and all lock files
    /// - **Best-effort**: Returns an error if any contents cannot be removed
    ///
    /// # Recovery Impact
    ///
    /// After calling this method:
    /// - All repositories must be re-cloned on next use
    /// - Network bandwidth will be required for repository downloads
    /// - Disk space is immediately reclaimed
    /// - Cache directory will be recreated automatically on next operation
    ///
    /// # Safety Considerations
    ///
    /// - **No confirmation**: This method doesn't ask for confirmation
    /// - **Irreversible**: Cannot undo the deletion operation
    /// - **Concurrent operations**: May interfere with running cache operations
    /// - **Lock respect**: Doesn't wait for locks, may fail if repositories are in use
    ///
    /// # Errors
    ///
    /// Returns an error if:
    /// - Permission denied for cache directory or contents
    /// - Files are locked by other processes
    /// - File system errors during deletion
    /// - Cache directory is in use by another process
    ///
    /// # Output Messages
    ///
    /// Displays confirmation message on successful completion:
    /// ```text
    /// 🗑️  Cleared all cache
    /// ```
    ///
    /// # Examples
    ///
    /// Clear cache for fresh start:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// // Check size before clearing
    /// let size_before = cache.get_cache_size().await?;
    /// println!("Cache size before: {} bytes", size_before);
    ///
    /// // Clear everything
    /// cache.clear_all().await?;
    ///
    /// // Verify cache is empty
    /// let size_after = cache.get_cache_size().await?;
    /// println!("Cache size after: {} bytes", size_after); // Should be 0
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Clear cache with error handling:
    ///
    /// ```rust,no_run
    /// use agpm_cli::cache::Cache;
    ///
    /// # async fn example() -> anyhow::Result<()> {
    /// let cache = Cache::new()?;
    ///
    /// match cache.clear_all().await {
    ///     Ok(()) => println!("Cache cleared successfully"),
    ///     Err(e) => {
    ///         eprintln!("Failed to clear cache: {}", e);
    ///         eprintln!("Some files may be in use by other processes");
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn clear_all(&self) -> Result<()> {
        if self.cache_dir.exists() {
            async_fs::remove_dir_all(&self.cache_dir)
                .await
                .with_context(|| "Failed to clear cache")?;
            println!("🗑️  Cleared all cache");
        }
        Ok(())
    }

    /// Performs a fetch operation with hybrid locking (in-process and cross-process).
    ///
    /// This method implements a two-level locking strategy:
    /// 1. In-process locks (`Arc<Mutex<()>>`) for fast coordination within the same process
    /// 2. File-based locks for cross-process coordination
    ///
    /// The fetch happens at most once per repository per command execution.
    ///
    /// # Parameters
    ///
    /// * `bare_repo_path` - Path to the bare repository
    /// * `context` - Optional context string for logging
    ///
    /// # Returns
    ///
    /// Returns Ok(()) if the fetch was successful or skipped.
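    ///
    /// # Locking Pattern
    ///
    /// A minimal sketch of the two-level pattern (the `memory_lock_for`
    /// helper is hypothetical; the real implementation keys a map of
    /// `Arc<Mutex<()>>` by repository path):
    ///
    /// ```rust,ignore
    /// use fs4::fs_std::FileExt;
    ///
    /// // Level 1: in-process mutex shared by all tasks in this process
    /// let memory_lock = memory_lock_for(bare_repo_path);
    /// let _guard = memory_lock.lock().await;
    ///
    /// // Level 2: exclusive advisory file lock visible to other processes
    /// let file = std::fs::File::create(&lock_path)?;
    /// file.lock_exclusive()?; // blocks until the lock is free
    ///
    /// // ... check fetched_repos, fetch once, record the fetch ...
    /// // The file lock is released when `file` is dropped.
    /// ```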
    async fn fetch_with_hybrid_lock(
        &self,
        bare_repo_path: &Path,
        context: Option<&str>,
    ) -> Result<()> {
        // Level 1: In-process lock (fast path)
        let memory_lock = self
            .fetch_locks
            .entry(bare_repo_path.to_path_buf())
            .or_insert_with(|| Arc::new(Mutex::new(())))
            .clone();
        let _memory_guard = memory_lock.lock().await;

        // Level 2: File-based lock (cross-process)
        let safe_name = bare_repo_path
            .file_name()
            .and_then(|s| s.to_str())
            .unwrap_or("unknown")
            .replace(['/', '\\', ':'], "_");

        let lock_path = self.cache_dir.join(".locks").join(format!("{safe_name}.fetch.lock"));

        // Ensure lock directory exists
        if let Some(parent) = lock_path.parent() {
            tokio::fs::create_dir_all(parent).await?;
        }

        // Create/open lock file
        let lock_file = tokio::fs::OpenOptions::new()
            .create(true)
            .write(true)
            .truncate(false)
            .open(&lock_path)
            .await?;

        // Convert to std::fs::File for fs4
        let std_file = lock_file.into_std().await;

        // Acquire exclusive lock (blocks until available)
        use fs4::fs_std::FileExt;
        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Acquiring file lock for {}",
                ctx,
                bare_repo_path.display()
            );
        }
        std_file.lock_exclusive()?;

        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Acquired file lock for {}",
                ctx,
                bare_repo_path.display()
            );
        }

        // Now check if we've already fetched this repo in this command execution
        // This happens AFTER acquiring the lock to prevent race conditions
        let already_fetched = {
            let fetched = self.fetched_repos.read().await;
            let is_fetched = fetched.contains(bare_repo_path);
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Checking if already fetched: {} - Result: {} (total fetched: {}, hashset addr: {:p})",
                    ctx,
                    bare_repo_path.display(),
                    is_fetched,
                    fetched.len(),
                    &raw const *fetched
                );
            }
            is_fetched
        };

        if already_fetched {
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Skipping fetch (already fetched in this command): {}",
                    ctx,
                    bare_repo_path.display()
                );
            }
            // Release the file lock and return
            return Ok(());
        }

        // Now safe to fetch
        let repo = GitRepo::new(bare_repo_path);

        if let Some(ctx) = context {
            tracing::debug!(
                target: "agpm::git",
                "({}) Fetching updates for {}",
                ctx,
                bare_repo_path.display()
            );
        }

        repo.fetch(None).await?;

        // Mark this repo as fetched for this command execution
        {
            let mut fetched = self.fetched_repos.write().await;
            fetched.insert(bare_repo_path.to_path_buf());
            if let Some(ctx) = context {
                tracing::debug!(
                    target: "agpm::git",
                    "({}) Marked as fetched: {} (total fetched: {}, hashset addr: {:p})",
                    ctx,
                    bare_repo_path.display(),
                    fetched.len(),
                    &raw const *fetched
                );
            }
        }

        // File lock automatically released when std_file is dropped
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[tokio::test]
    async fn test_cache_dir_creation() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");

        let cache = Cache::with_dir(cache_dir.clone()).unwrap();
        cache.ensure_cache_dir().await.unwrap();

        assert!(cache_dir.exists());
    }

    #[tokio::test]
    async fn test_cache_location() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();
        let location = cache.get_cache_location();
        assert_eq!(location, temp_dir.path());
    }

    #[tokio::test]
    async fn test_cache_size_empty() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();
        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }

    #[tokio::test]
    async fn test_cache_size_with_content() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create some test content
        let test_file = temp_dir.path().join("test.txt");
        std::fs::write(&test_file, "test content").unwrap();

        let size = cache.get_cache_size().await.unwrap();
        assert!(size > 0);
        assert_eq!(size, 12); // "test content" is 12 bytes
    }

    #[tokio::test]
    async fn test_clean_unused_empty_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();
        assert_eq!(removed, 0);
    }

    #[tokio::test]
    async fn test_clean_unused_removes_correct_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create some test directories
        let active_dir = temp_dir.path().join("active");
        let unused_dir = temp_dir.path().join("unused");
        let another_unused = temp_dir.path().join("another_unused");

        std::fs::create_dir_all(&active_dir).unwrap();
        std::fs::create_dir_all(&unused_dir).unwrap();
        std::fs::create_dir_all(&another_unused).unwrap();

        // Add some content to verify directories are removed completely
        std::fs::write(active_dir.join("file.txt"), "keep").unwrap();
        std::fs::write(unused_dir.join("file.txt"), "remove").unwrap();
        std::fs::write(another_unused.join("file.txt"), "remove").unwrap();

        let removed = cache.clean_unused(&["active".to_string()]).await.unwrap();

        assert_eq!(removed, 2);
        assert!(active_dir.exists());
        assert!(!unused_dir.exists());
        assert!(!another_unused.exists());
    }

    #[tokio::test]
    async fn test_clear_all_removes_entire_cache() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create some content
        let subdir = temp_dir.path().join("subdir");
        std::fs::create_dir_all(&subdir).unwrap();
        std::fs::write(subdir.join("file.txt"), "content").unwrap();

        assert!(temp_dir.path().exists());
        assert!(subdir.exists());

        cache.clear_all().await.unwrap();

        assert!(!temp_dir.path().exists());
    }

    #[tokio::test]
    async fn test_copy_resource() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let source_file = source_dir.join("resource.md");
        std::fs::write(&source_file, "# Test Resource\nContent").unwrap();

        // Copy resource
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "resource.md", &dest).await.unwrap();

        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Test Resource\nContent");
    }

    #[tokio::test]
    async fn test_copy_resource_nested_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file in nested directory
        let source_dir = temp_dir.path().join("source");
        let nested_dir = source_dir.join("nested").join("path");
        std::fs::create_dir_all(&nested_dir).unwrap();
        let source_file = nested_dir.join("resource.md");
        std::fs::write(&source_file, "# Nested Resource").unwrap();

        // Copy resource using relative path from source_dir
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, "nested/path/resource.md", &dest).await.unwrap();

        assert!(dest.exists());
        let content = std::fs::read_to_string(&dest).unwrap();
        assert_eq!(content, "# Nested Resource");
    }

    #[tokio::test]
    async fn test_copy_resource_invalid_path() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();

        // Try to copy non-existent resource
        let dest = temp_dir.path().join("dest.md");
        let result = cache.copy_resource(&source_dir, "nonexistent.md", &dest).await;

        assert!(result.is_err());
        assert!(!dest.exists());
    }

    #[tokio::test]
    async fn test_ensure_cache_dir_idempotent() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();

        // Call ensure_cache_dir multiple times
        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());

        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());

        // Add a file and ensure it's preserved
        std::fs::write(cache_dir.join("test.txt"), "content").unwrap();

        cache.ensure_cache_dir().await.unwrap();
        assert!(cache_dir.exists());
        assert!(cache_dir.join("test.txt").exists());
    }

    #[tokio::test]
    async fn test_copy_resource_creates_parent_directories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();

        // Copy to a destination with non-existent parent directories
        let dest = temp_dir.path().join("deep").join("nested").join("dest.md");
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();

        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }

    #[tokio::test]
    async fn test_copy_resource_with_output_flag() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "content").unwrap();

        // Test with output flag false
        let dest1 = temp_dir.path().join("dest1.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest1, false).await.unwrap();
        assert!(dest1.exists());

        // Test with output flag true
        let dest2 = temp_dir.path().join("dest2.md");
        cache.copy_resource_with_output(&source_dir, "file.md", &dest2, true).await.unwrap();
        assert!(dest2.exists());
    }

    #[tokio::test]
    async fn test_cache_size_nonexistent_dir() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();

        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 0);
    }

    #[tokio::test]
    async fn test_clear_all_nonexistent_cache() {
        let temp_dir = TempDir::new().unwrap();
        let nonexistent = temp_dir.path().join("nonexistent");
        let cache = Cache::with_dir(nonexistent).unwrap();

        // Should not error when clearing non-existent cache
        cache.clear_all().await.unwrap();
    }

    #[tokio::test]
    async fn test_clean_unused_with_files_and_dirs() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create directories
        std::fs::create_dir_all(temp_dir.path().join("keep")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("remove")).unwrap();

        // Create a file (not a directory)
        std::fs::write(temp_dir.path().join("file.txt"), "content").unwrap();

        let removed = cache.clean_unused(&["keep".to_string()]).await.unwrap();

        // Should only remove the "remove" directory, not the file
        assert_eq!(removed, 1);
        assert!(temp_dir.path().join("keep").exists());
        assert!(!temp_dir.path().join("remove").exists());
        assert!(temp_dir.path().join("file.txt").exists());
    }

    #[tokio::test]
    async fn test_copy_resource_overwrites_existing() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        std::fs::write(source_dir.join("file.md"), "new content").unwrap();

        // Create existing destination file
        let dest = temp_dir.path().join("dest.md");
        std::fs::write(&dest, "old content").unwrap();

        // Copy should overwrite
        cache.copy_resource(&source_dir, "file.md", &dest).await.unwrap();

        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "new content");
    }

    #[tokio::test]
    async fn test_copy_resource_special_characters() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source file with special characters
        let source_dir = temp_dir.path().join("source");
        std::fs::create_dir_all(&source_dir).unwrap();
        let special_name = "file with spaces & special-chars.md";
        std::fs::write(source_dir.join(special_name), "content").unwrap();

        // Copy resource
        let dest = temp_dir.path().join("dest.md");
        cache.copy_resource(&source_dir, special_name, &dest).await.unwrap();

        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "content");
    }

    #[tokio::test]
    async fn test_cache_location_consistency() {
        let temp_dir = TempDir::new().unwrap();
        let cache_dir = temp_dir.path().join("my_cache");
        let cache = Cache::with_dir(cache_dir.clone()).unwrap();

        // Get location multiple times
        let loc1 = cache.get_cache_location();
        let loc2 = cache.get_cache_location();

        assert_eq!(loc1, loc2);
        assert_eq!(loc1, cache_dir.as_path());
    }

    #[tokio::test]
    async fn test_clean_unused_empty_active_list() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create some directories
        std::fs::create_dir_all(temp_dir.path().join("source1")).unwrap();
        std::fs::create_dir_all(temp_dir.path().join("source2")).unwrap();

        // Empty active list should remove all
        let removed = cache.clean_unused(&[]).await.unwrap();

        assert_eq!(removed, 2);
        assert!(!temp_dir.path().join("source1").exists());
        assert!(!temp_dir.path().join("source2").exists());
    }

    #[tokio::test]
    async fn test_copy_resource_with_relative_paths() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().join("cache")).unwrap();

        // Create source with subdirectories
        let source_dir = temp_dir.path().join("source");
        let sub_dir = source_dir.join("agents");
        std::fs::create_dir_all(&sub_dir).unwrap();
        std::fs::write(sub_dir.join("helper.md"), "# Helper Agent").unwrap();

        // Copy using relative path
        let dest = temp_dir.path().join("my-agent.md");
        cache.copy_resource(&source_dir, "agents/helper.md", &dest).await.unwrap();

        assert!(dest.exists());
        assert_eq!(std::fs::read_to_string(&dest).unwrap(), "# Helper Agent");
    }

    #[tokio::test]
    async fn test_cache_size_with_subdirectories() {
        let temp_dir = TempDir::new().unwrap();
        let cache = Cache::with_dir(temp_dir.path().to_path_buf()).unwrap();

        cache.ensure_cache_dir().await.unwrap();

        // Create nested structure with files
        let sub1 = temp_dir.path().join("sub1");
        let sub2 = sub1.join("sub2");
        std::fs::create_dir_all(&sub2).unwrap();

        std::fs::write(temp_dir.path().join("file1.txt"), "12345").unwrap(); // 5 bytes
        std::fs::write(sub1.join("file2.txt"), "1234567890").unwrap(); // 10 bytes
        std::fs::write(sub2.join("file3.txt"), "abc").unwrap(); // 3 bytes

        let size = cache.get_cache_size().await.unwrap();
        assert_eq!(size, 18); // 5 + 10 + 3
    }
}