//! The main node data type which encapsulates all the behaviour for maintaining a connection
//! and performing operations within the network.
//!
//! This module contains the primary event loop (`NodeP2P::run_node`) that orchestrates
//! interactions between different components like the network, operations, contracts, and clients.
//! It receives events and dispatches actions via channels.
//!
//! # Implementations
//! Node comes with different underlying implementations that can be used upon construction.
//! Those implementations are:
//! - libp2p: all connections are handled by libp2p.
//! - in-memory: a simplified node used mainly for emulation purposes.
//! - inter-process: similar to in-memory, but can be run across multiple processes; closer to the real p2p implementation.
//!
//! See [`../../architecture.md`](../../architecture.md) for a high-level overview of the node's role and the event loop interactions.
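//!
//! A rough sketch of the event loop's shape (simplified and illustrative; the
//! real loop lives in `p2p_impl::NodeP2P::run_node`, and the handler names
//! below are placeholders, not the actual function names):
//!
//! ```ignore
//! loop {
//!     tokio::select! {
//!         msg = network_events.recv() => handle_network_message(msg).await,
//!         req = client_events.recv() => handle_client_request(req).await,
//!         ev = notifications.recv() => handle_node_event(ev).await,
//!     }
//! }
//! ```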

use anyhow::Context;
use either::Either;
use freenet_stdlib::{
    client_api::{ClientRequest, ErrorKind},
    prelude::ContractInstanceId,
};
use std::{
    borrow::Cow,
    fs::File,
    io::Read,
    net::{IpAddr, SocketAddr, ToSocketAddrs},
    sync::Arc,
    time::Duration,
};
use std::{collections::HashSet, convert::Infallible};

use self::p2p_impl::NodeP2P;
use crate::{
    client_events::{BoxedClient, ClientEventsProxy, ClientId, OpenRequest},
    config::{Address, GatewayConfig, WebsocketApiConfig},
    contract::{Callback, ExecutorError, ExecutorToEventLoopChannel, NetworkContractHandler},
    local_node::Executor,
    message::{InnerMessage, NetMessage, NodeEvent, Transaction, TransactionType},
    operations::{
        OpEnum, OpError, OpOutcome,
        connect::{self, ConnectOp},
        get, put, subscribe, update,
    },
    ring::{Location, PeerKeyLocation},
    router::{RouteEvent, RouteOutcome},
    tracing::{EventRegister, NetEventLog, NetEventRegister},
};
use crate::{
    config::Config,
    message::{MessageStats, NetMessageV1},
};
use freenet_stdlib::client_api::DelegateRequest;
use serde::{Deserialize, Serialize};
use tracing::Instrument;

use crate::operations::handle_op_request;
pub(crate) use network_bridge::{
    ConnectionError, EventLoopNotificationsSender, NetworkBridge, OpExecutionPayload,
};
#[cfg(test)]
pub(crate) use network_bridge::{EventLoopNotificationsReceiver, event_loop_notification_channel};
// Re-export types for dev_tool and testing
pub use network_bridge::{EventLoopExitReason, NetworkStats, reset_channel_id_counter};

use crate::topology::rate::Rate;
use crate::transport::{TransportKeypair, TransportPublicKey};
pub(crate) use op_state_manager::{OpManager, OpNotAvailable};

mod network_bridge;

// Re-export fault injection types for test infrastructure.
// No cfg gate: underlying items are unconditionally compiled and integration
// tests compile the lib without cfg(test).
pub use network_bridge::in_memory::{FaultInjectorState, get_fault_injector, set_fault_injector};
pub(crate) mod background_task_monitor;
pub(crate) mod neighbor_hosting;
pub(crate) mod network_status;
mod op_state_manager;
mod p2p_impl;
mod request_router;
pub(crate) mod testing_impl;

pub use request_router::{DeduplicatedRequest, RequestRouter};

/// Handle to trigger graceful shutdown of the node.
#[derive(Clone)]
pub struct ShutdownHandle {
    tx: tokio::sync::mpsc::Sender<NodeEvent>,
}

impl ShutdownHandle {
    /// Trigger a graceful shutdown of the node.
    ///
    /// This will:
    /// 1. Close all peer connections gracefully
    /// 2. Stop accepting new connections
    /// 3. Exit the event loop
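    ///
    /// # Example
    ///
    /// A minimal usage sketch (assumes a `Node` was built elsewhere and its
    /// handle obtained via [`Node::shutdown_handle`] before running):
    ///
    /// ```ignore
    /// let handle = node.shutdown_handle();
    /// tokio::spawn(async move { node.run().await });
    /// // ... later, on some external signal:
    /// handle.shutdown().await;
    /// ```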
    pub async fn shutdown(&self) {
        if let Err(err) = self
            .tx
            .send(NodeEvent::Disconnect {
                cause: Some("graceful shutdown".into()),
            })
            .await
        {
            tracing::debug!(
                error = %err,
                "failed to send graceful shutdown signal; shutdown channel may already be closed"
            );
        }
    }
}

pub struct Node {
    inner: NodeP2P,
    shutdown_handle: ShutdownHandle,
}

impl Node {
    pub fn update_location(&mut self, location: Location) {
        self.inner
            .op_manager
            .ring
            .connection_manager
            .update_location(Some(location));
    }

    /// Get a handle that can be used to trigger graceful shutdown.
    pub fn shutdown_handle(&self) -> ShutdownHandle {
        self.shutdown_handle.clone()
    }

    pub async fn run(self) -> anyhow::Result<Infallible> {
        self.inner.run_node().await
    }
}

/// When instantiating a node you can either join an existing network or bootstrap a new network with a listener
/// that will act as the initial provider. This initial peer will listen on the provided port and assigned IP.
/// If those are not available, instantiation will return an error.
///
/// In order to bootstrap a new network, the following arguments must be provided to the builder:
/// - ip: IP address of the initial node.
/// - port: listening port of the initial node.
///
/// If both are provided but additional peers are also added via the [`Self::add_gateway()`] method, this node
/// will listen and also try to connect to an existing peer.
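///
/// # Example
///
/// A minimal sketch of joining an existing network (assumes `config: Config`
/// and `gateway: InitPeerNode` were constructed elsewhere):
///
/// ```ignore
/// let mut node_config = NodeConfig::new(config).await?;
/// node_config.add_gateway(gateway);
/// let node = node_config.build([]).await?;
/// node.run().await?;
/// ```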
#[derive(Serialize, Deserialize, Clone, Debug)]
#[non_exhaustive] // avoid directly instantiating this struct
pub struct NodeConfig {
    /// Determines if an initial connection should be attempted.
    /// Only true for an initial gateway/node. If false, the gateway will be disconnected unless other peers connect through it.
    pub should_connect: bool,
    pub is_gateway: bool,
    /// If not specified, a key is generated and used when creating the node.
    pub key_pair: TransportKeypair,
    // optional local info, in case this is an initial bootstrap node
    /// IP to bind to the network listener.
    pub network_listener_ip: IpAddr,
    /// socket port to bind to the network listener.
    pub network_listener_port: u16,
    /// Our own external socket address, if known (set for gateways, learned for peers).
    pub(crate) own_addr: Option<SocketAddr>,
    pub(crate) config: Arc<Config>,
    /// At least one gateway is required for joining the network.
    /// Not necessary if this is an initial node.
    pub(crate) gateways: Vec<InitPeerNode>,
    /// the location of this node, used for gateways.
    pub(crate) location: Option<Location>,
    pub(crate) max_hops_to_live: Option<usize>,
    pub(crate) rnd_if_htl_above: Option<usize>,
    pub(crate) max_number_conn: Option<usize>,
    pub(crate) min_number_conn: Option<usize>,
    pub(crate) max_upstream_bandwidth: Option<Rate>,
    pub(crate) max_downstream_bandwidth: Option<Rate>,
    pub(crate) blocked_addresses: Option<HashSet<SocketAddr>>,
    pub(crate) transient_budget: usize,
    pub(crate) transient_ttl: Duration,
    /// Minimum ring connections before this peer advertises readiness
    /// to accept non-CONNECT operations. `None` or `Some(0)` disables the gate.
    /// Default: `Some(3)` in production.
    #[serde(default)]
    pub(crate) relay_ready_connections: Option<usize>,
}

impl NodeConfig {
    pub async fn new(config: Config) -> anyhow::Result<NodeConfig> {
        tracing::info!("Loading node configuration for mode {}", config.mode);

        // Get our own public key to filter out self-connections
        let own_pub_key = config.transport_keypair().public();

        let mut gateways = Vec::with_capacity(config.gateways.len());
        for gw in &config.gateways {
            let GatewayConfig {
                address,
                public_key_path,
                location,
            } = gw;

            // Wait for the public key file to be in X25519 hex format.
            // The gateway may still be initializing and converting from RSA PEM.
            let mut key_bytes = None;
            for attempt in 0..10 {
                let mut key_file = File::open(public_key_path).with_context(|| {
                    format!("failed loading gateway pubkey from {public_key_path:?}")
                })?;
                let mut buf = String::new();
                key_file.read_to_string(&mut buf)?;
                let buf = buf.trim();

                // Check for legacy RSA PEM format - gateway may still be initializing
                if buf.starts_with("-----BEGIN") {
                    if attempt < 9 {
                        tracing::debug!(
                            public_key_path = ?public_key_path,
                            attempt = attempt + 1,
                            "Gateway public key is still RSA PEM format, waiting for X25519 conversion..."
                        );
                        tokio::time::sleep(std::time::Duration::from_millis(500)).await;
                        continue;
                    } else {
                        tracing::warn!(
                            public_key_path = ?public_key_path,
                            "Gateway public key still in RSA PEM format after 5s. Skipping this gateway."
                        );
                        break;
                    }
                }

                match hex::decode(buf) {
                    Ok(bytes) if bytes.len() == 32 => {
                        key_bytes = Some(bytes);
                        break;
                    }
                    Ok(bytes) => {
                        anyhow::bail!(
                            "invalid gateway pubkey length {} (expected 32) from {public_key_path:?}",
                            bytes.len()
                        );
                    }
                    Err(e) => {
                        anyhow::bail!(
                            "failed to decode gateway pubkey hex from {public_key_path:?}: {e}"
                        );
                    }
                }
            }

            let key_bytes = match key_bytes {
                Some(bytes) => bytes,
                None => continue, // Skip this gateway
            };
            let mut key_arr = [0u8; 32];
            key_arr.copy_from_slice(&key_bytes);
            let transport_pub_key = TransportPublicKey::from_bytes(key_arr);

            // Skip if this gateway's public key matches our own
            if &transport_pub_key == own_pub_key {
                tracing::warn!(
                    "Skipping gateway with same public key as self: {:?}",
                    public_key_path
                );
                continue;
            }

            let address = Self::parse_socket_addr(address).await?;
            let peer_key_location = PeerKeyLocation::new(transport_pub_key, address);
            let location = location
                .map(Location::new)
                .unwrap_or_else(|| Location::from_address(&address));
            gateways.push(InitPeerNode::new(peer_key_location, location));
        }
        tracing::info!(
            "Node will be listening at internal address {}:{}",
            config.network_api.address,
            config.network_api.port
        );
        if let Some(own_addr) = &config.peer_id {
            tracing::info!("Node external address: {}", own_addr.socket_addr());
        }
        Ok(NodeConfig {
            should_connect: true,
            is_gateway: config.is_gateway,
            key_pair: config.transport_keypair().clone(),
            gateways,
            own_addr: config.peer_id.clone().map(|p| p.socket_addr()),
            network_listener_ip: config.network_api.address,
            network_listener_port: config.network_api.port,
            location: config.location.map(Location::new),
            config: Arc::new(config.clone()),
            max_hops_to_live: None,
            rnd_if_htl_above: None,
            max_number_conn: Some(config.network_api.max_connections),
            min_number_conn: Some(config.network_api.min_connections),
            max_upstream_bandwidth: None,
            max_downstream_bandwidth: None,
            blocked_addresses: config.network_api.blocked_addresses.clone(),
            transient_budget: config.network_api.transient_budget,
            transient_ttl: Duration::from_secs(config.network_api.transient_ttl_secs),
            relay_ready_connections: if config.network_api.skip_load_from_network {
                Some(0) // Local/test networks: disable relay gate
            } else {
                Some(3) // Production: require 3 relay-ready upstream peers
            },
        })
    }

    pub(crate) async fn parse_socket_addr(address: &Address) -> anyhow::Result<SocketAddr> {
        let (hostname, port) = match address {
            crate::config::Address::Hostname(hostname) => {
                match hostname.rsplit_once(':') {
                    None => {
                        // no port found, use default
                        let hostname_with_port =
                            format!("{}:{}", hostname, crate::config::default_network_api_port());

                        if let Ok(mut addrs) = hostname_with_port.to_socket_addrs() {
                            if let Some(addr) = addrs.next() {
                                return Ok(addr);
                            }
                        }

                        (Cow::Borrowed(hostname.as_str()), None)
                    }
                    Some((host, port)) => match port.parse::<u16>() {
                        Ok(port) => {
                            if let Ok(mut addrs) = hostname.to_socket_addrs() {
                                if let Some(addr) = addrs.next() {
                                    return Ok(addr);
                                }
                            }

                            (Cow::Borrowed(host), Some(port))
                        }
                        Err(_) => return Err(anyhow::anyhow!("Invalid port number: {port}")),
                    },
                }
            }
            Address::HostAddress(addr) => return Ok(*addr),
        };

        let (conf, opts) = hickory_resolver::system_conf::read_system_conf()?;
        let resolver = hickory_resolver::TokioAsyncResolver::new(
            conf,
            opts,
            hickory_resolver::name_server::GenericConnector::new(
                hickory_resolver::name_server::TokioRuntimeProvider::new(),
            ),
        );

        // Append a trailing dot so the resolver treats the name as fully
        // qualified and issues only one query.
        let hostname = if hostname.ends_with('.') {
            hostname
        } else {
            Cow::Owned(format!("{hostname}."))
        };

        let ips = resolver.lookup_ip(hostname.as_ref()).await?;
        match ips.into_iter().next() {
            Some(ip) => Ok(SocketAddr::new(
                ip,
                port.unwrap_or_else(crate::config::default_network_api_port),
            )),
            None => Err(anyhow::anyhow!("Failed to resolve IP address of {hostname}")),
        }
    }

    pub fn config(&self) -> &Config {
        &self.config
    }

    pub fn is_gateway(&mut self) -> &mut Self {
        self.is_gateway = true;
        self
    }

    pub fn first_gateway(&mut self) {
        self.should_connect = false;
    }

    pub fn with_should_connect(&mut self, should_connect: bool) -> &mut Self {
        self.should_connect = should_connect;
        self
    }

    pub fn max_hops_to_live(&mut self, num_hops: usize) -> &mut Self {
        self.max_hops_to_live = Some(num_hops);
        self
    }

    pub fn rnd_if_htl_above(&mut self, num_hops: usize) -> &mut Self {
        self.rnd_if_htl_above = Some(num_hops);
        self
    }

    pub fn max_number_of_connections(&mut self, num: usize) -> &mut Self {
        self.max_number_conn = Some(num);
        self
    }

    pub fn min_number_of_connections(&mut self, num: usize) -> &mut Self {
        self.min_number_conn = Some(num);
        self
    }

    pub fn relay_ready_connections(&mut self, num: Option<usize>) -> &mut Self {
        self.relay_ready_connections = num;
        self
    }

    pub fn with_own_addr(&mut self, addr: SocketAddr) -> &mut Self {
        self.own_addr = Some(addr);
        self
    }

    pub fn with_location(&mut self, loc: Location) -> &mut Self {
        self.location = Some(loc);
        self
    }

    /// Connection info for an already existing peer. Required in case this is not a gateway node.
    pub fn add_gateway(&mut self, peer: InitPeerNode) -> &mut Self {
        self.gateways.push(peer);
        self
    }

    /// Builds a node using the default backend connection manager.
    pub async fn build<const CLIENTS: usize>(
        self,
        clients: [BoxedClient; CLIENTS],
    ) -> anyhow::Result<Node> {
        let (node, _flush_handle) = self.build_with_flush_handle(clients).await?;
        Ok(node)
    }

    /// Builds a node and returns a flush handle for event aggregation (for testing).
    pub async fn build_with_flush_handle<const CLIENTS: usize>(
        self,
        clients: [BoxedClient; CLIENTS],
    ) -> anyhow::Result<(Node, crate::tracing::EventFlushHandle)> {
        let (event_register, flush_handle) = {
            use super::tracing::{DynamicRegister, TelemetryReporter};

            let event_reg = EventRegister::new(self.config.event_log());
            let flush_handle = event_reg.flush_handle();

            let mut registers: Vec<Box<dyn NetEventRegister>> = vec![Box::new(event_reg)];

            // Add OpenTelemetry register if feature enabled
            #[cfg(feature = "trace-ot")]
            {
                use super::tracing::OTEventRegister;
                registers.push(Box::new(OTEventRegister::new()));
            }

            // Add telemetry reporter if enabled in config
            if let Some(telemetry) = TelemetryReporter::new(&self.config.telemetry) {
                registers.push(Box::new(telemetry));
            }

            (DynamicRegister::new(registers), flush_handle)
        };
        let cfg = self.config.clone();
        let (node_inner, shutdown_tx) = NodeP2P::build::<NetworkContractHandler, CLIENTS, _>(
            self,
            clients,
            event_register,
            cfg,
        )
        .await?;
        let shutdown_handle = ShutdownHandle { tx: shutdown_tx };
        Ok((
            Node {
                inner: node_inner,
                shutdown_handle,
            },
            flush_handle,
        ))
    }

    pub fn get_own_addr(&self) -> Option<SocketAddr> {
        self.own_addr
    }

    /// Returns all specified gateways for this peer. Returns an error if the peer is not a gateway
    /// and no gateways are specified.
    fn get_gateways(&self) -> anyhow::Result<Vec<PeerKeyLocation>> {
        let gateways: Vec<PeerKeyLocation> = self
            .gateways
            .iter()
            .map(|node| node.peer_key_location.clone())
            .collect();

        if !self.is_gateway && gateways.is_empty() {
            anyhow::bail!(
                "At least one remote gateway is required to join an existing network for non-gateway nodes."
            )
        } else {
            Ok(gateways)
        }
    }
}

/// Gateway node to use for joining the network.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct InitPeerNode {
    peer_key_location: PeerKeyLocation,
    location: Location,
}

impl InitPeerNode {
    pub fn new(peer_key_location: PeerKeyLocation, location: Location) -> Self {
        Self {
            peer_key_location,
            location,
        }
    }
}

async fn report_result(
    tx: Option<Transaction>,
    op_result: Result<Option<OpEnum>, OpError>,
    op_manager: &OpManager,
    executor_callback: Option<ExecutorToEventLoopChannel<Callback>>,
    event_listener: &mut dyn NetEventRegister,
) {
    // Add UPDATE-specific debug logging at the start
    if let Some(tx_id) = tx {
        if matches!(tx_id.transaction_type(), TransactionType::Update) {
            tracing::debug!("report_result called for UPDATE transaction {}", tx_id);
        }
    }

    match op_result {
        Ok(Some(op_res)) => {
            // Log specifically for UPDATE operations
            if let crate::operations::OpEnum::Update(ref update_op) = op_res {
                tracing::debug!(
                    "UPDATE operation {} completed, finalized: {}",
                    update_op.id,
                    update_op.finalized()
                );
            }

            // Send to result router (skip for sub-operations and subscription renewals)
            if let Some(transaction) = tx {
                // Sub-operations (e.g., Subscribe spawned by PUT) don't notify clients directly;
                // the parent operation handles the client response.
                if transaction.is_sub_operation() {
                    tracing::debug!(
                        tx = %transaction,
                        "Skipping client notification for sub-operation"
                    );
                } else if op_res.is_subscription_renewal() {
                    // Subscription renewals are node-internal operations spawned by the
                    // renewal manager (ring.rs). No client registered a transaction
                    // for these, so sending to the session actor would just produce
                    // "registered transactions: 0" noise. See #2891.
                    tracing::debug!(
                        tx = %transaction,
                        "Skipping client notification for subscription renewal"
                    );
                } else {
                    let host_result = op_res.to_host_result();
                    // Deliver the result so the client receives the response
                    // before the operation is considered complete. This prevents
                    // timeout issues where the operation completes but the
                    // response hasn't been delivered through the channel chain yet.
                    op_manager.send_client_result(transaction, host_result);
                }
            }

            // See operations.rs:handle_op_result for the meaning of each state,
            // in case more cases need to be handled when feeding information
            // to the OpManager.

            // Record operation result for dashboard stats
            let (classified_op_type, classified_success) =
                classify_op_outcome(op_res.id().transaction_type(), op_res.outcome());
            if let Some(op_type) = classified_op_type {
                network_status::record_op_result(op_type, classified_success);
            }

            let route_event = match op_res.outcome() {
                OpOutcome::ContractOpSuccess {
                    target_peer,
                    contract_location,
                    first_response_time,
                    payload_size,
                    payload_transfer_time,
                } => Some(RouteEvent {
                    peer: target_peer.clone(),
                    contract_location,
                    outcome: RouteOutcome::Success {
                        time_to_response_start: first_response_time,
                        payload_size,
                        payload_transfer_time,
                    },
                    op_type: classified_op_type,
                }),
                OpOutcome::ContractOpSuccessUntimed {
                    target_peer,
                    contract_location,
                } => Some(RouteEvent {
                    peer: target_peer.clone(),
                    contract_location,
                    outcome: RouteOutcome::SuccessUntimed,
                    op_type: classified_op_type,
                }),
                OpOutcome::ContractOpFailure {
                    target_peer,
                    contract_location,
                } => Some(RouteEvent {
                    peer: target_peer.clone(),
                    contract_location,
                    outcome: RouteOutcome::Failure,
                    op_type: classified_op_type,
                }),
                OpOutcome::Incomplete | OpOutcome::Irrelevant => None,
            };
            if let Some(event) = route_event {
                if let Some(log_event) =
                    NetEventLog::route_event(op_res.id(), &op_manager.ring, &event)
                {
                    event_listener
                        .register_events(Either::Left(log_event))
                        .await;
                }
                op_manager.ring.routing_finished(event);
            }
            if let Some(mut cb) = executor_callback {
                cb.response(op_res).await;
            }
        }
        Ok(None) => {
            tracing::debug!(?tx, "No operation result found");
        }
        Err(err) => {
            // Mark operation as completed and notify waiting clients of the error
            if let Some(tx) = tx {
                // Sub-operations (e.g., Subscribe spawned by PUT) have no client
                // registered — sending errors for them would pollute the
                // SessionActor's pending_results cache.
                if !tx.is_sub_operation() {
                    let client_error = freenet_stdlib::client_api::ClientError::from(
                        freenet_stdlib::client_api::ErrorKind::OperationError {
                            cause: err.to_string().into(),
                        },
                    );
                    op_manager.send_client_result(tx, Err(client_error));
                }

                op_manager.completed(tx);
            }
            #[cfg(any(debug_assertions, test))]
            {
                use std::io::Write;
                #[cfg(debug_assertions)]
                let OpError::InvalidStateTransition { tx, state, trace } = err else {
                    tracing::error!("Finished transaction with error: {err}");
                    return;
                };
                #[cfg(not(debug_assertions))]
                let OpError::InvalidStateTransition { tx } = err else {
                    tracing::error!("Finished transaction with error: {err}");
                    return;
                };
                // todo: this can be improved once std::backtrace::Backtrace::frames is stabilized
                #[cfg(debug_assertions)]
                let trace = format!("{trace}");
                #[cfg(debug_assertions)]
                {
                    let mut tr_lines = trace.lines();
                    let trace = tr_lines
                        .nth(2)
                        .map(|second_trace| {
                            let second_trace_lines =
                                [second_trace, tr_lines.next().unwrap_or_default()];
                            second_trace_lines.join("\n")
                        })
                        .unwrap_or_default();
                    let peer = op_manager.ring.connection_manager.own_location();
                    let log = format!(
                        "Transaction ({tx} @ {peer}) error trace:\n {trace} \nstate:\n {state:?}\n"
                    );
                    std::io::stderr().write_all(log.as_bytes()).unwrap();
                }
                #[cfg(not(debug_assertions))]
                {
                    let peer = op_manager.ring.connection_manager.own_location();
                    let log = format!("Transaction ({tx} @ {peer}) error\n");
                    std::io::stderr().write_all(log.as_bytes()).unwrap();
                }
            }
            #[cfg(not(any(debug_assertions, test)))]
            {
                tracing::debug!("Finished transaction with error: {err}");
            }
        }
    }
}

/// Process a network message and deliver results to clients via the canonical
/// path: report_result → send_client_result → ResultRouter → SessionActor.
pub(crate) async fn process_message_decoupled<CB>(
    msg: NetMessage,
    source_addr: Option<std::net::SocketAddr>,
    op_manager: Arc<OpManager>,
    conn_manager: CB,
    mut event_listener: Box<dyn NetEventRegister>,
    executor_callback: Option<ExecutorToEventLoopChannel<crate::contract::Callback>>,
    pending_op_result: Option<tokio::sync::mpsc::Sender<NetMessage>>,
) where
    CB: NetworkBridge,
{
    let tx = *msg.id();

    let op_result = handle_pure_network_message(
        msg,
        source_addr,
        op_manager.clone(),
        conn_manager,
        event_listener.as_mut(),
        pending_op_result,
    )
    .await;

    // Report result and deliver to clients via the single canonical path:
    // report_result → send_client_result → ResultRouter → SessionActor
    report_result(
        Some(tx),
        op_result,
        &op_manager,
        executor_callback,
        &mut *event_listener,
    )
    .await;
}

/// Pure network message handling (no client concerns)
#[allow(clippy::too_many_arguments)]
async fn handle_pure_network_message<CB>(
    msg: NetMessage,
    source_addr: Option<std::net::SocketAddr>,
    op_manager: Arc<OpManager>,
    conn_manager: CB,
    event_listener: &mut dyn NetEventRegister,
    pending_op_result: Option<tokio::sync::mpsc::Sender<NetMessage>>,
) -> Result<Option<crate::operations::OpEnum>, crate::node::OpError>
where
    CB: NetworkBridge,
{
    match msg {
        NetMessage::V1(msg_v1) => {
            handle_pure_network_message_v1(
                msg_v1,
                source_addr,
                op_manager,
                conn_manager,
                event_listener,
                pending_op_result,
            )
            .await
        }
    }
}

/// Returns the exponential backoff delay for the given retry attempt.
///
/// Starts at 5ms and doubles each attempt, capped at 1000ms.
fn op_retry_backoff(attempt: usize) -> Duration {
    Duration::from_millis((5u64 << attempt.min(8)).min(1_000))
}
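
// Illustrative sanity check of the backoff schedule above (doubling from
// 5ms, shift clamped at attempt 8, capped at 1000ms).
#[cfg(test)]
mod op_retry_backoff_tests {
    use super::*;

    #[test]
    fn backoff_schedule() {
        assert_eq!(op_retry_backoff(0), Duration::from_millis(5));
        assert_eq!(op_retry_backoff(3), Duration::from_millis(40));
        assert_eq!(op_retry_backoff(7), Duration::from_millis(640));
        // 5 << 8 == 1280, clamped to the 1000ms cap.
        assert_eq!(op_retry_backoff(8), Duration::from_millis(1000));
        assert_eq!(op_retry_backoff(64), Duration::from_millis(1000));
    }
}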

/// Route an inbound task-per-tx reply directly to an awaiting
/// [`OpCtx::send_and_await`][ocxawait] caller, bypassing the legacy op
/// state machine entirely.
///
/// Returns `true` if a callback was registered and the message was forwarded
/// (or dropped due to a closed receiver, which is also a successful
/// "bypass taken" from the pipeline's point of view — the legacy path must
/// not run). Returns `false` if no callback is registered; the caller then
/// falls through to the legacy `handle_op_request` path.
///
/// # Why this exists
///
/// Phase 1 (#3802) wired [`forward_pending_op_result_if_completed`] to fire
/// the callback after the legacy state machine classified the reply as
/// completed. That only works when a [`crate::operations::OpEnum`] was
/// previously pushed into [`crate::node::OpManager`]'s per-op DashMap:
/// `load_or_init` pops it, `process_message` produces
/// [`crate::operations::OperationResult::SendAndComplete`], and
/// `is_operation_completed` returns `true`.
///
/// Phase 2b's task-per-tx callers (starting with client-initiated SUBSCRIBE,
/// #1454) never push an op into that DashMap: state lives in the task's
/// locals. When a reply arrives, `load_or_init` sees an empty slot and
/// returns [`crate::operations::OpError::OpNotPresent`] (this is the
/// legacy guard against stale responses arriving after GC cleanup, e.g.
/// `operations/subscribe.rs:1181-1192`). `handle_op_result` swallows
/// `OpNotPresent` as benign and returns `Ok(None)`, so
/// `forward_pending_op_result_if_completed` short-circuits and the
/// awaiting task hangs forever.
///
/// This helper is the fix: any branch of
/// [`handle_pure_network_message_v1`] that wants to support a task-per-tx
/// caller checks this first, and on a hit returns without ever touching
/// `handle_op_request`. Phase 2b adds this guard to the SUBSCRIBE branch
/// only; Phases 2c/3/4 will add it to their own branches when they
/// introduce their first task-per-tx callers.
///
/// # Safety argument for the bypass
///
/// `p2p_protoc::pending_op_results` is only populated via
/// `p2p_protoc::handle_op_execution`, which is only driven by the
/// `op_execution_sender` channel. The only way to obtain a clone of that
/// sender is through [`crate::node::OpManager::op_ctx`] (production
/// factory) or the in-module `OpCtx` unit tests — both of which construct
/// an [`OpCtx`][ocx] whose only round-trip method is
/// [`OpCtx::send_and_await`][ocxawait]. This is a **structural
/// invariant**, not a convention: the sender field is `pub(crate)` and
/// there is no other `pub` accessor on `EventLoopNotificationsSender`.
///
/// Consequence: legacy paths (SUBSCRIBE renewals, PUT sub-op subscribes,
/// contract-executor-initiated subscribes, intermediate-peer forwarding)
/// never appear in `pending_op_results` for their own txs, so the bypass
/// never triggers for them and their behavior is unchanged.
///
/// [ocx]: crate::operations::OpCtx
/// [ocxawait]: crate::operations::OpCtx::send_and_await
///
/// # Channel safety
///
/// Uses `try_send` on the bounded capacity-1 channel created by
/// [`OpCtx::send_and_await`][ocxawait]. On a closed receiver (e.g., caller
/// timed out or was cancelled) the send fails and is logged; the
/// pure-network-message handler still makes progress. See
/// `.claude/rules/channel-safety.md`.
fn try_forward_task_per_tx_reply(
    pending_op_result: Option<&tokio::sync::mpsc::Sender<NetMessage>>,
    reply: NetMessage,
    op_label: &'static str,
) -> bool {
    let Some(callback) = pending_op_result else {
        return false;
    };
    let tx_id = *reply.id();
    if let Err(err) = callback.try_send(reply) {
        tracing::error!(
            %err,
            %tx_id,
            op = op_label,
            "Failed to forward task-per-tx reply to OpCtx task"
        );
    }
    true
}

/// If `op_result` indicates the operation completed and a `pending_op_result`
/// callback is wired, forward `reply` to the awaiting caller of
/// [`crate::operations::OpCtx::send_and_await`].
///
/// This is the reply side of the async sub-transaction round-trip primitive
/// introduced by #1454. The caller side — [`OpCtx::send_and_await`][ocx] —
/// installs a one-shot bounded [`tokio::sync::mpsc::Sender`] into
/// `p2p_protoc::pending_op_results` keyed by its `Transaction`, and each
/// branch of `handle_pure_network_message_v1` calls this helper after
/// `handle_op_request` so the caller can `await` a single reply keyed by
/// the same `Transaction`.
///
/// Wired for PUT and GET historically; extended to SUBSCRIBE, CONNECT, and
/// UPDATE in Phase 1 of the async-transaction refactor (#1454) so every op
/// kind can terminate an `OpCtx::send_and_await` round-trip without hanging.
/// (Phase 1 predates `OpCtx`; the caller side used to live directly on
/// `OpManager::notify_op_execution`, now deleted. Phase 2a moved the caller
/// side into `OpCtx` behind an `OpManager::op_ctx` factory.)
///
/// [ocx]: crate::operations::OpCtx::send_and_await
///
/// # Channel safety
///
/// Uses `try_send` rather than `.send().await` on the bounded capacity-1
/// mpsc channel created by `OpCtx::send_and_await`. This is sound because
/// the callback is fired **at most once per transaction**: the
/// `is_operation_completed` guard above combined with the `completed` /
/// `under_progress` dedup sets in `OpManager` ensures that subsequent
/// messages for the same tx short-circuit before reaching this code. So
/// `try_send` on an empty capacity-1 channel cannot fail with `Full` —
/// it can only fail with `Closed` when the `OpCtx` owner has dropped its
/// receiver. Using `try_send` eliminates any risk of blocking the
/// pure-network-message handler if a future consumer ever ends up unable
/// to drain the reply, satisfying the preference for non-blocking sends
/// in `.claude/rules/channel-safety.md`.
///
/// As of Phase 2a (#1454) this path is still dormant: `pending_op_result`
/// is always `None` because no production caller of `OpCtx::send_and_await`
/// exists yet. Phase 2b will introduce the first real caller (SUBSCRIBE
/// client-initiated path); the `try_send` choice here means Phase 2b does
/// not need to touch this function.
fn forward_pending_op_result_if_completed(
    op_result: &Result<Option<OpEnum>, OpError>,
    pending_op_result: Option<&tokio::sync::mpsc::Sender<NetMessage>>,
    reply: NetMessage,
) {
    if !is_operation_completed(op_result) {
        return;
    }
    let Some(callback) = pending_op_result else {
        return;
    };
    let tx_id = *reply.id();
    if let Err(err) = callback.try_send(reply) {
        tracing::error!(%err, %tx_id, "Failed to send message to executor");
    }
}
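
// Illustrative sketch of the round-trip described above (names simplified;
// the real caller side is `OpCtx::send_and_await`). The caller installs a
// bounded capacity-1 channel keyed by its transaction, sends the request,
// and awaits exactly one reply:
//
//     let (reply_tx, mut reply_rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
//     pending_op_results.insert(transaction, reply_tx); // caller installs callback
//     network_bridge.send(&target, request).await?;     // request goes out
//     let reply = reply_rx.recv().await;                // awaits exactly one reply
//
// Since the completion guard fires the callback at most once per transaction,
// `try_send` on that capacity-1 channel can never fail with `Full`.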

/// Pure network message processing for V1 messages (no client concerns)
#[allow(clippy::too_many_arguments)]
async fn handle_pure_network_message_v1<CB>(
    msg: NetMessageV1,
    source_addr: Option<std::net::SocketAddr>,
    op_manager: Arc<OpManager>,
    mut conn_manager: CB,
    event_listener: &mut dyn NetEventRegister,
    pending_op_result: Option<tokio::sync::mpsc::Sender<NetMessage>>,
) -> Result<Option<crate::operations::OpEnum>, crate::node::OpError>
where
    CB: NetworkBridge,
{
    // Register network events (pure network concern)
    event_listener
        .register_events(NetEventLog::from_inbound_msg_v1(
            &msg,
            &op_manager,
            source_addr,
        ))
        .await;

    const MAX_RETRIES: usize = 15;
    for i in 0..MAX_RETRIES {
        let tx = Some(*msg.id());
        tracing::debug!(?tx, "Processing pure network operation, iteration: {i}");

        match msg {
            NetMessageV1::Connect(ref op) => {
                let parent_span = tracing::Span::current();
                let span = tracing::info_span!(
                    parent: parent_span,
                    "handle_connect_op_request",
                    transaction = %msg.id(),
                    tx_type = %msg.id().transaction_type()
                );
                let op_result = handle_op_request::<ConnectOp, _>(
                    &op_manager,
                    &mut conn_manager,
                    op,
                    source_addr,
                )
                .instrument(span)
                .await;

                // Handle pending operation results (network concern)
                forward_pending_op_result_if_completed(
                    &op_result,
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Connect((*op).clone())),
                );

                if let Err(OpError::OpNotAvailable(state)) = &op_result {
                    match state {
                        OpNotAvailable::Running => {
                            let delay = op_retry_backoff(i);
                            tracing::debug!(
                                delay_ms = delay.as_millis() as u64,
                                attempt = i,
                                "Pure network: Operation still running, backing off"
                            );
                            tokio::time::sleep(delay).await;
                            continue;
                        }
                        OpNotAvailable::Completed => {
                            tracing::debug!(
                                tx = %msg.id(),
                                tx_type = ?msg.id().transaction_type(),
                                "Pure network: Operation already completed"
                            );
                            return Ok(None);
                        }
                    }
                }

                return handle_pure_network_result(
                    tx,
                    op_result,
                    &op_manager,
                    &mut *event_listener,
                )
                .await;
            }
            NetMessageV1::Put(ref op) => {
                // Phase 3a (#1454): task-per-tx bypass for client-initiated
                // PUT. Mirrors the SUBSCRIBE bypass in the Subscribe arm below.
                //
                // Only forward **terminal** Response/ResponseStreaming messages.
                // Non-terminal messages (Request, RequestStreaming, ForwardingAck,
                // BroadcastTo, SuccessfulUpdate) must NOT be forwarded: they would
                // fill the capacity-1 reply channel and cause classify_reply to
                // fail with Unexpected (Phase 2b bug 2).
                if matches!(
                    op,
                    put::PutMsg::Response { .. } | put::PutMsg::ResponseStreaming { .. }
                ) && try_forward_task_per_tx_reply(
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Put((*op).clone())),
                    "put",
                ) {
                    return Ok(None);
                }

                tracing::debug!(
                    tx = %op.id(),
                    "handle_pure_network_message_v1: Processing PUT message"
                );
                let op_result = handle_op_request::<put::PutOp, _>(
                    &op_manager,
                    &mut conn_manager,
                    op,
                    source_addr,
                )
                .await;
                tracing::debug!(
                    tx = %op.id(),
                    op_result_ok = op_result.is_ok(),
                    "handle_pure_network_message_v1: PUT handle_op_request completed"
                );

                // Handle pending operation results (network concern)
                forward_pending_op_result_if_completed(
                    &op_result,
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Put((*op).clone())),
                );

                if let Err(OpError::OpNotAvailable(state)) = &op_result {
                    match state {
                        OpNotAvailable::Running => {
                            let delay = op_retry_backoff(i);
                            tracing::debug!(
                                delay_ms = delay.as_millis() as u64,
                                attempt = i,
                                "Pure network: Operation still running, backing off"
                            );
                            tokio::time::sleep(delay).await;
                            continue;
                        }
                        OpNotAvailable::Completed => {
                            tracing::debug!("Pure network: Operation already completed");
                            return Ok(None);
                        }
                    }
                }

                return handle_pure_network_result(
                    tx,
                    op_result,
                    &op_manager,
                    &mut *event_listener,
                )
                .await;
            }
            NetMessageV1::Get(ref op) => {
                // Phase 3b (#1454): task-per-tx bypass for client-initiated
                // GET. Mirrors the PUT bypass above.
                //
                // Only forward **terminal** Response/ResponseStreaming messages.
                // Non-terminal messages (Request, ResponseStreamingAck,
                // ForwardingAck) must NOT be forwarded: they would fill the
                // capacity-1 reply channel and cause classify_reply to fail
                // with Unexpected (Phase 2b bug 2).
                if matches!(
                    op,
                    get::GetMsg::Response { .. } | get::GetMsg::ResponseStreaming { .. }
                ) && try_forward_task_per_tx_reply(
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Get((*op).clone())),
                    "get",
                ) {
                    return Ok(None);
                }

                // #1454 phase 5 / #3883: relay GET task-per-tx dispatch.
                //
                // For true relay hops (source_addr.is_some() AND incoming
                // variant is GetMsg::Request AND no GetOp already exists in
                // OpManager for this tx), spawn the relay driver and return.
                // The driver owns routing, forwarding, retry, and upstream
                // bubble-up in its task locals.
                //
                // source_addr.is_none() (originator loop-back from phase-3b
                // client driver's send_and_await with target=None) falls
                // through to the legacy path to preserve the Request-echo
                // contract that drive_client_get_inner's `classify` relies on
                // for Terminal::LocalCompletion.
                //
                // GC-spawned retries and start_targeted_op register a GetOp
                // in OpManager before the Request hits the wire, so
                // op_manager.has_get_op(id) returning true means this is not
                // a fresh inbound relay call — fall through to legacy.
                if let get::GetMsg::Request {
                    id,
                    instance_id,
                    fetch_contract,
                    htl,
                    visited,
                    subscribe,
                } = op
                {
                    if let Some(upstream_addr) = source_addr {
                        if !op_manager.has_get_op(id) {
                            // True relay: no existing op, remote source, fresh
                            // Request. Fire-and-forget spawn; driver publishes
                            // its own upstream response.
                            if let Err(err) = get::op_ctx_task::start_relay_get(
                                op_manager.clone(),
                                *id,
                                *instance_id,
                                *htl,
                                upstream_addr,
                                visited.clone(),
                                *fetch_contract,
                                *subscribe,
                            )
                            .await
                            {
                                tracing::error!(
                                    tx = %id,
                                    %instance_id,
                                    error = %err,
                                    "GET relay dispatch: start_relay_get failed"
                                );
                            }
                            return Ok(None);
                        }
                    }
                }

                let op_result = handle_op_request::<get::GetOp, _>(
                    &op_manager,
                    &mut conn_manager,
                    op,
                    source_addr,
                )
                .await;

                // Handle pending operation results (network concern)
                forward_pending_op_result_if_completed(
                    &op_result,
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Get((*op).clone())),
                );

                if let Err(OpError::OpNotAvailable(state)) = &op_result {
                    match state {
                        OpNotAvailable::Running => {
                            let delay = op_retry_backoff(i);
                            tracing::debug!(
                                delay_ms = delay.as_millis() as u64,
                                attempt = i,
                                "Pure network: Operation still running, backing off"
                            );
                            tokio::time::sleep(delay).await;
                            continue;
                        }
                        OpNotAvailable::Completed => {
                            tracing::debug!("Pure network: Operation already completed");
                            return Ok(None);
                        }
                    }
                }

                return handle_pure_network_result(
                    tx,
                    op_result,
                    &op_manager,
                    &mut *event_listener,
                )
                .await;
            }
            NetMessageV1::Update(ref op) => {
                // #1454 phase 5 follow-up (slice A): relay UPDATE
                // task-per-tx dispatch.
                //
                // For true relay hops (source_addr.is_some() AND incoming
                // variant is non-streaming UpdateMsg::RequestUpdate or
                // UpdateMsg::BroadcastTo AND no UpdateOp already exists in
                // OpManager for this tx), spawn the relay driver and
                // return. The driver owns local apply or single forward
                // in its task locals. UPDATE is fire-and-forget end-to-end
                // — there's no upstream reply to await.
                //
                // source_addr.is_none() means the caller is internal (e.g.
                // start_op_with_id from start_targeted_op): fall through
                // to the legacy path. Existing UpdateOp means GC-spawned
                // retry or pre-registered op: also fall through.
                //
                // Streaming variants (RequestUpdateStreaming /
                // BroadcastToStreaming) and the deprecated Broadcasting
                // wire variant stay on the legacy path in slice A — see
                // port plan §3 / §9.
                if let Some(sender_addr) = source_addr {
                    #[allow(clippy::wildcard_enum_match_arm)]
                    match op {
                        update::UpdateMsg::RequestUpdate {
                            id,
                            key,
                            related_contracts,
                            value,
                        } if !op_manager.has_update_op(id) => {
                            if let Err(err) = update::op_ctx_task::start_relay_request_update(
                                op_manager.clone(),
                                *id,
                                *key,
                                related_contracts.clone(),
                                value.clone(),
                                sender_addr,
                            )
                            .await
                            {
                                tracing::error!(
                                    tx = %id,
                                    %key,
                                    error = %err,
                                    "UPDATE relay dispatch: start_relay_request_update failed"
                                );
                            }
                            return Ok(None);
                        }
                        update::UpdateMsg::BroadcastTo {
                            id,
                            key,
                            payload,
                            sender_summary_bytes,
                        } if !op_manager.has_update_op(id) => {
                            if let Err(err) = update::op_ctx_task::start_relay_broadcast_to(
                                op_manager.clone(),
                                *id,
                                *key,
                                payload.clone(),
                                sender_summary_bytes.clone(),
                                sender_addr,
                            )
                            .await
                            {
                                tracing::error!(
                                    tx = %id,
                                    %key,
                                    error = %err,
                                    "UPDATE relay dispatch: start_relay_broadcast_to failed"
                                );
                            }
                            return Ok(None);
                        }
                        // Streaming variants (RequestUpdateStreaming /
                        // BroadcastToStreaming), deprecated Broadcasting,
                        // existing UpdateOp, or guarded RequestUpdate /
                        // BroadcastTo with has_update_op == true → legacy
                        // path. Wildcard arm exists ONLY to satisfy
                        // non-exhaustive matches against the slice-A
                        // dispatch gate; do not expand.
                        _ => {}
                    }
                }

                let op_result = handle_op_request::<update::UpdateOp, _>(
                    &op_manager,
                    &mut conn_manager,
                    op,
                    source_addr,
                )
                .await;

                // Handle pending operation results (network concern)
                forward_pending_op_result_if_completed(
                    &op_result,
                    pending_op_result.as_ref(),
                    NetMessage::V1(NetMessageV1::Update((*op).clone())),
                );

                if let Err(OpError::OpNotAvailable(state)) = &op_result {
                    match state {
                        OpNotAvailable::Running => {
                            let delay = op_retry_backoff(i);
                            tracing::debug!(
                                delay_ms = delay.as_millis() as u64,
                                attempt = i,
                                "Pure network: Operation still running, backing off"
                            );
                            tokio::time::sleep(delay).await;
                            continue;
                        }
                        OpNotAvailable::Completed => {
                            tracing::debug!("Pure network: Operation already completed");
                            return Ok(None);
                        }
                    }
                }

                return handle_pure_network_result(
                    tx,
                    op_result,
                    &op_manager,
                    &mut *event_listener,
                )
                .await;
            }
            NetMessageV1::Subscribe(ref op) => {
                // Phase 2b (#1454): task-per-tx bypass for client-initiated
                // SUBSCRIBE. See `try_forward_task_per_tx_reply` for the
                // full reasoning (reply-side structural gap between Phase
                // 1's forwarding hook and task-per-tx callers who never
                // push an op into the OpManager DashMap).
                //
                // Only forward **terminal** Response messages to the
                // task-per-tx channel. Non-terminal messages like
                // ForwardingAck (sent by relay peers to signal "I'm working
                // on it") must NOT be forwarded: they would fill the
                // capacity-1 reply channel before the real Response arrives,
                // causing the task to classify the ForwardingAck as
                // Unexpected and fail with UnexpectedOpState.
                //
                // When a Response IS present and a callback is registered,
                // the bypass returns Ok(None) and skips handle_op_request.
                // For non-Response messages (Request, ForwardingAck, etc.)
                // with a pending callback, the bypass doesn't fire and we
                // fall through to handle_op_request. If handle_op_request
                // completes the operation (e.g., local subscribe completion),
                // forward_pending_op_result_if_completed below delivers
                // the result to the OpCtx::send_and_await callback.
                if matches!(op, subscribe::SubscribeMsg::Response { .. })
                    && try_forward_task_per_tx_reply(
                        pending_op_result.as_ref(),
                        NetMessage::V1(NetMessageV1::Subscribe((*op).clone())),
                        "subscribe",
                    )
                {
                    return Ok(None);
                }

                let op_result = handle_op_request::<subscribe::SubscribeOp, _>(
                    &op_manager,
                    &mut conn_manager,
                    op,
                    source_addr,
                )
                .await;

                // Forward result to OpCtx::send_and_await callback when
                // the operation completes. This path fires when the
                // originator processes its own Request locally via
                // handle_op_execution (pending_op_result is Some) and
                // the subscribe completes without needing the network
                // (e.g., contract available locally). Without this,
                // the response_sender in pending_op_results is dropped
                // without a reply, causing send_and_await to fail.
                //
                // We synthesize a Response (not forward the original
                // Request) because classify_reply in op_ctx_task expects
                // a SubscribeMsg::Response variant.
                if let Some(ref callback) = pending_op_result {
                    if is_operation_completed(&op_result) {
                        let instance_id = match op {
                            subscribe::SubscribeMsg::Request { instance_id, .. }
                            | subscribe::SubscribeMsg::Response { instance_id, .. }
                            | subscribe::SubscribeMsg::Unsubscribe { instance_id, .. }
                            | subscribe::SubscribeMsg::ForwardingAck { instance_id, .. } => {
                                *instance_id
                            }
                        };
                        let result = match &op_result {
                            Ok(Some(OpEnum::Subscribe(sub_op))) => match sub_op.completed_key() {
                                Some(key) => subscribe::SubscribeMsgResult::Subscribed { key },
                                None => subscribe::SubscribeMsgResult::NotFound,
                            },
                            _ => subscribe::SubscribeMsgResult::NotFound,
                        };
                        let reply = NetMessage::from(subscribe::SubscribeMsg::Response {
                            id: *op.id(),
                            instance_id,
                            result,
                        });
                        if let Err(err) = callback.try_send(reply) {
                            tracing::debug!(
                                %err,
                                "subscribe local-completion: callback send failed \
                                 (task may have timed out)"
                            );
                        }
                    }
                }

                if let Err(OpError::OpNotAvailable(state)) = &op_result {
                    match state {
                        OpNotAvailable::Running => {
                            let delay = op_retry_backoff(i);
                            tracing::debug!(
                                delay_ms = delay.as_millis() as u64,
                                attempt = i,
                                "Pure network: Operation still running, backing off"
                            );
                            tokio::time::sleep(delay).await;
                            continue;
                        }
                        OpNotAvailable::Completed => {
                            tracing::debug!("Pure network: Operation already completed");
                            return Ok(None);
                        }
                    }
                }

                return handle_pure_network_result(
                    tx,
                    op_result,
                    &op_manager,
                    &mut *event_listener,
                )
                .await;
            }
            // Non-transactional message types: process once and return immediately.
            // These must NOT fall through to the post-loop "Dropping message" warning,
            // which is only meant for operation retry exhaustion.
            NetMessageV1::NeighborHosting { ref message } => {
                let Some(source) = source_addr else {
                    tracing::warn!(
                        "Received NeighborHosting message without source address (pure network)"
                    );
                    return Ok(None);
                };
                tracing::debug!(
                    from = %source,
                    "Processing NeighborHosting message (pure network)"
                );

                // Note: In the simplified architecture (2026-01 refactor), we no longer
                // attempt to establish subscriptions based on HostingAnnounce messages.
                // Update propagation uses the neighbor hosting manager directly, and subscriptions
                // are lease-based with automatic expiry.

                // Resolve source SocketAddr to TransportPublicKey for neighbor hosting
                let source_pub_key = op_manager
                    .ring
                    .connection_manager
                    .get_peer_by_addr(source)
                    .map(|pkl| pkl.pub_key().clone());
                let Some(source_pub_key) = source_pub_key else {
                    tracing::debug!(
                        %source,
                        "NeighborHosting: could not resolve source addr to pub_key, skipping"
                    );
                    return Ok(None);
                };
                let result = op_manager
                    .neighbor_hosting
                    .handle_message(&source_pub_key, message.clone());
                if let Some(response) = result.response {
                    // Send response back to sender
                    let response_msg =
                        NetMessage::V1(NetMessageV1::NeighborHosting { message: response });
                    if let Err(err) = conn_manager.send(source, response_msg).await {
                        tracing::error!(%err, %source, "Failed to send NeighborHosting response");
                    }
                }
                // Proactive state sync: broadcast our state for shared contracts
                // so the neighbor gets current state if they're stale after restart.
                // Only sync contracts we're actively interested in (receiving updates
                // or have downstream subscribers) — skip cached-only contracts.
                for instance_id in result.overlapping_contracts {
                    if let Some((key, state)) =
                        get_contract_state_by_id(&op_manager, &instance_id).await
                    {
                        if !op_manager.ring.is_receiving_updates(&key)
                            && !op_manager.ring.has_downstream_subscribers(&key)
                        {
                            continue;
                        }
                        tracing::debug!(
                            contract = %key,
                            peer = %source_pub_key,
                            "Proximity cache overlap — syncing state to neighbor"
                        );
                        if let Err(e) = op_manager
                            .notify_node_event(NodeEvent::SyncStateToPeer {
                                key,
                                new_state: state,
                                target: source,
                            })
                            .await
                        {
                            tracing::warn!(
                                contract = %instance_id,
                                error = %e,
                                "Failed to emit SyncStateToPeer for proximity sync"
                            );
                        }
                    }
                }
                return Ok(None);
            }
            NetMessageV1::InterestSync { ref message } => {
                let Some(source) = source_addr else {
                    tracing::warn!("Received InterestSync message without source address");
                    return Ok(None);
                };
                tracing::debug!(
                    from = %source,
                    "Processing InterestSync message"
                );

                // Handle interest synchronization for delta-based updates
                if let Some(response) =
                    handle_interest_sync_message(&op_manager, source, message.clone()).await
                {
                    let response_msg =
                        NetMessage::V1(NetMessageV1::InterestSync { message: response });
                    if let Err(err) = conn_manager.send(source, response_msg).await {
                        tracing::error!(%err, %source, "Failed to send InterestSync response");
                    }
                }
                return Ok(None);
            }
            NetMessageV1::ReadyState { ready } => {
                let Some(source) = source_addr else {
                    tracing::warn!("Received ReadyState message without source address");
                    return Ok(None);
                };
                if ready {
                    op_manager.ring.connection_manager.mark_peer_ready(source);
                } else {
                    op_manager
                        .ring
                        .connection_manager
                        .mark_peer_not_ready(source);
                }
                tracing::debug!(
                    from = %source,
                    ready,
                    "Processed ReadyState from peer"
                );
                return Ok(None);
            }
            NetMessageV1::Aborted(tx) => {
                tracing::debug!(
                    %tx,
                    tx_type = ?tx.transaction_type(),
                    "Received Aborted message, delegating to handle_aborted_op"
                );
                // Empty gateways: Aborted messages arrive over p2p connections, not
                // from the gateway join path. The gateways list is only used by
                // Connect retries; other operation types ignore it entirely.
                if let Err(err) = handle_aborted_op(tx, &op_manager, &[]).await {
                    if !matches!(err, OpError::StatePushed) {
                        tracing::error!(
                            %tx,
                            error = %err,
                            "Error handling aborted operation"
                        );
                    }
                }
                return Ok(None);
            }
        }
    }

    // If we reach here, retries were exhausted waiting for a concurrent operation to finish
    tracing::warn!(
        tx = %msg.id(),
        tx_type = ?msg.id().transaction_type(),
        "Dropping message after {MAX_RETRIES} retry attempts (operation busy)"
    );
    Ok(None)
}

/// Pure network result handling - no client notification logic
async fn handle_pure_network_result(
    tx: Option<Transaction>,
    op_result: Result<Option<crate::operations::OpEnum>, OpError>,
    _op_manager: &Arc<OpManager>,
    _event_listener: &mut dyn NetEventRegister,
) -> Result<Option<crate::operations::OpEnum>, crate::node::OpError> {
    tracing::debug!("Pure network result handling for transaction: {:?}", tx);

    match &op_result {
        Ok(Some(_op_res)) => {
            // Log network operation completion
            tracing::debug!(
                "Network operation completed successfully for transaction: {:?}",
                tx
            );

            // Register completion event (pure network concern)
            if let Some(tx_id) = tx {
                // TODO: Register completion event properly
                tracing::debug!("Network operation completed for transaction: {}", tx_id);
            }

            // TODO: Handle executor callbacks (network concern)
        }
        Ok(None) => {
            tracing::debug!("Network operation returned no result");
        }
        Err(OpError::StatePushed) => {
            return Ok(None);
        }
        Err(OpError::OpNotPresent(tx_id)) => {
            // OpNotPresent means a response arrived for an operation that no longer exists.
            // This is benign - it happens when:
            // 1. An operation timed out before the response arrived
            // 2. A late response arrives after a peer restart
            // 3. The operation was already completed via another path
            //
            // We log at debug level and return Ok(None) to avoid propagating
            // confusing "op not present" errors to clients.
            tracing::debug!(
                tx = %tx_id,
                "Network response arrived for non-existent operation (likely timed out or already completed)"
            );
            return Ok(None);
        }
        Err(e) => {
            tracing::error!("Network operation failed: {}", e);
            // TODO: Register error event properly
            if let Some(tx_id) = tx {
                tracing::debug!(
                    "Network operation failed for transaction: {} with error: {}",
                    tx_id,
                    e
                );
            }
        }
    }

    op_result
}

/// Handle incoming InterestSync messages for delta-based state synchronization.
///
/// This function processes the interest exchange protocol:
/// - `Interests`: Connection-time discovery of shared contract interests
/// - `Summaries`: State summaries for shared contracts
/// - `ChangeInterests`: Incremental interest changes
/// - `ResyncRequest`: Request full state when delta application fails
/// - `ResyncResponse`: Apply the full state received after a resync request
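///
/// # Protocol sketch
///
/// A typical exchange, reconstructed from the match arms below (peer
/// identities and payload details elided):
///
/// ```ignore
/// A -> B: Interests { hashes }                 // A's full interest set
/// B -> A: Summaries { entries }                // only for shared contracts
/// A -> B: ChangeInterests { added, removed }   // incremental changes
/// A -> B: ResyncRequest { key }                // a delta failed to apply
/// B -> A: ResyncResponse { key, state_bytes, summary_bytes }
/// ```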
async fn handle_interest_sync_message(
    op_manager: &Arc<OpManager>,
    source: std::net::SocketAddr,
    message: crate::message::InterestMessage,
) -> Option<crate::message::InterestMessage> {
    use crate::message::{InterestMessage, NodeEvent, SummaryEntry};
    use crate::ring::interest::contract_hash;

    match message {
        InterestMessage::Interests { hashes } => {
            tracing::debug!(
                from = %source,
                hash_count = hashes.len(),
                "Received Interests message"
            );

            let peer_key = get_peer_key_from_addr(op_manager, source);

            // Full-replace semantics: the incoming hashes represent the peer's
            // complete interest set. Remove entries for contracts whose hash is
            // NOT in the incoming set, then register/refresh the rest.
            if let Some(ref pk) = peer_key {
                let incoming_hashes: std::collections::HashSet<u32> =
                    hashes.iter().copied().collect();
                let current_contracts = op_manager.interest_manager.get_contracts_for_peer(pk);

                // Hash collisions (FNV-1a u32) can cause a stale entry to
                // survive if its hash collides with a live one. This is the
                // safe direction — false negatives on removal, not false
                // positives — and extremely rare in practice.
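                //
                // Worked example (hypothetical contracts A, B, C):
                //   current_contracts = {A, B, C}
                //   incoming_hashes   = {h(A), h(C)}
                //   => B is removed here; A and C get their TTLs refreshed
                //      in the register/refresh pass further down.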
                let mut removed = 0usize;
                for contract in &current_contracts {
                    let h = contract_hash(contract);
                    if !incoming_hashes.contains(&h) {
                        op_manager
                            .interest_manager
                            .remove_peer_interest(contract, pk);
                        removed += 1;
                    }
                }
                if removed > 0 {
                    tracing::debug!(
                        from = %source,
                        removed,
                        "Full-replace: removed stale interest entries"
                    );
                }
            }

            // Find contracts we share interest in
            let matching = op_manager.interest_manager.get_matching_contracts(&hashes);

            // Build summaries for shared contracts and register/refresh peer interest
            let mut entries = Vec::with_capacity(matching.len());
            for contract in matching {
                let hash = contract_hash(&contract);
                let summary = get_contract_summary(op_manager, &contract).await;
                entries.push(SummaryEntry::from_summary(hash, summary.as_ref()));

                if let Some(ref pk) = peer_key {
                    // Refresh TTL for existing entries (preserves cached summary).
                    // Only register new interest if this is a genuinely new entry;
                    // otherwise register_peer_interest would overwrite the cached
                    // summary with None, defeating delta optimization.
                    if op_manager
                        .interest_manager
                        .get_peer_interest(&contract, pk)
                        .is_some()
                    {
                        op_manager
                            .interest_manager
                            .refresh_peer_interest(&contract, pk);
                    } else {
                        op_manager.interest_manager.register_peer_interest(
                            &contract,
                            pk.clone(),
                            None, // New entry; summary arrives in their Summaries response
                            false,
                        );
                    }
                }
            }

            if entries.is_empty() {
                None
            } else {
                Some(InterestMessage::Summaries { entries })
            }
        }

        InterestMessage::Summaries { entries } => {
            tracing::debug!(
                from = %source,
                entry_count = entries.len(),
                "Received Summaries message"
            );

            // Update peer summaries and detect stale peers (#3221).
            //
            // Compare each peer summary with our own before storing it. If they
            // differ, the peer missed an earlier broadcast. We send state only
            // to the specific peer that reported the stale summary via
            // SyncStateToPeer (not BroadcastStateChange which fans out to ALL
            // subscribers). This avoids O(peers^2) broadcast storms where N
            // peers each trigger a full fan-out broadcast. See #3791.
            //
            // Both sides may detect the same mismatch (A sees B is stale, B sees
            // A is stale). This is safe: the contract's merge semantics (CRDTs
            // etc.) ensure the newer/correct state wins regardless of push order.
            //
            // When either summary is None, we skip the comparison. A peer with
            // no summary has no state yet and should receive it via the normal
            // subscription/GET flow, not via broadcast.
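            //
            // Staleness rule, as implemented by the zip/is_some_and below:
            //   ours = Some(x), theirs = Some(y), x != y  -> stale
            //   ours = Some(x), theirs = Some(x)          -> in sync
            //   either summary is None                    -> skip comparison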
            let peer_key = get_peer_key_from_addr(op_manager, source);
            let mut stale_contracts = Vec::new();
            // Collect (contract, state_hash) for deferred StateConfirmed telemetry.
            // Only emitted in direct-runner mode to avoid .await points that change
            // turmoil task scheduling.
            let emit_confirmed = crate::config::SimulationIdleTimeout::is_enabled();
            let mut confirmed_states: Vec<(freenet_stdlib::prelude::ContractKey, String)> =
                Vec::new();

            if let Some(pk) = peer_key {
                for entry in entries {
                    for contract in op_manager.interest_manager.lookup_by_hash(entry.hash) {
                        if !op_manager.interest_manager.has_local_interest(&contract) {
                            continue;
                        }

                        let their_summary = entry.to_summary();
                        let our_summary = get_contract_summary(op_manager, &contract).await;

                        if emit_confirmed {
                            if let Some(ref summary) = our_summary {
                                confirmed_states.push((contract, hex::encode(summary.as_ref())));
                            }
                        }

                        let is_stale = our_summary
                            .as_ref()
                            .zip(their_summary.as_ref())
                            .is_some_and(|(ours, theirs)| ours.as_ref() != theirs.as_ref());

                        op_manager.interest_manager.update_peer_summary(
                            &contract,
                            &pk,
                            their_summary,
                        );

                        if is_stale && !stale_contracts.contains(&contract) {
                            stale_contracts.push(contract);
                        }
                    }
                }
            }

            // Send current state only to the specific peer that reported a stale
            // summary. Previously this emitted BroadcastStateChange which fanned
            // out to ALL subscribers (~28 peers), causing O(peers^2) traffic when
            // many peers reported mismatches within the same heartbeat cycle.
            for contract in stale_contracts {
                let Some(state) = get_contract_state(op_manager, &contract).await else {
                    tracing::trace!(
                        contract = %contract,
                        "Skipping stale-peer sync — no local state available"
                    );
                    continue;
                };
                tracing::info!(
                    contract = %contract,
                    stale_peer = %source,
                    "Summary mismatch in interest sync — syncing state to stale peer"
                );
                if let Err(e) = op_manager
                    .notify_node_event(NodeEvent::SyncStateToPeer {
                        key: contract,
                        new_state: state,
                        target: source,
                    })
                    .await
                {
                    tracing::warn!(
                        contract = %contract,
                        error = %e,
                        "Failed to emit SyncStateToPeer for stale peer correction"
                    );
                }
            }

            // Emit deferred StateConfirmed telemetry so the convergence
            // checker has up-to-date state hashes for CRDT-merged state.
            for (key, state_hash) in confirmed_states {
                if let Some(event) =
                    crate::tracing::NetEventLog::state_confirmed(&op_manager.ring, key, state_hash)
                {
                    op_manager
                        .ring
                        .register_events(either::Either::Left(event))
                        .await;
                }
            }

            // No response needed for Summaries
            None
        }

        InterestMessage::ChangeInterests { added, removed } => {
            tracing::debug!(
                from = %source,
                added_count = added.len(),
                removed_count = removed.len(),
                "Received ChangeInterests message"
            );

            let peer_key = get_peer_key_from_addr(op_manager, source);

            // Handle removals
            if let Some(ref pk) = peer_key {
                for hash in removed {
                    // Handle hash collisions - remove interest from all matching contracts
                    for contract in op_manager.interest_manager.lookup_by_hash(hash) {
                        op_manager
                            .interest_manager
                            .remove_peer_interest(&contract, pk);
                    }
                }
            }

            // Handle additions - respond with summaries for newly shared contracts
            let mut entries = Vec::new();
            if let Some(ref pk) = peer_key {
                for hash in added {
                    // Handle hash collisions - process all matching contracts
                    for contract in op_manager.interest_manager.lookup_by_hash(hash) {
                        // Only process if we have local interest in this contract
                        if !op_manager.interest_manager.has_local_interest(&contract) {
                            continue;
                        }

                        // Register their interest
                        op_manager.interest_manager.register_peer_interest(
                            &contract,
                            pk.clone(),
                            None,
                            false,
                        );

                        // Get our summary to send back
                        let summary = get_contract_summary(op_manager, &contract).await;
                        entries.push(SummaryEntry::from_summary(hash, summary.as_ref()));
                    }
                }
            }

            if entries.is_empty() {
                None
            } else {
                Some(InterestMessage::Summaries { entries })
            }
        }

        InterestMessage::ResyncRequest { key } => {
            tracing::info!(
                from = %source,
                contract = %key,
                event = "resync_request_received",
                "Received ResyncRequest - peer needs full state"
            );

            // Track this for testing - high counts indicate incorrect summary caching (PR #2763)
            op_manager.interest_manager.record_resync_request_received();
            crate::config::GlobalTestMetrics::record_resync_request();

            // Clear cached summary for this peer
            let peer_key = get_peer_key_from_addr(op_manager, source);
            if let Some(ref pk) = peer_key {
                op_manager
                    .interest_manager
                    .update_peer_summary(&key, pk, None);
            }

            // Get PeerKeyLocation for telemetry
            let from_peer = op_manager.ring.connection_manager.get_peer_by_addr(source);

            // Emit telemetry for ResyncRequest received
            if let Some(ref from_pkl) = from_peer {
                if let Some(event) = crate::tracing::NetEventLog::resync_request_received(
                    &op_manager.ring,
                    key,
                    from_pkl.clone(),
                ) {
                    op_manager
                        .ring
                        .register_events(either::Either::Left(event))
                        .await;
                }
            } else {
                tracing::debug!(
                    contract = %key,
                    source = %source,
                    "ResyncRequest telemetry skipped: peer lookup failed"
                );
            }

            // Fetch current state from store
            let state = get_contract_state(op_manager, &key).await;
            let Some(state) = state else {
                tracing::warn!(
                    contract = %key,
                    "ResyncRequest for contract we don't have state for"
                );
                return None;
            };

            // Fetch our summary
            let summary = get_contract_summary(op_manager, &key).await;
            let Some(summary) = summary else {
                tracing::warn!(
                    contract = %key,
                    "ResyncRequest for contract we can't compute summary for"
                );
                return None;
            };

            tracing::info!(
                to = %source,
                contract = %key,
                state_size = state.as_ref().len(),
                summary_size = summary.as_ref().len(),
                event = "resync_response_sent",
                "Sending ResyncResponse with full state"
            );

            // Emit telemetry for ResyncResponse sent
            if let Some(ref to_pkl) = from_peer {
                if let Some(event) = crate::tracing::NetEventLog::resync_response_sent(
                    &op_manager.ring,
                    key,
                    to_pkl.clone(),
                    state.as_ref().len(),
                ) {
                    op_manager
                        .ring
                        .register_events(either::Either::Left(event))
                        .await;
                }
            }

            Some(InterestMessage::ResyncResponse {
                key,
                state_bytes: state.as_ref().to_vec(),
                summary_bytes: summary.as_ref().to_vec(),
            })
        }

        InterestMessage::ResyncResponse {
            key,
            state_bytes,
            summary_bytes,
        } => {
            tracing::info!(
                from = %source,
                contract = %key,
                state_size = state_bytes.len(),
                event = "resync_response_received",
                "Received ResyncResponse with full state"
            );

            // Apply the full state using an update
            let state = freenet_stdlib::prelude::State::from(state_bytes.clone());
            let update_data = freenet_stdlib::prelude::UpdateData::State(state);

            // Send to contract handler
            use crate::contract::ContractHandlerEvent;
            match op_manager
                .notify_contract_handler(ContractHandlerEvent::UpdateQuery {
                    key,
                    data: update_data,
                    related_contracts: Default::default(),
                })
                .await
            {
                Ok(ContractHandlerEvent::UpdateResponse {
                    new_value: Ok(_), ..
                }) => {
                    tracing::info!(
                        from = %source,
                        contract = %key,
                        event = "resync_applied",
                        changed = true,
                        "ResyncResponse state applied successfully"
                    );
                }
                Ok(ContractHandlerEvent::UpdateNoChange { .. }) => {
                    tracing::info!(
                        from = %source,
                        contract = %key,
                        event = "resync_applied",
                        changed = false,
                        "ResyncResponse state unchanged (already had this state)"
                    );
                }
                Ok(other) => {
                    tracing::warn!(
                        from = %source,
                        contract = %key,
                        event = "resync_failed",
                        response = ?other,
                        "Unexpected response to resync update"
                    );
                }
                Err(e) => {
                    tracing::error!(
                        from = %source,
                        contract = %key,
                        event = "resync_failed",
                        error = %e,
                        "Failed to apply resync state"
                    );
                }
            }

            // Update the peer's summary in our interest tracker
            let peer_key = get_peer_key_from_addr(op_manager, source);
            if let Some(pk) = peer_key {
                let summary = freenet_stdlib::prelude::StateSummary::from(summary_bytes);
                op_manager
                    .interest_manager
                    .update_peer_summary(&key, &pk, Some(summary));
            }

            // No response needed
            None
        }
    }
}

/// Get the contract state from the state store.
async fn get_contract_state(
    op_manager: &Arc<OpManager>,
    key: &freenet_stdlib::prelude::ContractKey,
) -> Option<freenet_stdlib::prelude::WrappedState> {
    get_contract_state_by_id(op_manager, key.id())
        .await
        .map(|(_, state)| state)
}

/// Get the contract state by instance ID, returning both the full `ContractKey` and state.
///
/// Used for proactive state sync when proximity cache discovers overlapping contracts,
/// where we only have a `ContractInstanceId` (not a full `ContractKey`).
async fn get_contract_state_by_id(
    op_manager: &Arc<OpManager>,
    instance_id: &freenet_stdlib::prelude::ContractInstanceId,
) -> Option<(
    freenet_stdlib::prelude::ContractKey,
    freenet_stdlib::prelude::WrappedState,
)> {
    use crate::contract::ContractHandlerEvent;

    match op_manager
        .notify_contract_handler(ContractHandlerEvent::GetQuery {
            instance_id: *instance_id,
            return_contract_code: false,
        })
        .await
    {
        Ok(ContractHandlerEvent::GetResponse {
            key: Some(key),
            response: Ok(store_response),
        }) => store_response.state.map(|state| (key, state)),
        Ok(ContractHandlerEvent::GetResponse {
            response: Err(e), ..
        }) => {
            tracing::warn!(
                contract = %instance_id,
                error = %e,
                "Failed to get contract state by instance id"
            );
            None
        }
        _ => None,
    }
}

/// Get the contract state summary using the contract's summarize_state method.
async fn get_contract_summary(
    op_manager: &Arc<OpManager>,
    key: &freenet_stdlib::prelude::ContractKey,
) -> Option<freenet_stdlib::prelude::StateSummary<'static>> {
    use crate::contract::ContractHandlerEvent;

    match op_manager
        .notify_contract_handler(ContractHandlerEvent::GetSummaryQuery { key: *key })
        .await
    {
        Ok(ContractHandlerEvent::GetSummaryResponse {
            summary: Ok(summary),
            ..
        }) => Some(summary),
        Ok(ContractHandlerEvent::GetSummaryResponse {
            summary: Err(e), ..
        }) => {
            tracing::warn!(
                contract = %key,
                error = %e,
                "Failed to get contract summary"
            );
            None
        }
        _ => None,
    }
}

/// Get the PeerKey for a socket address.
fn get_peer_key_from_addr(
    op_manager: &Arc<OpManager>,
    addr: std::net::SocketAddr,
) -> Option<crate::ring::interest::PeerKey> {
    op_manager
        .ring
        .connection_manager
        .get_peer_by_addr(addr)
        .map(|pkl| crate::ring::interest::PeerKey::from(pkl.pub_key.clone()))
}

/// Attempts to subscribe to a contract. Thin wrapper around
/// [`subscribe_with_id`] that allocates a fresh transaction.
#[allow(dead_code)]
pub async fn subscribe(
    op_manager: Arc<OpManager>,
    instance_id: ContractInstanceId,
    client_id: Option<ClientId>,
) -> Result<Transaction, OpError> {
    subscribe_with_id(op_manager, instance_id, client_id, None).await
}

/// Attempts to subscribe to a contract with a specific transaction ID (for deduplication).
///
/// Since #1454 Phase 2b, this function is the entry point for
/// **client-initiated** SUBSCRIBE only — it spawns a task-per-tx driver via
/// [`crate::operations::subscribe::start_client_subscribe`] rather than going
/// through the legacy `request_subscribe` + `handle_op_result` re-entry loop.
///
/// The renewal-initiated path (`ring::connection_maintenance`), the PUT
/// sub-op path (`operations::start_subscription_request_internal`), and the
/// executor/WASM-initiated path (`contract::executor::SubscribeContract::resume_op`)
/// all call `subscribe::request_subscribe` directly and bypass this function,
/// so they continue on the legacy path unchanged.
///
/// The legacy `is_renewal` parameter has been removed in Phase 2b: no live
/// caller passes `true`, and the task-per-tx path does not carry renewal
/// jitter / spam-prevention semantics (those are owned by
/// `ring::connection_maintenance` and are load-bearing there). Accepting
/// `is_renewal=true` here would silently route a renewal through the wrong
/// code path — removing the parameter makes the misuse a compile error
/// instead of a runtime footgun (review finding L1).
///
/// # Parameters
///
/// - `client_id`: If set, registers a legacy subscription-result waiter via
///   `ch_outbound.waiting_for_subscription_result`. Both WS call sites in
///   `client_events.rs` leave this `None` because they pre-register a
///   transaction-result waiter via `waiting_for_transaction_result`.
/// - `transaction_id`: The client-visible transaction id. If `None`, a fresh
///   one is allocated — currently only the dead-code wrapper `subscribe()`
///   does this.
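///
/// # Example
///
/// A minimal sketch of the WS-style call (the bindings are hypothetical;
/// the WS path pre-registers its own waiter, hence `client_id: None`):
///
/// ```ignore
/// let client_tx = Transaction::new::<subscribe::SubscribeMsg>();
/// let tx = subscribe_with_id(op_manager.clone(), instance_id, None, Some(client_tx)).await?;
/// // `tx` is the client-visible transaction; results arrive later via
/// // the result router, not as a return value of this call.
/// ```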
pub async fn subscribe_with_id(
    op_manager: Arc<OpManager>,
    instance_id: ContractInstanceId,
    client_id: Option<ClientId>,
    transaction_id: Option<Transaction>,
) -> Result<Transaction, OpError> {
    let client_tx = match transaction_id {
        Some(id) => id,
        None => Transaction::new::<subscribe::SubscribeMsg>(),
    };

    if let Some(client_id) = client_id {
        use crate::client_events::RequestId;
        // Generate a default RequestId for internal subscription operations.
        // Legacy behaviour preserved: callers that pass a `client_id` expect
        // the subscription-result waiter to be registered here. The WS path
        // does not hit this branch (it pre-registers its own waiter).
        let request_id = RequestId::new();
        if let Err(e) = op_manager
            .ch_outbound
            .waiting_for_subscription_result(client_tx, instance_id, client_id, request_id)
            .await
        {
            tracing::warn!(tx = %client_tx, error = %e, "failed to register subscription result waiter");
        }
    }

    // Task-per-tx: spawn the driver and return the client-visible tx
    // immediately. The spawned task owns retries, peer selection, local
    // completion, and result delivery via `result_router_tx`.
    subscribe::start_client_subscribe(op_manager, instance_id, client_tx).await
}

async fn handle_aborted_op(
    tx: Transaction,
    op_manager: &OpManager,
    gateways: &[PeerKeyLocation],
) -> Result<(), OpError> {
    use crate::util::IterExt;
    match tx.transaction_type() {
        TransactionType::Connect => {
            // An attempt to establish a connection failed. This could be fatal,
            // since the node is useless without network connectivity, so we
            // retry with exponential backoff.
            match op_manager.pop(&tx) {
                Ok(Some(OpEnum::Connect(op)))
                    if op_manager.ring.open_connections()
                        < op_manager.ring.connection_manager.min_connections =>
                {
                    let gateway = op.gateway().cloned();
                    if let Some(gateway) = gateway {
                        // Clean up phantom location_for_peer entry left by should_accept's
                        // record_pending_location (#3088). Without this, the gateway appears
                        // permanently connected and initial_join_procedure never retries it.
                        if let Some(peer_addr) = gateway.peer_addr.as_known() {
                            op_manager
                                .ring
                                .connection_manager
                                .prune_in_transit_connection(*peer_addr);

                            let backoff_duration = {
                                let mut backoff = op_manager.gateway_backoff.lock();
                                backoff.record_failure(*peer_addr);
                                backoff.remaining_backoff(*peer_addr)
                            };

                            if let Some(duration) = backoff_duration {
                                // Cap the wait at GATEWAY_BACKOFF_POLL_CAP when the
                                // node already has ring connections, matching the
                                // policy in initial_join_procedure (issue #3304).
                                // The cap itself is jittered: it is drawn uniformly
                                // from [80%, 100%) of the poll cap so retrying nodes
                                // don't wake in a thundering herd.
                                let open_conns = op_manager.ring.open_connections();
                                let effective = if open_conns > 0 {
                                    let jitter_ms = crate::config::GlobalRng::random_range(
                                        0u64..(connect::GATEWAY_BACKOFF_POLL_CAP.as_millis() / 5)
                                            as u64,
                                    );
                                    let cap = connect::GATEWAY_BACKOFF_POLL_CAP.mul_f64(0.8)
                                        + Duration::from_millis(jitter_ms);
                                    duration.min(cap)
                                } else {
                                    duration
                                };
                                tracing::info!(
                                    gateway = %gateway,
                                    backoff_secs = duration.as_secs(),
                                    effective_wait_secs = effective.as_secs(),
                                    open_connections = open_conns,
                                    "Gateway connection failed, waiting before retry"
                                );
                                // Use select! so suspend/isolation recovery can
                                // wake us immediately via gateway_backoff_cleared,
                                // matching the pattern in initial_join_procedure.
                                tokio::select! {
                                    _ = tokio::time::sleep(effective) => {},
                                    _ = op_manager.gateway_backoff_cleared.notified() => {
                                        tracing::info!(
                                            gateway = %gateway,
                                            "Gateway backoff cleared externally, retrying immediately"
                                        );
                                    },
                                }
                            }
                        }

                        tracing::debug!("Retrying connection to gateway {}", gateway);
                        connect::join_ring_request(&gateway, op_manager).await?;
                    }
                }
                Ok(Some(OpEnum::Connect(op))) => {
                    // Clean up phantom location_for_peer entry (#3088)
                    if let Some(peer_addr) = op.get_next_hop_addr() {
                        op_manager
                            .ring
                            .connection_manager
                            .prune_in_transit_connection(peer_addr);
                    }
                    if op_manager.ring.open_connections() == 0 && op_manager.ring.is_gateway() {
                        tracing::warn!("Retrying joining the ring with an other gateway");
                        if let Some(gateway) = gateways.iter().shuffle().next() {
                            connect::join_ring_request(gateway, op_manager).await?
                        }
                    }
                }
                Ok(Some(other)) => {
                    op_manager.push(tx, other).await?;
                }
                _ => {}
            }
        }
        TransactionType::Get => match op_manager.pop(&tx) {
            Ok(Some(OpEnum::Get(op))) => {
                if let Err(err) = op.handle_abort(op_manager).await {
                    if !matches!(err, OpError::StatePushed) {
                        return Err(err);
                    }
                }
            }
            Ok(Some(other)) => {
                op_manager.push(tx, other).await?;
            }
            _ => {}
        },
        TransactionType::Subscribe => match op_manager.pop(&tx) {
            Ok(Some(OpEnum::Subscribe(op))) => {
                if let Err(err) = op.handle_abort(op_manager).await {
                    if !matches!(err, OpError::StatePushed) {
                        return Err(err);
                    }
                }
            }
            Ok(Some(other)) => {
                op_manager.push(tx, other).await?;
            }
            _ => {}
        },
        TransactionType::Put => match op_manager.pop(&tx) {
            Ok(Some(OpEnum::Put(op))) => {
                if let Err(err) = op.handle_abort(op_manager).await {
                    if !matches!(err, OpError::StatePushed) {
                        return Err(err);
                    }
                }
            }
            Ok(Some(other)) => {
                op_manager.push(tx, other).await?;
            }
            _ => {}
        },
        TransactionType::Update => match op_manager.pop(&tx) {
            Ok(Some(OpEnum::Update(op))) => {
                if let Err(err) = op.handle_abort(op_manager).await {
                    if !matches!(err, OpError::StatePushed) {
                        return Err(err);
                    }
                }
            }
            Ok(Some(other)) => {
                op_manager.push(tx, other).await?;
            }
            _ => {}
        },
    }
    Ok(())
}
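
// All abort arms above swallow `OpError::StatePushed`: that variant signals
// that the op's state was pushed back to the op manager rather than a failure,
// so it is control flow and must not bubble out of `handle_aborted_op`.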

/// The identifier of a peer in the network: a known public key and socket address.
///
/// This is a type alias for [`ring::KnownPeerKeyLocation`], which bundles a peer's
/// cryptographic identity (public key) with its guaranteed-known network address.
///
/// Use `KnownPeerKeyLocation` directly when you need the full type name for clarity.
/// Use `PeerKeyLocation` when the address may be unknown (e.g., during NAT traversal).
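///
/// A minimal construction sketch, mirroring the unit tests below (assumes
/// `TransportKeypair` and `SocketAddr` are in scope):
///
/// ```ignore
/// let keypair = TransportKeypair::new();
/// let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
/// // Identity is the full (pub_key, addr) pair: the same key at the same
/// // address compares equal; changing either yields a distinct peer.
/// let peer = PeerId::new(keypair.public().clone(), addr);
/// assert_eq!(peer, PeerId::new(keypair.public().clone(), addr));
/// ```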
pub type PeerId = crate::ring::KnownPeerKeyLocation;

pub async fn run_local_node(
    mut executor: Executor,
    socket: WebsocketApiConfig,
) -> anyhow::Result<()> {
    if !crate::server::is_private_ip(&socket.address) {
        anyhow::bail!(
            "invalid ip: {}, only loopback and private network addresses are allowed",
            socket.address
        )
    }

    let (mut gw, mut ws_proxy) = crate::server::serve_client_api_in(socket).await?;

    // TODO: use combinator instead
    // let mut all_clients =
    //    ClientEventsCombinator::new([Box::new(ws_handle), Box::new(http_handle)]);
    enum Receiver {
        Ws,
        Gw,
    }
    let mut receiver;
    loop {
        let req = crate::deterministic_select! {
            req = ws_proxy.recv() => {
                receiver = Receiver::Ws;
                req?
            },
            req = gw.recv() => {
                receiver = Receiver::Gw;
                req?
            },
        };
        let OpenRequest {
            client_id: id,
            request,
            notification_channel,
            token,
            origin_contract,
            ..
        } = req;
        tracing::debug!(client_id = %id, ?token, "Received OpenRequest -> {request}");

        let res = match *request {
            ClientRequest::ContractOp(op) => {
                executor
                    .contract_requests(op, id, notification_channel)
                    .await
            }
            ClientRequest::DelegateOp(op) => {
                // Use the origin_contract already resolved by the WebSocket/HTTP client API
                // instead of looking it up again from gw.origin_contracts (which could fail
                // if the token expired between the WebSocket connect and this request).
                let op_name = match op {
                    DelegateRequest::RegisterDelegate { .. } => "RegisterDelegate",
                    DelegateRequest::ApplicationMessages { .. } => "ApplicationMessages",
                    DelegateRequest::UnregisterDelegate(_) => "UnregisterDelegate",
                    _ => "Unknown",
                };
                tracing::debug!(
                    op_name = ?op_name,
                    ?origin_contract,
                    "Handling ClientRequest::DelegateOp"
                );
                executor.delegate_request(op, origin_contract.as_ref(), None)
            }
            ClientRequest::Disconnect { cause } => {
                if let Some(cause) = cause {
                    tracing::info!("disconnecting cause: {cause}");
                }
                continue;
            }
            ClientRequest::Authenticate { .. }
            | ClientRequest::NodeQueries(_)
            | ClientRequest::Close
            | _ => Err(ExecutorError::other(anyhow::anyhow!("not supported"))),
        };

        match res {
            Ok(res) => {
                match receiver {
                    Receiver::Ws => ws_proxy.send(id, Ok(res)).await?,
                    Receiver::Gw => gw.send(id, Ok(res)).await?,
                };
            }
            Err(err) if err.is_request() => {
                let err = ErrorKind::RequestError(err.unwrap_request());
                match receiver {
                    Receiver::Ws => {
                        ws_proxy.send(id, Err(err.into())).await?;
                    }
                    Receiver::Gw => {
                        gw.send(id, Err(err.into())).await?;
                    }
                };
            }
            Err(err) => {
                tracing::error!("{err}");
                let err = Err(ErrorKind::Unhandled {
                    cause: format!("{err}").into(),
                }
                .into());
                match receiver {
                    Receiver::Ws => {
                        ws_proxy.send(id, err).await?;
                    }
                    Receiver::Gw => {
                        gw.send(id, err).await?;
                    }
                };
            }
        }
    }
}

pub async fn run_network_node(mut node: Node) -> anyhow::Result<()> {
    tracing::info!("Starting node");

    let is_gateway = node.inner.is_gateway;
    let location = node.inner.location.or_else(|| {
        is_gateway
            .then(|| {
                node.inner
                    .peer_id
                    .as_ref()
                    .map(|id| Location::from_address(&id.socket_addr()))
            })
            .flatten()
    });

    if let Some(location) = location {
        tracing::info!("Setting initial location: {location}");
        node.update_location(location);
    }

    match node.run().await {
        Ok(_) => {
            if is_gateway {
                tracing::info!("Gateway finished");
            } else {
                tracing::info!("Node finished");
            }

            Ok(())
        }
        Err(e) => {
            tracing::error!("{e}");
            Err(e)
        }
    }
}

/// Trait to determine if an operation has completed, regardless of its specific type.
pub trait IsOperationCompleted {
    /// Returns true if the operation has completed (successfully or with error)
    fn is_completed(&self) -> bool;
}

impl IsOperationCompleted for OpEnum {
    fn is_completed(&self) -> bool {
        match self {
            OpEnum::Connect(op) => op.is_completed(),
            OpEnum::Put(op) => op.is_completed(),
            OpEnum::Get(op) => op.is_completed(),
            OpEnum::Subscribe(op) => op.is_completed(),
            OpEnum::Update(op) => op.is_completed(),
        }
    }
}
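
// For example, a ConnectOp parked in `ConnectState::WaitingForResponses`
// reports `is_completed() == false`, while one in `ConnectState::Completed`
// reports `true`; the `callback_forward_tests` module below constructs both
// states.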

/// Classify a `(TransactionType, OpOutcome)` pair into an optional `OpType` and success flag
/// for dashboard recording. Returns `(None, false)` for CONNECT transactions (not contract ops).
///
/// - `Irrelevant`: operation completed but without routing stats for this peer → success
/// - `Incomplete`: operation never finalized → failure
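///
/// A minimal sketch mirroring the unit tests below:
///
/// ```ignore
/// let (op_type, success) = classify_op_outcome(TransactionType::Get, OpOutcome::Irrelevant);
/// assert!(matches!(op_type, Some(network_status::OpType::Get)));
/// assert!(success);
/// ```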
fn classify_op_outcome(
    tx_type: TransactionType,
    outcome: OpOutcome<'_>,
) -> (Option<network_status::OpType>, bool) {
    use network_status::OpType;
    match (tx_type, outcome) {
        (
            TransactionType::Get,
            OpOutcome::ContractOpSuccess { .. } | OpOutcome::ContractOpSuccessUntimed { .. },
        ) => (Some(OpType::Get), true),
        (TransactionType::Get, OpOutcome::ContractOpFailure { .. }) => (Some(OpType::Get), false),
        (
            TransactionType::Put,
            OpOutcome::ContractOpSuccess { .. } | OpOutcome::ContractOpSuccessUntimed { .. },
        ) => (Some(OpType::Put), true),
        (TransactionType::Put, OpOutcome::ContractOpFailure { .. }) => (Some(OpType::Put), false),
        (
            TransactionType::Update,
            OpOutcome::ContractOpSuccess { .. } | OpOutcome::ContractOpSuccessUntimed { .. },
        ) => (Some(OpType::Update), true),
        (TransactionType::Update, OpOutcome::ContractOpFailure { .. }) => {
            (Some(OpType::Update), false)
        }
        (
            TransactionType::Subscribe,
            OpOutcome::ContractOpSuccess { .. } | OpOutcome::ContractOpSuccessUntimed { .. },
        ) => (Some(OpType::Subscribe), true),
        (TransactionType::Subscribe, OpOutcome::ContractOpFailure { .. }) => {
            (Some(OpType::Subscribe), false)
        }
        // Irrelevant = completed successfully but without routing stats
        // (e.g., UPDATE when stats.target is None, SUBSCRIBE when stats is None)
        (TransactionType::Get, OpOutcome::Irrelevant) => (Some(OpType::Get), true),
        (TransactionType::Put, OpOutcome::Irrelevant) => (Some(OpType::Put), true),
        (TransactionType::Update, OpOutcome::Irrelevant) => (Some(OpType::Update), true),
        (TransactionType::Subscribe, OpOutcome::Irrelevant) => (Some(OpType::Subscribe), true),
        // Incomplete = operation never finalized
        (TransactionType::Get, OpOutcome::Incomplete) => (Some(OpType::Get), false),
        (TransactionType::Put, OpOutcome::Incomplete) => (Some(OpType::Put), false),
        (TransactionType::Update, OpOutcome::Incomplete) => (Some(OpType::Update), false),
        (TransactionType::Subscribe, OpOutcome::Incomplete) => (Some(OpType::Subscribe), false),
        // CONNECT is not a contract operation
        _ => (None, false),
    }
}

/// Check if an operation result indicates completion
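///
/// The negative cases mirror the parametrized tests below:
///
/// ```ignore
/// assert!(!is_operation_completed(&Ok(None)));
/// assert!(!is_operation_completed(&Err(OpError::StatePushed)));
/// ```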
pub fn is_operation_completed(op_result: &Result<Option<OpEnum>, OpError>) -> bool {
    match op_result {
        // If we got an OpEnum, check its specific completion status using the trait
        Ok(Some(op)) => op.is_completed(),
        _ => false,
    }
}

#[cfg(test)]
mod tests {
    use std::net::{Ipv4Addr, Ipv6Addr};

    use super::*;
    use crate::operations::OpError;
    use rstest::rstest;

    // Hostname resolution tests
    #[tokio::test]
    async fn test_hostname_resolution_localhost() {
        let addr = Address::Hostname("localhost".to_string());
        let socket_addr = NodeConfig::parse_socket_addr(&addr).await.unwrap();
        assert!(
            socket_addr.ip() == IpAddr::V4(Ipv4Addr::LOCALHOST)
                || socket_addr.ip() == IpAddr::V6(Ipv6Addr::LOCALHOST)
        );
        assert!(socket_addr.port() > 1024);
    }

    #[tokio::test]
    async fn test_hostname_resolution_with_port() {
        let addr = Address::Hostname("google.com:8080".to_string());
        let socket_addr = NodeConfig::parse_socket_addr(&addr).await.unwrap();
        assert_eq!(socket_addr.port(), 8080);
    }

    #[tokio::test]
    async fn test_hostname_resolution_with_trailing_dot() {
        // DNS names with trailing dot should be handled
        let addr = Address::Hostname("localhost.".to_string());
        let result = NodeConfig::parse_socket_addr(&addr).await;
        // Resolution may fail in restricted DNS environments; when it
        // succeeds, the result must be a loopback address.
        if let Ok(socket_addr) = result {
            assert!(
                socket_addr.ip() == IpAddr::V4(Ipv4Addr::LOCALHOST)
                    || socket_addr.ip() == IpAddr::V6(Ipv6Addr::LOCALHOST)
            );
        }
    }

    #[tokio::test]
    async fn test_hostname_resolution_direct_socket_addr() {
        let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
        let addr = Address::HostAddress(socket);
        let resolved = NodeConfig::parse_socket_addr(&addr).await.unwrap();
        assert_eq!(resolved, socket);
    }

    #[tokio::test]
    async fn test_hostname_resolution_invalid_port() {
        let addr = Address::Hostname("localhost:not_a_port".to_string());
        let result = NodeConfig::parse_socket_addr(&addr).await;
        assert!(result.is_err());
    }

    // Superseded: Old addr-only equality (same_addr_different_keys → equal) was replaced
    // with full-field equality (addr + pub_key) in #3616. Kept as historical documentation
    // of the old behavior.
    #[ignore]
    #[rstest]
    #[case::same_addr_different_keys(8080, 8080, true)]
    #[case::different_addr_same_key(8080, 8081, false)]
    fn test_peer_id_equality(#[case] port1: u16, #[case] port2: u16, #[case] expected_equal: bool) {
        let keypair1 = TransportKeypair::new();
        let keypair2 = TransportKeypair::new();
        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port1);
        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port2);
        // Old behavior: PeerId equality was addr-only, so same_addr_different_keys was true.
        // New behavior: equality uses full fields, so same_addr_different_keys is false.
        let peer1 = PeerId::new(keypair1.public().clone(), addr1);
        let peer2 = PeerId::new(keypair2.public().clone(), addr2);
        assert_eq!(peer1 == peer2, expected_equal);
    }

    // PeerId (KnownPeerKeyLocation) equality tests
    // PeerId now uses full-field equality (both addr and pub_key), matching identity semantics.
    #[test]
    fn test_peer_id_equality_same_key_same_addr() {
        let keypair = TransportKeypair::new();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let peer1 = PeerId::new(keypair.public().clone(), addr);
        let peer2 = PeerId::new(keypair.public().clone(), addr);
        assert_eq!(peer1, peer2);
    }

    #[test]
    fn test_peer_id_equality_different_key_same_addr() {
        let keypair1 = TransportKeypair::new();
        let keypair2 = TransportKeypair::new();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        // Different keys at same addr are different peers (key is identity)
        let peer1 = PeerId::new(keypair1.public().clone(), addr);
        let peer2 = PeerId::new(keypair2.public().clone(), addr);
        assert_ne!(peer1, peer2);
    }

    #[test]
    fn test_peer_id_equality_different_addr() {
        let keypair = TransportKeypair::new();
        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081);
        let peer1 = PeerId::new(keypair.public().clone(), addr1);
        let peer2 = PeerId::new(keypair.public().clone(), addr2);
        assert_ne!(peer1, peer2);
    }

    #[rstest]
    #[case::lower_port_first(8080, 8081)]
    #[case::high_port_diff(1024, 65535)]
    fn test_peer_id_ordering(#[case] lower_port: u16, #[case] higher_port: u16) {
        let keypair = TransportKeypair::new();
        let addr1 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), lower_port);
        let addr2 = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), higher_port);

        let peer1 = PeerId::new(keypair.public().clone(), addr1);
        let peer2 = PeerId::new(keypair.public().clone(), addr2);

        assert!(peer1 < peer2);
        assert!(peer2 > peer1);
    }

    #[test]
    fn test_peer_id_hash_consistency() {
        use std::collections::hash_map::DefaultHasher;
        use std::hash::{Hash, Hasher};

        let keypair = TransportKeypair::new();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);

        let peer1 = PeerId::new(keypair.public().clone(), addr);
        let peer2 = PeerId::new(keypair.public().clone(), addr);

        let mut hasher1 = DefaultHasher::new();
        let mut hasher2 = DefaultHasher::new();
        peer1.hash(&mut hasher1);
        peer2.hash(&mut hasher2);

        // Same key + same address should produce same hash
        assert_eq!(hasher1.finish(), hasher2.finish());
    }

    #[test]
    fn test_peer_id_random_produces_unique() {
        let peer1 = PeerId::random();
        let peer2 = PeerId::random();

        // Random peers should have different addresses (with high probability)
        assert_ne!(peer1.socket_addr(), peer2.socket_addr());
    }

    #[test]
    fn test_peer_id_serialization() {
        let peer = PeerId::random();
        let bytes = peer.to_bytes();
        assert!(!bytes.is_empty());

        // Should be deserializable
        let deserialized: PeerId = bincode::deserialize(&bytes).unwrap();
        assert_eq!(peer.socket_addr(), deserialized.socket_addr());
    }

    #[test]
    fn test_peer_id_display() {
        let peer = PeerId::random();
        let display = format!("{}", peer);
        let debug = format!("{:?}", peer);

        // Display and Debug should produce the same output
        assert_eq!(display, debug);
        // Should not be empty
        assert!(!display.is_empty());
    }

    // InitPeerNode tests
    #[test]
    fn test_init_peer_node_construction() {
        let keypair = TransportKeypair::new();
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1)), 8080);
        let peer_key_location = PeerKeyLocation::new(keypair.public().clone(), addr);
        let location = Location::new(0.5);

        let init_peer = InitPeerNode::new(peer_key_location.clone(), location);

        assert_eq!(init_peer.peer_key_location, peer_key_location);
        assert_eq!(init_peer.location, location);
    }

    // is_operation_completed tests - parametrized
    #[rstest]
    #[case::with_none(Ok(None), false)]
    #[case::with_running_error(Err(OpError::OpNotAvailable(super::OpNotAvailable::Running)), false)]
    #[case::with_state_pushed_error(Err(OpError::StatePushed), false)]
    fn test_is_operation_completed(
        #[case] result: Result<Option<OpEnum>, OpError>,
        #[case] expected: bool,
    ) {
        assert_eq!(is_operation_completed(&result), expected);
    }

    // classify_op_outcome tests
    mod classify_op_outcome_tests {
        use super::super::{classify_op_outcome, network_status::OpType};
        use crate::message::TransactionType;
        use crate::operations::OpOutcome;

        #[test]
        fn irrelevant_counted_as_success() {
            let (op_type, success) =
                classify_op_outcome(TransactionType::Update, OpOutcome::Irrelevant);
            assert!(matches!(op_type, Some(OpType::Update)));
            assert!(success);
        }

        #[test]
        fn incomplete_counted_as_failure() {
            let (op_type, success) =
                classify_op_outcome(TransactionType::Get, OpOutcome::Incomplete);
            assert!(matches!(op_type, Some(OpType::Get)));
            assert!(!success);
        }

        #[test]
        fn connect_skipped() {
            let (op_type, _) = classify_op_outcome(TransactionType::Connect, OpOutcome::Irrelevant);
            assert!(op_type.is_none());

            let (op_type, _) = classify_op_outcome(TransactionType::Connect, OpOutcome::Incomplete);
            assert!(op_type.is_none());
        }

        #[test]
        fn subscribe_irrelevant_is_success() {
            let (op_type, success) =
                classify_op_outcome(TransactionType::Subscribe, OpOutcome::Irrelevant);
            assert!(matches!(op_type, Some(OpType::Subscribe)));
            assert!(success);
        }

        #[test]
        fn put_incomplete_is_failure() {
            let (op_type, success) =
                classify_op_outcome(TransactionType::Put, OpOutcome::Incomplete);
            assert!(matches!(op_type, Some(OpType::Put)));
            assert!(!success);
        }
    }

    // Phase 1 (#1454) tests for forward_pending_op_result_if_completed.
    //
    // These exercise the callback-forwarding helper used by every branch of
    // `handle_pure_network_message_v1`. The helper is the only place that
    // drives the `pending_op_result` oneshot channel from a completed op
    // result back to a caller of `OpCtx::send_and_await`. Phase 1 extended
    // the hook from PUT/GET only to cover SUBSCRIBE/CONNECT/UPDATE as well,
    // so these tests verify the helper forwards correctly for every op
    // variant and short-circuits in the negative cases. (The caller side
    // used to live on `OpManager::notify_op_execution`, which Phase 2a
    // replaced with `OpCtx::send_and_await` — see #1454.)
    mod callback_forward_tests {
        use super::super::{
            OpError, OpNotAvailable, forward_pending_op_result_if_completed,
            try_forward_task_per_tx_reply,
        };
        use crate::message::{MessageStats, NetMessage, NetMessageV1, Transaction};
        use crate::operations::OpEnum;
        use crate::operations::connect::{ConnectMsg, ConnectOp, ConnectState};

        fn completed_connect_op() -> ConnectOp {
            ConnectOp::with_state(ConnectState::Completed)
        }

        fn dummy_reply() -> NetMessage {
            // We don't care about the payload — the helper only looks at
            // `NetMessage::id()` for logging. Use the tx-only `Aborted`
            // variant to avoid building an entire ConnectMsg payload.
            NetMessage::V1(NetMessageV1::Aborted(Transaction::new::<ConnectMsg>()))
        }

        #[tokio::test]
        async fn forwards_reply_when_completed_and_sender_present() {
            let op = completed_connect_op();
            let op_result = Ok(Some(OpEnum::Connect(Box::new(op))));

            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let reply = dummy_reply();
            let expected_id = *reply.id();

            forward_pending_op_result_if_completed(&op_result, Some(&tx), reply);

            let received = rx.try_recv().expect("helper should forward the reply");
            assert_eq!(*received.id(), expected_id);
        }

        #[tokio::test]
        async fn no_forward_when_sender_absent() {
            // Helper must not panic / block when no pending_op_result sender is wired.
            let op = completed_connect_op();
            let op_result = Ok(Some(OpEnum::Connect(Box::new(op))));

            forward_pending_op_result_if_completed(&op_result, None, dummy_reply());
            // Nothing to assert beyond "did not panic".
        }

        #[tokio::test]
        async fn no_forward_when_op_not_completed() {
            // `Ok(None)` and OpError variants should not trigger a send even if
            // a sender is present. This is the guard that keeps in-progress
            // ops (e.g. `SendAndContinue`) from prematurely firing the callback.
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);

            let ok_none: Result<Option<OpEnum>, OpError> = Ok(None);
            forward_pending_op_result_if_completed(&ok_none, Some(&tx), dummy_reply());
            assert!(rx.try_recv().is_err(), "Ok(None) must not forward");

            let err_running: Result<Option<OpEnum>, OpError> =
                Err(OpError::OpNotAvailable(OpNotAvailable::Running));
            forward_pending_op_result_if_completed(&err_running, Some(&tx), dummy_reply());
            assert!(
                rx.try_recv().is_err(),
                "OpNotAvailable::Running must not forward"
            );

            let err_completed: Result<Option<OpEnum>, OpError> =
                Err(OpError::OpNotAvailable(OpNotAvailable::Completed));
            forward_pending_op_result_if_completed(&err_completed, Some(&tx), dummy_reply());
            assert!(
                rx.try_recv().is_err(),
                "OpNotAvailable::Completed must not forward (no OpEnum payload)"
            );
        }

        #[tokio::test]
        async fn no_forward_when_op_in_progress() {
            // A non-completed op state (WaitingForResponses) must not trigger
            // the callback even though the op exists — this is the core guard
            // that keeps mid-flight operations from prematurely terminating
            // an `OpCtx::send_and_await` round-trip.
            use crate::operations::connect::JoinerState;
            use std::collections::HashSet;
            use tokio::time::Instant;

            let waiting = ConnectState::WaitingForResponses(JoinerState {
                target_connections: 1,
                observed_address: None,
                accepted: HashSet::new(),
                last_progress: Instant::now(),
                started_without_address: true,
            });
            let op = ConnectOp::with_state(waiting);
            assert!(
                !op.is_completed(),
                "precondition: WaitingForResponses must not be completed"
            );
            let op_result = Ok(Some(OpEnum::Connect(Box::new(op))));

            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            forward_pending_op_result_if_completed(&op_result, Some(&tx), dummy_reply());
            assert!(
                rx.try_recv().is_err(),
                "in-progress op must not forward to pending_op_result"
            );
        }

        #[tokio::test]
        async fn no_hang_when_receiver_dropped() {
            // Regression guard for the `try_send` channel-safety choice:
            // if the `OpCtx::send_and_await` caller drops its receiver
            // (e.g. cancelled, timed out) before the op completes, the
            // reply side must not block the pure-network-message handler.
            // With `try_send` the send fails with `Closed` and we log;
            // with `.send().await` it would have succeeded but stranded
            // the message. Either way the handler must make progress —
            // the test asserts the helper returns promptly (the
            // `#[tokio::test]` runtime would hang the whole test process
            // on regression).
            let op = completed_connect_op();
            let op_result = Ok(Some(OpEnum::Connect(Box::new(op))));

            let (tx, rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            drop(rx);

            forward_pending_op_result_if_completed(&op_result, Some(&tx), dummy_reply());
            // Returning at all is the assertion.
        }

        // ───────────────────────────────────────────────────────────
        // Phase 2b (#1454) task-per-tx bypass tests for
        // `try_forward_task_per_tx_reply`.
        //
        // The bypass routes a reply directly to an awaiting
        // `OpCtx::send_and_await` caller, skipping the legacy
        // `handle_op_request` path entirely. These tests cover the
        // helper's contract; end-to-end "the SUBSCRIBE branch of
        // `handle_pure_network_message_v1` actually invokes the helper"
        // coverage comes from the `run_client_subscribe` tests added
        // alongside the Phase 2b migration.
        // ───────────────────────────────────────────────────────────

        #[tokio::test]
        async fn bypass_forwards_when_callback_registered() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let reply = dummy_reply();
            let expected_id = *reply.id();

            let taken = try_forward_task_per_tx_reply(Some(&tx), reply, "subscribe");
            assert!(taken, "callback present → bypass must be taken");

            let received = rx
                .try_recv()
                .expect("helper should forward the reply to the callback");
            assert_eq!(*received.id(), expected_id);
        }

        #[tokio::test]
        async fn bypass_returns_false_when_no_callback() {
            // No callback registered → caller must fall through to legacy
            // `handle_op_request`. The helper must not panic and must
            // return `false`.
            let taken = try_forward_task_per_tx_reply(None, dummy_reply(), "subscribe");
            assert!(!taken, "no callback → bypass must not be taken");
        }

        #[tokio::test]
        async fn bypass_returns_true_even_when_receiver_dropped() {
            // Structural rule: once a callback is registered, the bypass
            // is taken — the legacy path must NOT run regardless of
            // whether the task-side receiver is still alive. If the task
            // was cancelled and dropped its receiver, `try_send` fails
            // with `Closed` and we log, but we still return `true` so
            // the caller returns `Ok(None)` from the pipeline.
            //
            // Running `handle_op_request` in this case would call
            // `load_or_init` on an empty DashMap and return
            // `OpNotPresent`, which is meaningless for a tx owned by a
            // (now-dead) task and pointlessly wastes a pipeline
            // iteration.
            let (tx, rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            drop(rx);

            let taken = try_forward_task_per_tx_reply(Some(&tx), dummy_reply(), "subscribe");
            assert!(
                taken,
                "callback present but receiver dropped → bypass still taken"
            );
        }

        /// Pin the bypass call site. Without this regression guard a
        /// future refactor could delete the
        /// `try_forward_task_per_tx_reply` invocation in the SUBSCRIBE
        /// branch of `handle_pure_network_message_v1` and the unit tests
        /// on the helper itself would still pass — because unit coverage
        /// on the helper only proves the helper works, not that it's
        /// wired in. Integration (simulation) failures would catch it
        /// eventually but as end-to-end hangs, which is a noisy signal.
        ///
        /// This test reads the `node.rs` source at compile time via
        /// `include_str!` and asserts that the SUBSCRIBE branch of
        /// `handle_pure_network_message_v1` invokes
        /// `try_forward_task_per_tx_reply` before running
        /// `handle_op_request`. A refactor that deletes the bypass call
        /// will fail this test at the unit-test level (review finding
        /// Testing #1).
        ///
        /// If the match arm structure changes (e.g. SUBSCRIBE branch
        /// moves or is renamed), the string patterns below need to be
        /// updated to match. That's a load-bearing but intentional
        /// coupling — the whole point is to fail loudly when the wiring
        /// changes so the change is noticed.
        #[test]
        fn bypass_is_wired_into_subscribe_branch_regression_guard() {
            // Full file text, read at compile time.
            const SOURCE: &str = include_str!("node.rs");

            // Locate the SUBSCRIBE branch of handle_pure_network_message_v1.
            let subscribe_branch_anchor = "NetMessageV1::Subscribe(ref op) => {";
            let branch_start = SOURCE.find(subscribe_branch_anchor).expect(
                "SUBSCRIBE branch of handle_pure_network_message_v1 not found; \
                         the match arm has been renamed or moved — update this regression guard",
            );

            // Slice a window large enough to contain the branch body up
            // to (and including) the first `handle_op_request` call.
            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<subscribe::SubscribeOp, _>")
                .expect("SUBSCRIBE branch no longer calls handle_op_request — update guard")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            // The bypass helper MUST be invoked BEFORE the legacy
            // handle_op_request call. If this assertion fails, either:
            //   (a) the bypass was removed (regression — re-add it), or
            //   (b) the branch was restructured (update this guard).
            assert!(
                window.contains("try_forward_task_per_tx_reply("),
                "SUBSCRIBE branch no longer calls try_forward_task_per_tx_reply \
                 before handle_op_request. This is the bypass Phase 2b (#1454) \
                 added to prevent task-per-tx callers from hanging on replies \
                 that load_or_init would drop as OpNotPresent. Either restore \
                 the bypass invocation or update this regression guard if the \
                 branch has been legitimately refactored."
            );

            // The bypass MUST be gated on Response-only. Without this
            // filter, non-terminal messages like ForwardingAck fill the
            // capacity-1 reply channel and cause UnexpectedOpState
            // (commit 5cb6f37c).
            assert!(
                window.contains("matches!(op, subscribe::SubscribeMsg::Response { .. })"),
                "SUBSCRIBE branch bypass is not gated on Response-only. \
                 Non-terminal messages (ForwardingAck, Unsubscribe) must NOT \
                 be forwarded to the task-per-tx channel — they would fill \
                 the capacity-1 reply slot and block the real Response."
            );
        }

        #[tokio::test]
        async fn bypass_does_not_block_when_channel_already_full() {
            // Defensive regression: `try_send` on a full channel must
            // fail without blocking the pure-network-message handler.
            // Although Phase 1's dedup guarantees the callback fires at
            // most once per tx (so a "full" capacity-1 channel
            // shouldn't happen in practice), this test pins the
            // non-blocking contract so future refactors can't
            // accidentally switch to `.send().await` and reintroduce
            // the class of bug documented in
            // `.claude/rules/channel-safety.md`.
            let (tx, _rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            // Pre-fill the capacity-1 channel.
            tx.try_send(dummy_reply())
                .expect("capacity-1 channel should accept first message");

            let taken = try_forward_task_per_tx_reply(Some(&tx), dummy_reply(), "subscribe");
            assert!(
                taken,
                "callback present but channel full → bypass still taken"
            );
            // The test would hang on regression: blocking `send().await`
            // on a full channel whose receiver is still alive would
            // stall the `#[tokio::test]` runtime indefinitely.
        }

        // Note on per-variant coverage: Phase 1's point is that every op
        // variant of `handle_pure_network_message_v1` can terminate an
        // `OpCtx::send_and_await` round-trip. The helper tested above is
        // variant-agnostic once the `is_operation_completed` guard passes,
        // and each op's own `is_completed` impl is covered by unit tests in
        // `crates/core/src/operations/{connect,put,get,subscribe,update}.rs`.
        // The remaining "do the five branches of `handle_pure_network_message_v1`
        // actually invoke the helper with the matching reply variant?"
        // question is enforced by the compiler — each branch binds `ref op`
        // for the concrete op type and reconstructs the same variant before
        // handing it to `forward_pending_op_result_if_completed`. An
        // end-to-end integration test that spins up a node and exercises
        // `OpCtx::send_and_await` for each op kind belongs in Phase 2b,
        // where the first real production caller is added.

        // ───────────────────────────────────────────────────────────
        // Regression tests for the subscribe-branch message-type
        // filter added in the ForwardingAck fix (5cb6f37c).
        //
        // The bug: `try_forward_task_per_tx_reply` was called for ALL
        // subscribe message types (including ForwardingAck). A relay
        // peer's ForwardingAck would fill the capacity-1 reply
        // channel, causing the task to receive it instead of the
        // real Response and fail with UnexpectedOpState.
        //
        // These tests verify the filtering logic that
        // `handle_pure_network_message_v1` applies BEFORE calling the
        // bypass helper: only `SubscribeMsg::Response` is forwarded.
        // ───────────────────────────────────────────────────────────

        use crate::operations::VisitedPeers;
        use crate::operations::subscribe::{SubscribeMsg, SubscribeMsgResult};

        /// Helper: simulate the filtering logic from the SUBSCRIBE
        /// branch of `handle_pure_network_message_v1`. Returns
        /// `true` if the message would be forwarded to the
        /// task-per-tx channel (and the branch would return early).
        fn subscribe_branch_would_forward(
            op: &SubscribeMsg,
            callback: Option<&tokio::sync::mpsc::Sender<NetMessage>>,
        ) -> bool {
            matches!(op, SubscribeMsg::Response { .. })
                && try_forward_task_per_tx_reply(
                    callback,
                    NetMessage::V1(NetMessageV1::Subscribe(op.clone())),
                    "subscribe",
                )
        }

        #[tokio::test]
        async fn subscribe_response_is_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let sub_tx = Transaction::new::<SubscribeMsg>();
            let instance_id = freenet_stdlib::prelude::ContractInstanceId::new([1u8; 32]);
            let key = freenet_stdlib::prelude::ContractKey::from_id_and_code(
                instance_id,
                freenet_stdlib::prelude::CodeHash::new([2u8; 32]),
            );
            let op = SubscribeMsg::Response {
                id: sub_tx,
                instance_id,
                result: SubscribeMsgResult::Subscribed { key },
            };

            let taken = subscribe_branch_would_forward(&op, Some(&tx));
            assert!(taken, "Response with callback → must be forwarded");

            let received = rx.try_recv().expect("Response should be in channel");
            assert_eq!(*received.id(), sub_tx);
        }

        #[tokio::test]
        async fn forwarding_ack_is_not_forwarded_to_task() {
            // ForwardingAck is non-terminal: relay peers send it to
            // signal "I'm working on it". Forwarding it would fill
            // the capacity-1 channel and block the real Response.
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let sub_tx = Transaction::new::<SubscribeMsg>();
            let instance_id = freenet_stdlib::prelude::ContractInstanceId::new([3u8; 32]);
            let op = SubscribeMsg::ForwardingAck {
                id: sub_tx,
                instance_id,
            };

            let taken = subscribe_branch_would_forward(&op, Some(&tx));
            assert!(
                !taken,
                "ForwardingAck must NOT be forwarded to task channel"
            );
            assert!(
                rx.try_recv().is_err(),
                "channel must remain empty after ForwardingAck"
            );
        }

        #[tokio::test]
        async fn unsubscribe_is_not_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let sub_tx = Transaction::new::<SubscribeMsg>();
            let instance_id = freenet_stdlib::prelude::ContractInstanceId::new([4u8; 32]);
            let op = SubscribeMsg::Unsubscribe {
                id: sub_tx,
                instance_id,
            };

            let taken = subscribe_branch_would_forward(&op, Some(&tx));
            assert!(!taken, "Unsubscribe must NOT be forwarded to task channel");
            assert!(rx.try_recv().is_err(), "channel must remain empty");
        }

        #[tokio::test]
        async fn request_is_not_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let sub_tx = Transaction::new::<SubscribeMsg>();
            let instance_id = freenet_stdlib::prelude::ContractInstanceId::new([5u8; 32]);
            let op = SubscribeMsg::Request {
                id: sub_tx,
                instance_id,
                htl: 5,
                visited: VisitedPeers::new(&sub_tx),
                is_renewal: false,
            };

            let taken = subscribe_branch_would_forward(&op, Some(&tx));
            assert!(!taken, "Request must NOT be forwarded to task channel");
            assert!(rx.try_recv().is_err(), "channel must remain empty");
        }

        #[tokio::test]
        async fn response_without_callback_falls_through() {
            // No callback registered (legacy path) — filter must
            // return false so handle_op_request runs.
            let sub_tx = Transaction::new::<SubscribeMsg>();
            let instance_id = freenet_stdlib::prelude::ContractInstanceId::new([6u8; 32]);
            let op = SubscribeMsg::Response {
                id: sub_tx,
                instance_id,
                result: SubscribeMsgResult::NotFound,
            };

            let taken = subscribe_branch_would_forward(&op, None);
            assert!(
                !taken,
                "Response without callback → must fall through to legacy path"
            );
        }

        // ───────────────────────────────────────────────────────────
        // Regression guard: PUT branch of handle_pure_network_message_v1
        // must call try_forward_task_per_tx_reply before handle_op_request,
        // gated on Response|ResponseStreaming only. Mirror of the SUBSCRIBE
        // guard above. Added in Phase 3a (#1454).
        // ───────────────────────────────────────────────────────────

        #[test]
        fn bypass_is_wired_into_put_branch_regression_guard() {
            const SOURCE: &str = include_str!("node.rs");

            let put_branch_anchor = "NetMessageV1::Put(ref op) => {";
            let branch_start = SOURCE.find(put_branch_anchor).expect(
                "PUT branch of handle_pure_network_message_v1 not found; \
                 the match arm has been renamed or moved — update this regression guard",
            );

            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<put::PutOp, _>")
                .expect("PUT branch no longer calls handle_op_request — update guard")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            assert!(
                window.contains("try_forward_task_per_tx_reply("),
                "PUT branch no longer calls try_forward_task_per_tx_reply \
                 before handle_op_request. This is the bypass Phase 3a (#1454) \
                 added to prevent task-per-tx callers from hanging on replies \
                 that load_or_init would drop as OpNotPresent. Either restore \
                 the bypass invocation or update this regression guard if the \
                 branch has been legitimately refactored."
            );

            assert!(
                window.contains("put::PutMsg::Response { .. }"),
                "PUT branch bypass is not gated on Response. \
                 Non-terminal messages must NOT be forwarded to the task-per-tx channel."
            );

            assert!(
                window.contains("put::PutMsg::ResponseStreaming { .. }"),
                "PUT branch bypass is not gated on ResponseStreaming. \
                 Both terminal variants must be forwarded."
            );
        }

        // ───────────────────────────────────────────────────────────
        // Per-variant filter tests for the PUT branch bypass.
        // Mirror of the subscribe filter tests above. Verifies that
        // only Response and ResponseStreaming are forwarded to the
        // task-per-tx channel; all other variants must fall through
        // to handle_op_request.
        // ───────────────────────────────────────────────────────────

        use crate::operations::put::PutMsg;
        use freenet_stdlib::prelude::*;

        fn dummy_put_key(a: u8, b: u8) -> ContractKey {
            ContractKey::from_id_and_code(ContractInstanceId::new([a; 32]), CodeHash::new([b; 32]))
        }

        fn put_branch_would_forward(
            op: &PutMsg,
            callback: Option<&tokio::sync::mpsc::Sender<NetMessage>>,
        ) -> bool {
            matches!(
                op,
                PutMsg::Response { .. } | PutMsg::ResponseStreaming { .. }
            ) && try_forward_task_per_tx_reply(
                callback,
                NetMessage::V1(NetMessageV1::Put(op.clone())),
                "put",
            )
        }

        #[tokio::test]
        async fn put_response_is_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let put_tx = Transaction::new::<PutMsg>();
            let key = dummy_put_key(10, 11);
            let op = PutMsg::Response { id: put_tx, key };

            let taken = put_branch_would_forward(&op, Some(&tx));
            assert!(taken, "Response with callback → must be forwarded");

            let received = rx.try_recv().expect("Response should be in channel");
            assert_eq!(*received.id(), put_tx);
        }

        #[tokio::test]
        async fn put_response_streaming_is_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let put_tx = Transaction::new::<PutMsg>();
            let key = dummy_put_key(12, 13);
            let op = PutMsg::ResponseStreaming {
                id: put_tx,
                key,
                continue_forwarding: false,
            };

            let taken = put_branch_would_forward(&op, Some(&tx));
            assert!(taken, "ResponseStreaming with callback → must be forwarded");

            let received = rx
                .try_recv()
                .expect("ResponseStreaming should be in channel");
            assert_eq!(*received.id(), put_tx);
        }

        #[tokio::test]
        async fn put_forwarding_ack_is_not_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let put_tx = Transaction::new::<PutMsg>();
            let key = dummy_put_key(14, 15);
            let op = PutMsg::ForwardingAck {
                id: put_tx,
                contract_key: key,
            };

            let taken = put_branch_would_forward(&op, Some(&tx));
            assert!(
                !taken,
                "ForwardingAck must NOT be forwarded to task channel"
            );
            assert!(
                rx.try_recv().is_err(),
                "channel must remain empty after ForwardingAck"
            );
        }

        #[tokio::test]
        async fn put_request_is_not_forwarded_to_task() {
            let (tx, mut rx) = tokio::sync::mpsc::channel::<NetMessage>(1);
            let put_tx = Transaction::new::<PutMsg>();
            let op = PutMsg::Request {
                id: put_tx,
                contract: ContractContainer::Wasm(ContractWasmAPIVersion::V1(
                    WrappedContract::new(
                        std::sync::Arc::new(ContractCode::from(vec![0u8])),
                        Parameters::from(vec![]),
                    ),
                )),
                related_contracts: RelatedContracts::default(),
                value: WrappedState::new(vec![1u8]),
                htl: 5,
                skip_list: std::collections::HashSet::new(),
            };

            let taken = put_branch_would_forward(&op, Some(&tx));
            assert!(!taken, "Request must NOT be forwarded to task channel");
            assert!(rx.try_recv().is_err(), "channel must remain empty");
        }

        #[tokio::test]
        async fn put_response_without_callback_falls_through() {
            let put_tx = Transaction::new::<PutMsg>();
            let key = dummy_put_key(16, 17);
            let op = PutMsg::Response { id: put_tx, key };

            let taken = put_branch_would_forward(&op, None);
            assert!(
                !taken,
                "Response without callback → must fall through to legacy path"
            );
        }

        // ───────────────────────────────────────────────────────────
        // Regression guards for the GET branch of
        // handle_pure_network_message_v1 added in #3883 (phase 5).
        //
        // The GET branch has two distinct dispatch layers:
        //
        //   1. Phase-3b bypass: terminal Response/ResponseStreaming for an
        //      active client driver → try_forward_task_per_tx_reply (unchanged).
        //
        //   2. Relay dispatch (new in commit 2): GetMsg::Request with
        //      source_addr.is_some() and no existing GetOp in OpManager →
        //      start_relay_get (task-per-tx relay driver).
        //
        // Layer 1 is the same as PUT/SUBSCRIBE; the guards below cover
        // layer 2 (source-literal, not runtime dispatch).
        // ───────────────────────────────────────────────────────────

        // Source-literal guard: verify the GET branch invokes
        // try_forward_task_per_tx_reply before start_relay_get dispatch,
        // gated on Response|ResponseStreaming only (phase-3b bypass).
        #[test]
        fn bypass_is_wired_into_get_branch_regression_guard() {
            const SOURCE: &str = include_str!("node.rs");

            let get_branch_anchor = "NetMessageV1::Get(ref op) => {";
            let branch_start = SOURCE.find(get_branch_anchor).expect(
                "GET branch of handle_pure_network_message_v1 not found; \
                 the match arm has been renamed or moved — update this regression guard",
            );

            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<get::GetOp, _>")
                .expect("GET branch no longer calls handle_op_request — update guard")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            // Phase-3b bypass must still be present (unchanged).
            assert!(
                window.contains("try_forward_task_per_tx_reply("),
                "GET branch no longer calls try_forward_task_per_tx_reply \
                 before handle_op_request. Phase-3b (#1454) bypass removed — restore it."
            );

            // Bypass must be gated on terminal variants only.
            assert!(
                window.contains("get::GetMsg::Response { .. }"),
                "GET branch bypass is not gated on Response. \
                 Non-terminal messages must NOT be forwarded to the task-per-tx channel."
            );

            assert!(
                window.contains("get::GetMsg::ResponseStreaming { .. }"),
                "GET branch bypass is not gated on ResponseStreaming. \
                 Both terminal variants must be forwarded."
            );

            // Relay dispatch must call start_relay_get.
            assert!(
                window.contains("start_relay_get("),
                "GET branch no longer calls start_relay_get for relay dispatch. \
                 #3883 phase-5 relay dispatch was removed — restore it."
            );

            // Relay dispatch must be gated on source_addr (relay vs. loop-back).
            assert!(
                window.contains("source_addr"),
                "GET branch relay dispatch is not gated on source_addr. \
                 Originator loop-back (source_addr.is_none()) must fall through to legacy."
            );

            // Relay dispatch must guard on no existing GetOp (has_get_op).
            assert!(
                window.contains("has_get_op"),
                "GET branch relay dispatch is not guarded by has_get_op. \
                 GC-spawned retries must fall through to legacy handle_op_request."
            );
        }

        // Source-literal guard: verify the GET branch wires
        // try_forward_task_per_tx_reply before start_relay_get.
        // (Ordering: phase-3b bypass comes first in source order.)
        #[test]
        fn get_branch_phase3b_bypass_precedes_relay_dispatch() {
            const SOURCE: &str = include_str!("node.rs");

            let get_branch_anchor = "NetMessageV1::Get(ref op) => {";
            let branch_start = SOURCE
                .find(get_branch_anchor)
                .expect("GET branch not found");

            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<get::GetOp, _>")
                .expect("GET branch no longer calls handle_op_request")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            let bypass_pos = window
                .find("try_forward_task_per_tx_reply(")
                .expect("try_forward_task_per_tx_reply not found in GET branch");
            let relay_pos = window
                .find("start_relay_get(")
                .expect("start_relay_get not found in GET branch");

            assert!(
                bypass_pos < relay_pos,
                "Phase-3b bypass (try_forward_task_per_tx_reply) must appear \
                 BEFORE relay dispatch (start_relay_get) in the GET branch. \
                 Swapping order would break the client-driver terminal-reply fast path."
            );
        }

        // ───────────────────────────────────────────────────────────
        // Regression guards for the UPDATE branch of
        // handle_pure_network_message_v1 added in #1454 phase 5
        // follow-up (slice A: relay UPDATE task-per-tx).
        //
        // UPDATE is fire-and-forget end-to-end — there is no
        // upstream reply to await, so there is no phase-3b bypass
        // (no reply channel to forward into). Only relay dispatch
        // is wired here.
        // ───────────────────────────────────────────────────────────
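
        // A hedged sketch of the UPDATE arm shape these guards enforce.
        // The variant-to-driver pairing (RequestUpdate vs. BroadcastTo) is
        // inferred from the driver names and is an assumption, not a copy
        // of node.rs:
        //
        //     NetMessageV1::Update(ref op) => {
        //         if source_addr.is_some() && !has_update_op {
        //             match op {
        //                 update::UpdateMsg::RequestUpdate { .. } => {
        //                     start_relay_request_update(/* ... */)
        //                 }
        //                 update::UpdateMsg::BroadcastTo { .. } => {
        //                     start_relay_broadcast_to(/* ... */)
        //                 }
        //                 // Streaming + deprecated Broadcasting variants
        //                 // stay on the legacy path in slice A.
        //                 _ => handle_op_request::<update::UpdateOp, _>(/* ... */),
        //             }
        //         } else {
        //             handle_op_request::<update::UpdateOp, _>(/* ... */)
        //         }
        //     }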

        #[test]
        fn update_branch_dispatch_calls_relay_drivers() {
            const SOURCE: &str = include_str!("node.rs");

            let anchor = "NetMessageV1::Update(ref op) => {";
            let branch_start = SOURCE.find(anchor).expect(
                "UPDATE branch of handle_pure_network_message_v1 not found; \
                 the match arm has been renamed or moved — update this regression guard",
            );

            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<update::UpdateOp, _>")
                .expect("UPDATE branch no longer calls handle_op_request — update guard")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            assert!(
                window.contains("start_relay_request_update("),
                "UPDATE branch no longer calls start_relay_request_update for relay \
                 dispatch. #1454 phase-5 relay UPDATE dispatch was removed — restore it."
            );
            assert!(
                window.contains("start_relay_broadcast_to("),
                "UPDATE branch no longer calls start_relay_broadcast_to for relay \
                 dispatch. #1454 phase-5 relay UPDATE dispatch was removed — restore it."
            );
        }

        #[test]
        fn update_branch_dispatch_gates_on_source_and_existing_op() {
            const SOURCE: &str = include_str!("node.rs");

            let anchor = "NetMessageV1::Update(ref op) => {";
            let branch_start = SOURCE.find(anchor).expect("UPDATE branch not found");
            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<update::UpdateOp, _>")
                .expect("UPDATE branch no longer calls handle_op_request")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            assert!(
                window.contains("source_addr"),
                "UPDATE relay dispatch must be gated on source_addr — \
                 internal callers (source_addr.is_none()) must fall through to legacy."
            );
            assert!(
                window.contains("has_update_op"),
                "UPDATE relay dispatch must be guarded by has_update_op — \
                 GC-spawned retries / pre-registered ops must fall through to legacy."
            );
        }

        #[test]
        fn update_branch_does_not_dispatch_streaming_or_broadcasting() {
            const SOURCE: &str = include_str!("node.rs");

            let anchor = "NetMessageV1::Update(ref op) => {";
            let branch_start = SOURCE.find(anchor).expect("UPDATE branch not found");
            let window_end = SOURCE[branch_start..]
                .find("handle_op_request::<update::UpdateOp, _>")
                .expect("UPDATE branch no longer calls handle_op_request")
                + branch_start;
            let window = &SOURCE[branch_start..window_end];

            // Streaming + Broadcasting variants stay on the legacy path in
            // slice A; the drivers must not be invoked for them. The
            // `|| window.contains("// Streaming variants")` escape hatch
            // below tolerates a match arm that names a streaming variant
            // only to route it to legacy, flagged by that exact comment
            // in node.rs.
            assert!(
                !window.contains("RequestUpdateStreaming {")
                    || window.contains("// Streaming variants"),
                "UPDATE branch appears to dispatch RequestUpdateStreaming through \
                 the relay driver. Slice A keeps streaming on the legacy path — \
                 see docs/port-plans/relay-update-task-per-tx.md."
            );
            assert!(
                !window.contains("BroadcastToStreaming {")
                    || window.contains("// Streaming variants"),
                "UPDATE branch appears to dispatch BroadcastToStreaming through \
                 the relay driver. Slice A keeps streaming on the legacy path."
            );
            assert!(
                !window.contains("UpdateMsg::Broadcasting {"),
                "UPDATE branch dispatches deprecated Broadcasting variant through \
                 the relay driver. Broadcasting must stay on the legacy path."
            );
        }
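
        // The window extractions above repeat the same anchor/find dance.
        // A minimal consolidation sketch follows; it is not wired into the
        // guards above (each keeps its inline copy so failure messages stay
        // self-contained), hence the dead_code allow.
        #[allow(dead_code)]
        fn branch_window<'a>(source: &'a str, anchor: &str, handle_fn: &str) -> &'a str {
            // Locate the match arm, then cut the window at the legacy
            // handle_op_request call that terminates the dispatch prelude.
            let start = source
                .find(anchor)
                .unwrap_or_else(|| panic!("branch anchor {anchor:?} not found"));
            let end = start
                + source[start..]
                    .find(handle_fn)
                    .unwrap_or_else(|| panic!("{handle_fn:?} not found after anchor"));
            &source[start..end]
        }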
    }
}