supermachine 0.2.0

Run any OCI/Docker image as a hardware-isolated microVM on macOS via Hypervisor.framework (HVF); Linux KVM and Windows WHP support is in progress. Single library API, zero flags for the common case, sub-100 ms cold restore from snapshot.
// vsock TSI muxer: connection table + REQUEST→RESPONSE/RST routing,
// TSI control DGRAM (ports 1024..1031) handling, host TCP listeners
// bound on TSI_LISTEN (tsi_stream::TsiListener), outbound TCP via
// TSI_CONNECT, UDP datagrams via per-socket UdpSocket, and the env
// JSON service on VSOCK_ENV_PORT.
//
// All inbound stream and UDP I/O is owned by a single muxer-io thread
// (muxer_thread); TSI listeners can be snapshotted and re-bound across
// restore. Still missing (later commits): a reaper thread, so fallback
// proxies and cached UDP sockets live until reset() or VM shutdown.

#![allow(dead_code)]

use std::collections::HashMap;
use std::net::{SocketAddr, TcpStream, UdpSocket};
use std::os::unix::net::UnixStream;
use std::sync::{mpsc, Arc, Mutex};
use std::time::Instant;

use super::mux_profile::{self, Stage};
use super::muxer_rxq::MuxerRxQ;
use super::muxer_thread::{self, MuxerCmd, MuxerStream};
use super::packet::{
    Header, RxPacket, VSOCK_HOST_CID, VSOCK_OP_CREDIT_REQUEST, VSOCK_OP_CREDIT_UPDATE,
    VSOCK_OP_REQUEST, VSOCK_OP_RESPONSE, VSOCK_OP_RST, VSOCK_OP_RW, VSOCK_OP_SHUTDOWN,
    VSOCK_TYPE_DGRAM, VSOCK_TYPE_STREAM,
};
use super::proxy::{proxy_key, Proxy};
use super::tsi_stream::TsiListener;

/// Well-known host ports and AF_TSI control opcodes. The control
/// opcodes (TSI_PROXY_CREATE..=TSI_PROXY_RELEASE, i.e. 1024..=1031)
/// are encoded as the dst_port of DGRAM packets sent by the guest
/// kernel's TSI patches; TSI_PROXY_PORT and VSOCK_ENV_PORT are plain
/// stream destinations (VSOCK_ENV_PORT numerically overlaps
/// TSI_GETNAME; the two are distinguished by packet type). See the
/// patch series under `kernel-build/patches/af-tsi/`.
pub const TSI_PROXY_PORT: u32 = 620;
pub const VSOCK_ENV_PORT: u32 = 1026;
pub const TSI_PROXY_CREATE: u32 = 1024;
pub const TSI_CONNECT: u32 = 1025;
pub const TSI_GETNAME: u32 = 1026;
pub const TSI_SENDTO_ADDR: u32 = 1027;
pub const TSI_SENDTO_DATA: u32 = 1028;
pub const TSI_LISTEN: u32 = 1029;
pub const TSI_ACCEPT: u32 = 1030;
pub const TSI_PROXY_RELEASE: u32 = 1031;

/// Snapshot record for one TSI listener: the (cid, peer_port, vm_port,
/// family, socktype) tuple needed to re-bind a host TcpListener on
/// restore. The host listener itself can't be serialized (OS socket
/// fd) — it gets rebuilt on restore via `restore_tsi_listeners`.
#[derive(Clone, Debug)]
pub struct TsiListenerSnapshot {
    pub cid: u64,
    pub peer_port: u32,
    pub vm_port: u32,
    pub family: u16,
    pub socktype: u16,
}

/// State for one TSI proxy socket. Created on TSI_PROXY_CREATE,
/// activated on TSI_LISTEN, which binds a host-side TsiListener and
/// records the guest's vm_port so subsequent LISTEN/ACCEPT control
/// calls can be answered.
struct TsiState {
    family: u16,
    socktype: u16,
    /// vm_port from TSI_LISTEN — guest's listening port.
    vm_port: Option<u32>,
    /// Host TCP listener bound on TSI_LISTEN. Kept alive until
    /// reset() or VM shutdown (PROXY_RELEASE is deliberately a
    /// no-op; see handle_tsi_control).
    listener: Option<TsiListener>,
}

/// In-flight inbound TCP conn that's been accepted on the host
/// side but the guest hasn't sent RESPONSE yet.
pub struct PendingInbound {
    pub cid: u64,
    pub host_src_port: u32,
    pub vm_port: u32,
    pub stream: Option<MuxerStream>,
}

/// In-flight outbound: guest sent TSI_CONNECT(peer_port, target);
/// we opened TCP to target and now park it until the guest's
/// follow-up VSOCK_OP_REQUEST(src=peer_port, dst=TSI_PROXY_PORT)
/// arrives. Keyed by (cid, peer_port).
pub struct PendingOutbound {
    pub cid: u64,
    pub peer_port: u32,
    pub tcp: Option<TcpStream>,
}

/// Per-conn flow-control accounting. The vsock peer sees us as
/// having `OUR_BUF_ALLOC` bytes of receive buffer; every byte they
/// send must eventually be acknowledged via fwd_cnt, or they stop
/// sending once their in-flight count reaches peer_buf_alloc minus
/// our advertised fwd_cnt (= OUR_BUF_ALLOC if we never advance it).
/// We bump our_fwd_cnt as we consume (which is immediate, since we
/// forward straight to TCP) and emit a CREDIT_UPDATE once half the
/// buffer has been consumed since the last update.
pub struct InboundState {
    pub our_fwd_cnt: u32,
    pub last_credit_fwd_cnt: u32,
    pub guest_dst_port: u32,
}

/// Buffer size we advertise to the guest. We forward bytes to
/// host TCP synchronously, so we have effectively-unlimited
/// receive capacity from the guest's perspective. Set to
/// (u32::MAX / 2) so the guest's flow-control math never sees
/// us as "full"; we still emit periodic CREDIT_UPDATE for
/// correctness but guest never blocks waiting for it.
pub const OUR_BUF_ALLOC: u32 = u32::MAX / 2;
const CREDIT_UPDATE_THRESHOLD: u32 = OUR_BUF_ALLOC / 2;
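// Concretely: OUR_BUF_ALLOC = u32::MAX / 2 = 2_147_483_647 bytes (~2 GiB),
// so CREDIT_UPDATE_THRESHOLD is ~1 GiB and a CREDIT_UPDATE goes out roughly
// once per gigabyte the guest sends on a connection, comfortably inside the
// u32 wrap of fwd_cnt.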

fn body_le_u16(body: &[u8], offset: usize) -> Option<u16> {
    let bytes = body.get(offset..offset + 2)?.try_into().ok()?;
    Some(u16::from_le_bytes(bytes))
}

fn body_be_u16(body: &[u8], offset: usize) -> Option<u16> {
    let bytes = body.get(offset..offset + 2)?.try_into().ok()?;
    Some(u16::from_be_bytes(bytes))
}

fn body_le_u32(body: &[u8], offset: usize) -> Option<u32> {
    let bytes = body.get(offset..offset + 4)?.try_into().ok()?;
    Some(u32::from_le_bytes(bytes))
}

/// Guest connects to (HOST_CID, VSOCK_ENV_PORT) via plain AF_VSOCK to
/// fetch its env JSON. Set at startup by `--env` / `--env-file`. The
/// blob is delivered as a single VSOCK_OP_RW followed by SHUTDOWN.
/// Format: `{"env":{"K":"V",...},"secrets":{"K":"V",...}}`.
pub static VSOCK_ENV_JSON: Mutex<Option<String>> = Mutex::new(None);

pub fn set_env_json(json: String) {
    *VSOCK_ENV_JSON.lock().unwrap() = Some(json);
}

/// Cached `$SUPERMACHINE_VSOCK_TRACE` flag. Calling
/// `std::env::var_os(...)` per packet path is a real perf hit:
/// `getenv` takes a libc global lock + linear scan of `environ`.
/// At nginx c=64 it dominated muxer-io non-syscall CPU. Read
/// once on first use, cache. Toggling at runtime no longer
/// supported — set the env before the worker starts.
#[inline]
pub fn vsock_trace_enabled() -> bool {
    use std::sync::atomic::{AtomicU8, Ordering};
    static CACHED: AtomicU8 = AtomicU8::new(0);
    let v = CACHED.load(Ordering::Relaxed);
    if v != 0 {
        return v == 2;
    }
    // The actual env lookup: this is the one place that still calls
    // std::env::var_os directly. The sed rewrite that pointed every
    // other call site in this file at vsock_trace_enabled() must not
    // touch this line, or the helper would recurse into itself.
    let on = std::env::var_os("SUPERMACHINE_VSOCK_TRACE").is_some();
    CACHED.store(if on { 2 } else { 1 }, Ordering::Relaxed);
    on
}

pub struct VsockMuxer {
    cid: u64,
    /// (local_port, peer_port) → Proxy.
    proxies: Mutex<HashMap<u64, Box<dyn Proxy>>>,
    /// (cid, host_src_port) → PendingInbound waiting for guest's
    /// RESPONSE to a host-initiated REQUEST.
    pending_inbound: Arc<Mutex<HashMap<(u64, u32), PendingInbound>>>,
    /// (cid, peer_port) → PendingOutbound. Set by TSI_CONNECT,
    /// consumed by VSOCK_OP_REQUEST(dst=TSI_PROXY_PORT) from guest.
    pending_outbound: Mutex<HashMap<(u64, u32), PendingOutbound>>,
    /// (cid, peer_port) → staged dst SocketAddr from TSI_SENDTO_ADDR,
    /// consumed by the next TSI_SENDTO_DATA.
    udp_dst: Mutex<HashMap<(u64, u32), SocketAddr>>,
    /// (cid, peer_port) → persistent UdpSocket. Send fd shared with
    /// the muxer-io thread (which holds a try_clone()'d recv fd).
    udp_sockets: Mutex<HashMap<(u64, u32), Arc<UdpSocket>>>,
    /// (cid, host_src_port) → InboundState. The TCP stream lives
    /// in the muxer-io thread; we keep flow-control accounting
    /// here so handle_rw can emit CREDIT_UPDATE packets.
    inbound_conns: Mutex<HashMap<(u64, u32), InboundState>>,
    /// Sender to the single muxer I/O thread. Owns ALL inbound
    /// TcpStreams, replaces per-conn reader+writer threads.
    io_tx: mpsc::Sender<MuxerCmd>,
    /// Waker to nudge the I/O thread after sending a command.
    io_waker: Arc<mio::Waker>,
    /// (peer_cid, peer_port) → TsiState. Indexed by the guest CID
    /// (which is always our `cid` for in-guest TSI) and the
    /// guest's chosen `peer_port` from the TSI control DGRAMs.
    tsi: Mutex<HashMap<(u64, u32), TsiState>>,
    /// Outbound RX queue (intents waiting for guest descriptors).
    /// The vsock device drains this on every notify() and pushes
    /// the packets into RX virtq. Anyone (TsiListener accept
    /// thread, future TsiConn pump threads) can push to it.
    pub rxq: Arc<Mutex<MuxerRxQ>>,
    /// Callback the muxer invokes after pushing to rxq, to wake
    /// the vsock device so it drains. Set by the device after
    /// construction (it has the IRQ-raise closure).
    kick_device: Mutex<Option<Arc<dyn Fn() + Send + Sync>>>,
}
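
// Lifecycle of one inbound (host→guest) stream, stitched together from
// the pieces above:
//
//   1. A TsiListener accept thread (or open_tcp_to_guest /
//      open_unix_to_guest / open_native_to_guest) parks the host-side
//      stream in `pending_inbound` and pushes an OP_REQUEST onto `rxq`.
//   2. The guest answers with OP_RESPONSE; handle_response hands the
//      stream to the muxer-io thread (MuxerCmd::Register) and records
//      flow-control state in `inbound_conns`.
//   3. Guest→host bytes arrive as OP_RW and are forwarded with
//      MuxerCmd::Write; host→guest bytes come back through the io
//      thread's on_data callback as OP_RW packets pushed onto `rxq`.
//   4. A guest-side OP_SHUTDOWN/OP_RST lands in handle_close, which
//      drops the `inbound_conns` entry and sends MuxerCmd::Close; a
//      host-side EOF surfaces as an empty on_data callback, which is
//      forwarded to the guest as OP_SHUTDOWN.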

impl VsockMuxer {
    pub fn new(cid: u64) -> Result<Self, muxer_thread::StartError> {
        let (io_tx, io_waker) = muxer_thread::spawn()?;
        Ok(Self {
            cid,
            proxies: Mutex::new(HashMap::new()),
            pending_inbound: Arc::new(Mutex::new(HashMap::new())),
            pending_outbound: Mutex::new(HashMap::new()),
            udp_dst: Mutex::new(HashMap::new()),
            udp_sockets: Mutex::new(HashMap::new()),
            inbound_conns: Mutex::new(HashMap::new()),
            tsi: Mutex::new(HashMap::new()),
            rxq: Arc::new(Mutex::new(MuxerRxQ::new())),
            kick_device: Mutex::new(None),
            io_tx,
            io_waker,
        })
    }

    pub fn set_kick(&self, kick: Arc<dyn Fn() + Send + Sync>) {
        *self.kick_device.lock().unwrap() = Some(kick);
    }

    /// True when no per-request transport state is still in flight.
    ///
    /// Persistent TSI listeners and cached UDP sockets are intentionally
    /// allowed to remain alive here: the pool can snapshot an idle listening
    /// service, but must not interrupt active TCP streams, queued packets, or
    /// pending outbound connects.
    pub fn is_transport_idle(&self) -> bool {
        self.pending_inbound.lock().unwrap().is_empty()
            && self.pending_outbound.lock().unwrap().is_empty()
            && self.inbound_conns.lock().unwrap().is_empty()
            && self.rxq.lock().unwrap().is_empty()
    }

    /// Drain all per-dispatch state so the muxer is fresh for the
    /// next RESTORE. Pool-worker mode calls this between dispatches.
    /// Concrete steps:
    /// - Drop every TsiState (their TsiListener::Drop closes the
    ///   listener fd + joins the accept thread → no leaked threads).
    /// - Clear all in-flight bookkeeping (proxies, pending_*, udp_*,
    ///   inbound_conns, rxq).
    /// - Send MuxerCmd::Reset to the io thread to drop its tcp/udp
    ///   registrations.
    /// - Keep the io thread + waker + io_tx alive (re-used).
    pub fn reset(&self) {
        // Order matters: drop TSI listeners FIRST so accept threads
        // can't push fresh connections while we're clearing maps.
        self.tsi.lock().unwrap().clear();
        // Reset io thread state and wait for the boundary before
        // draining queues below. Without the ack, a rapid restore can
        // register a fresh stream before the previous async Reset is
        // processed, then have that stale Reset drop the new stream.
        // Also, the io thread can enqueue EOF/SHUTDOWN callbacks while
        // dropping old streams, so rxq must be drained after this ack.
        let (done_tx, done_rx) = mpsc::channel();
        if self.io_tx.send(MuxerCmd::Reset { done: done_tx }).is_ok() {
            let _ = self.io_waker.wake();
            let _ = done_rx.recv_timeout(std::time::Duration::from_millis(50));
        }
        self.proxies.lock().unwrap().clear();
        self.pending_inbound.lock().unwrap().clear();
        self.pending_outbound.lock().unwrap().clear();
        self.udp_dst.lock().unwrap().clear();
        self.udp_sockets.lock().unwrap().clear();
        self.inbound_conns.lock().unwrap().clear();
        // Drain any stale packets, including callbacks the io thread
        // emitted while closing old streams during Reset.
        self.rxq.lock().unwrap().drain();
    }

    /// Look up the host-side TCP port currently bound for the
    /// listener serving guest `vm_port`. Returns None if no listener
    /// is registered yet (e.g. guest hasn't done TSI_LISTEN).
    ///
    /// Caveat: the guest's TSI driver assigns RANDOM vm_ports per
    /// bind — they don't match the AF_INET port the guest userspace
    /// asked for (e.g. http-echo's bind(8000) becomes vm_port=
    /// 0x72b58b71 or similar). So this is rarely what callers want;
    /// `first_host_port` is usually what you reach for.
    pub fn host_port_for_vm_port(&self, vm_port: u32) -> Option<u16> {
        let s = self.tsi.lock().unwrap();
        s.values().find_map(|st| {
            if st.vm_port == Some(vm_port) {
                st.listener.as_ref().map(|l| l.host_addr.port())
            } else {
                None
            }
        })
    }

    /// Return the host TCP port of the first TSI listener registered.
    /// Used by the TLS terminator: typical guest workloads have one
    /// HTTP server, so "the first listener" is unambiguous. Returns
    /// None if no listener has been bound yet.
    pub fn first_host_port(&self) -> Option<u16> {
        let s = self.tsi.lock().unwrap();
        // Prefer the AF_INET TSI listener. Many production images
        // bind both 0.0.0.0 and ::; AF_TSI6 still has a separate,
        // less-tested guest-kernel data path, while AF_TSI is the
        // path we know carries bytes correctly.
        s.values()
            .find_map(|st| {
                if st.family == libc::AF_INET as u16 {
                    st.listener.as_ref().map(|l| l.host_addr.port())
                } else {
                    None
                }
            })
            .or_else(|| {
                s.values()
                    .find_map(|st| st.listener.as_ref().map(|l| l.host_addr.port()))
            })
    }

    fn first_listener_endpoint(&self, vm_port: Option<u32>) -> Option<(u64, u32)> {
        let s = self.tsi.lock().unwrap();
        let endpoint = |cid: u64, st: &TsiState| {
            let listener_vm_port = st.vm_port?;
            if vm_port.is_some_and(|want| want != listener_vm_port) {
                return None;
            }
            Some((cid, listener_vm_port))
        };
        s.iter()
            .find_map(|((cid, _), st)| {
                if st.family == libc::AF_INET as u16 {
                    endpoint(*cid, st)
                } else {
                    None
                }
            })
            .or_else(|| s.iter().find_map(|((cid, _), st)| endpoint(*cid, st)))
    }

    fn open_stream_to_guest(
        &self,
        stream: MuxerStream,
        vm_port: Option<u32>,
    ) -> std::io::Result<()> {
        let Some((cid, vm_port)) = self.first_listener_endpoint(vm_port) else {
            return Err(std::io::Error::new(
                std::io::ErrorKind::NotConnected,
                "no TSI listener",
            ));
        };
        let host_src_port = crate::devices::virtio::vsock::tsi_stream::alloc_host_src_port();
        self.pending_inbound.lock().unwrap().insert(
            (cid, host_src_port),
            PendingInbound {
                cid,
                host_src_port,
                vm_port,
                stream: Some(stream),
            },
        );
        let req = RxPacket {
            hdr: Header {
                src_cid: VSOCK_HOST_CID,
                dst_cid: cid,
                src_port: host_src_port,
                dst_port: vm_port,
                len: 0,
                type_: VSOCK_TYPE_STREAM,
                op: VSOCK_OP_REQUEST,
                flags: 0,
                buf_alloc: OUR_BUF_ALLOC,
                fwd_cnt: 0,
            },
            data: Vec::new(),
        };
        let kick = self.kick_device.lock().unwrap().clone();
        push_rxq_and_kick(&self.rxq, &kick, req);
        Ok(())
    }

    pub fn open_unix_to_guest(
        &self,
        unix: UnixStream,
        vm_port: Option<u32>,
    ) -> std::io::Result<()> {
        self.open_stream_to_guest(MuxerStream::Unix(unix), vm_port)
    }

    /// Host→guest *native* AF_VSOCK connect. Used by the
    /// `<vsock_mux>-exec.sock` frontend to reach the in-guest exec
    /// agent without going through TSI's TCP-emulation path.
    ///
    /// Differences vs [`open_unix_to_guest`] / [`open_tcp_to_guest`]:
    ///
    /// - No TSI listener registry lookup. The destination is just
    ///   `(self.cid, guest_port)` — caller-specified guest port,
    ///   muxer-owned guest CID.
    /// - The guest kernel routes the resulting `OP_REQUEST` to
    ///   whichever vsock socket is bound to `guest_port` (the
    ///   `socket(AF_VSOCK, ...) + bind(...) + listen()` path on the
    ///   guest), independent of TSI.
    /// - `handle_response` matches by `(cid, host_src_port)` from
    ///   `pending_inbound` so the byte bridge is identical to TSI.
    ///
    /// Return value: Ok if the request was queued. The actual
    /// connect outcome is observed on `OP_RESPONSE` (success) or
    /// `OP_RST` (no listener / kernel refused) over the muxer's
    /// rxq. Callers that need a "did it connect" handshake on the
    /// stream should expect to see EOF immediately if the guest
    /// rejected the request.
    pub fn open_native_to_guest(
        &self,
        stream: MuxerStream,
        guest_port: u32,
    ) -> std::io::Result<()> {
        let cid = self.cid;
        let host_src_port = crate::devices::virtio::vsock::tsi_stream::alloc_host_src_port();
        self.pending_inbound.lock().unwrap().insert(
            (cid, host_src_port),
            PendingInbound {
                cid,
                host_src_port,
                vm_port: guest_port,
                stream: Some(stream),
            },
        );
        let req = RxPacket {
            hdr: Header {
                src_cid: VSOCK_HOST_CID,
                dst_cid: cid,
                src_port: host_src_port,
                dst_port: guest_port,
                len: 0,
                type_: VSOCK_TYPE_STREAM,
                op: VSOCK_OP_REQUEST,
                flags: 0,
                buf_alloc: OUR_BUF_ALLOC,
                fwd_cnt: 0,
            },
            data: Vec::new(),
        };
        let kick = self.kick_device.lock().unwrap().clone();
        push_rxq_and_kick(&self.rxq, &kick, req);
        Ok(())
    }

    pub fn open_tcp_to_guest(&self, tcp: TcpStream, vm_port: Option<u32>) -> std::io::Result<()> {
        self.open_stream_to_guest(MuxerStream::Tcp(tcp), vm_port)
    }

    /// Same as `open_tcp_to_guest` but injects `prefix` bytes into the
    /// guest as if they had been read from the TCP socket first. Used
    /// for SCM_RIGHTS handoff, where the router has already consumed
    /// some bytes from the client's TCP buffer to make a routing
    /// decision and now needs the guest to see them.
    pub fn open_tcp_to_guest_with_prefix(
        &self,
        tcp: TcpStream,
        prefix: Vec<u8>,
        vm_port: Option<u32>,
    ) -> std::io::Result<()> {
        self.open_stream_to_guest(MuxerStream::TcpWithPrefix(tcp, prefix), vm_port)
    }

    /// Snapshot just the TSI listeners (the only host-side state worth
    /// surviving restore). In-flight TCP/UDP conns and pending
    /// handshakes are intentionally dropped — peer TCPs see RST,
    /// retried requests come back.
    pub fn capture_tsi_listeners(&self) -> Vec<TsiListenerSnapshot> {
        let s = self.tsi.lock().unwrap();
        s.iter()
            .filter_map(|((cid, peer_port), st)| {
                // Only listeners — entries past TSI_LISTEN have vm_port set.
                st.vm_port.map(|vm_port| TsiListenerSnapshot {
                    cid: *cid,
                    peer_port: *peer_port,
                    vm_port,
                    family: st.family,
                    socktype: st.socktype,
                })
            })
            .collect()
    }

    pub fn listener_count(&self) -> usize {
        let s = self.tsi.lock().unwrap();
        s.values().filter(|st| st.vm_port.is_some()).count()
    }

    /// Re-bind every captured TSI listener: spawn a fresh accept
    /// thread on a new ephemeral host port and re-insert the
    /// TsiState. The guest only knows its `vm_port`; the host port
    /// changes across restore (transparent — guest never sees it).
    pub fn restore_tsi_listeners(self: &Arc<Self>, snaps: &[TsiListenerSnapshot]) {
        for s in snaps {
            // Build the on_accept closure exactly as TSI_LISTEN does.
            let pending = self.pending_inbound.clone();
            let rxq = self.rxq.clone();
            let kick = self.kick_device.lock().unwrap().clone();
            let on_accept: Arc<dyn Fn(u64, u32, u32, std::net::TcpStream) + Send + Sync> =
                Arc::new(move |cid, host_src_port, vm_port, tcp| {
                    pending.lock().unwrap().insert(
                        (cid, host_src_port),
                        PendingInbound {
                            cid,
                            host_src_port,
                            vm_port,
                            stream: Some(MuxerStream::Tcp(tcp)),
                        },
                    );
                    let req = RxPacket {
                        hdr: Header {
                            src_cid: VSOCK_HOST_CID,
                            dst_cid: cid,
                            src_port: host_src_port,
                            dst_port: vm_port,
                            len: 0,
                            type_: VSOCK_TYPE_STREAM,
                            op: VSOCK_OP_REQUEST,
                            flags: 0,
                            buf_alloc: OUR_BUF_ALLOC,
                            fwd_cnt: 0,
                        },
                        data: Vec::new(),
                    };
                    push_rxq_and_kick(&rxq, &kick, req);
                });
            match TsiListener::bind(s.cid, s.vm_port, on_accept) {
                Ok(listener) => {
                    if vsock_trace_enabled() {
                        eprintln!(
                            "[muxer] restored TSI listener cid={} vm_port={} -> host {}",
                            s.cid, s.vm_port, listener.host_addr
                        );
                    }
                    self.tsi.lock().unwrap().insert(
                        (s.cid, s.peer_port),
                        TsiState {
                            family: s.family,
                            socktype: s.socktype,
                            vm_port: Some(s.vm_port),
                            listener: Some(listener),
                        },
                    );
                }
                Err(e) => {
                    eprintln!(
                        "[muxer] restore listener cid={} vm_port={} ERR: {e}",
                        s.cid, s.vm_port
                    );
                }
            }
        }
    }

    fn pending_inbound_arc(&self) -> Arc<Mutex<HashMap<(u64, u32), PendingInbound>>> {
        self.pending_inbound.clone()
    }

    /// Push a packet to the muxer RX queue + wake the device.
    pub fn submit(&self, pkt: RxPacket) {
        let kick = self.kick_device.lock().unwrap().clone();
        push_rxq_and_kick(&self.rxq, &kick, pkt);
    }

    /// Process one TX packet from the guest. Returns RxPackets to
    /// be queued back. This is the muxer's main entry point;
    /// vsock::device calls it for every TX-queue head.
    pub fn handle_tx(&self, hdr: &Header, payload: &[u8]) -> Vec<RxPacket> {
        // TSI control DGRAMs: dst_port in [1024, 1031], type=DGRAM.
        if hdr.type_ == VSOCK_TYPE_DGRAM
            && hdr.dst_port >= TSI_PROXY_CREATE
            && hdr.dst_port <= TSI_PROXY_RELEASE
        {
            return self.handle_tsi_control(hdr, payload);
        }
        if hdr.type_ == VSOCK_TYPE_DGRAM && hdr.op == VSOCK_OP_RW {
            // Connected UDP sockets send payloads as regular vsock
            // DGRAMs to their per-socket peer port after TSI_CONNECT.
            // sendto(2) with an explicit address still uses the
            // TSI_SENDTO_DATA control port handled below.
            self.send_udp_payload(hdr.src_cid, hdr.dst_port, hdr.src_port, payload);
            return Vec::new();
        }

        // Stream connection ops.
        match hdr.op {
            VSOCK_OP_REQUEST => self.handle_request(hdr),
            VSOCK_OP_RESPONSE => self.handle_response(hdr),
            VSOCK_OP_RW => self.handle_rw(hdr, payload),
            VSOCK_OP_SHUTDOWN | VSOCK_OP_RST => self.handle_close(hdr),
            VSOCK_OP_CREDIT_UPDATE | VSOCK_OP_CREDIT_REQUEST => self.handle_credit(hdr),
            _ => Vec::new(),
        }
    }

    fn handle_request(&self, hdr: &Header) -> Vec<RxPacket> {
        // Env service: guest opens AF_VSOCK to (HOST, VSOCK_ENV_PORT)
        // and reads JSON. Reply RESPONSE + RW(json) + SHUTDOWN. No
        // muxer state lives past the request.
        if hdr.dst_port == VSOCK_ENV_PORT {
            let json = VSOCK_ENV_JSON
                .lock()
                .unwrap()
                .clone()
                .unwrap_or_else(|| r#"{"env":{},"secrets":{}}"#.into());
            let mk = |op: u16, flags: u32, data: Vec<u8>| RxPacket {
                hdr: Header {
                    src_cid: hdr.dst_cid,
                    dst_cid: hdr.src_cid,
                    src_port: hdr.dst_port,
                    dst_port: hdr.src_port,
                    len: data.len() as u32,
                    type_: VSOCK_TYPE_STREAM,
                    op,
                    flags,
                    buf_alloc: OUR_BUF_ALLOC,
                    fwd_cnt: 0,
                },
                data,
            };
            return vec![
                mk(VSOCK_OP_RESPONSE, 0, Vec::new()),
                mk(VSOCK_OP_RW, 0, json.into_bytes()),
                mk(VSOCK_OP_SHUTDOWN, 1u32 | 2u32, Vec::new()),
            ];
        }

        // Outbound CONNECT phase 2: guest opens vsock to
        // (HOST, TSI_PROXY_PORT=620) carrying src_port=peer_port
        // (the same peer_port the guest sent in the prior
        // TSI_CONNECT). Pair it with the parked TcpStream.
        if hdr.dst_port == TSI_PROXY_PORT {
            let key = (hdr.src_cid, hdr.src_port);
            let mut pending = self.pending_outbound.lock().unwrap();
            if let Some(mut po) = pending.remove(&key) {
                let tcp = match po.tcp.take() {
                    Some(t) => t,
                    None => return vec![RxPacket::rst_for(hdr)],
                };
                drop(pending);
                // For outbound, we reuse the guest's peer_port as the
                // host-side port: RW packets we send back carry
                // src=TSI_PROXY_PORT, dst=peer_port, while the guest's
                // RW to us carries dst=TSI_PROXY_PORT, src=peer_port.
                // Track via inbound_conns keyed by (cid, peer_port);
                // handle_rw / handle_close recover that key from
                // hdr.src_port whenever dst_port == TSI_PROXY_PORT.
                let host_src_port = hdr.src_port; // = peer_port
                let guest_dst_port = hdr.src_port;
                let rxq = self.rxq.clone();
                let kick = self.kick_device.lock().unwrap().clone();
                let cid = po.cid;
                let on_data: Arc<dyn Fn(Vec<u8>) + Send + Sync> = Arc::new(move |data| {
                    let (op, flags) = if data.is_empty() {
                        (VSOCK_OP_SHUTDOWN, 1u32 | 2u32)
                    } else {
                        (VSOCK_OP_RW, 0u32)
                    };
                    let pkt = RxPacket {
                        hdr: Header {
                            src_cid: VSOCK_HOST_CID,
                            dst_cid: cid,
                            src_port: TSI_PROXY_PORT,
                            dst_port: guest_dst_port,
                            len: data.len() as u32,
                            type_: VSOCK_TYPE_STREAM,
                            op,
                            flags,
                            buf_alloc: OUR_BUF_ALLOC,
                            fwd_cnt: 0,
                        },
                        data,
                    };
                    push_rxq_and_kick(&rxq, &kick, pkt);
                });
                let _ = self.io_tx.send(MuxerCmd::Register {
                    host_src_port,
                    stream: MuxerStream::Tcp(tcp),
                    on_data,
                });
                let _ = self.io_waker.wake();
                self.inbound_conns.lock().unwrap().insert(
                    (cid, host_src_port),
                    InboundState {
                        our_fwd_cnt: 0,
                        last_credit_fwd_cnt: 0,
                        guest_dst_port,
                    },
                );
                if vsock_trace_enabled() {
                    eprintln!(
                        "[muxer] outbound conn established cid={cid} peer_port={host_src_port}"
                    );
                }
                // Send RESPONSE to acknowledge the vsock REQUEST.
                return vec![RxPacket {
                    hdr: Header {
                        src_cid: VSOCK_HOST_CID,
                        dst_cid: cid,
                        src_port: TSI_PROXY_PORT,
                        dst_port: hdr.src_port,
                        len: 0,
                        type_: VSOCK_TYPE_STREAM,
                        op: VSOCK_OP_RESPONSE,
                        flags: 0,
                        buf_alloc: OUR_BUF_ALLOC,
                        fwd_cnt: 0,
                    },
                    data: Vec::new(),
                }];
            }
            return vec![RxPacket::rst_for(hdr)];
        }
        // No matching listener for any other dst_port → RST.
        vec![RxPacket::rst_for(hdr)]
    }

    fn handle_response(&self, hdr: &Header) -> Vec<RxPacket> {
        // Guest accepted our host-initiated REQUEST. Match by
        // (cid, our_host_src_port = hdr.dst_port). Spawn the
        // TsiConn bridge: TCP→on_data closure pushes RW packets
        // to the muxer's rxq + kicks the device.
        let key = (hdr.src_cid, hdr.dst_port);
        let pending = self.pending_inbound.lock().unwrap().remove(&key);
        if let Some(mut pi) = pending {
            let stream = match pi.stream.take() {
                Some(s) => s,
                None => return vec![RxPacket::rst_for(hdr)],
            };
            let rxq = self.rxq.clone();
            let kick = self.kick_device.lock().unwrap().clone();
            let cid = pi.cid;
            let host_src_port = pi.host_src_port;
            // guest_dst_port = hdr.src_port — the guest's accepted
            // socket's source port, used as the dst for our RW.
            let guest_dst_port = hdr.src_port;
            // on_data: empty Vec means "host closed, send SHUTDOWN".
            let on_data: Arc<dyn Fn(Vec<u8>) + Send + Sync> = Arc::new(move |data| {
                let (op, flags) = if data.is_empty() {
                    // VSOCK_FLAGS_SHUTDOWN_RCV | VSOCK_FLAGS_SHUTDOWN_SEND
                    (VSOCK_OP_SHUTDOWN, 1u32 | 2u32)
                } else {
                    (VSOCK_OP_RW, 0u32)
                };
                let pkt = RxPacket {
                    hdr: Header {
                        src_cid: VSOCK_HOST_CID,
                        dst_cid: cid,
                        src_port: host_src_port,
                        dst_port: guest_dst_port,
                        len: data.len() as u32,
                        type_: VSOCK_TYPE_STREAM,
                        op,
                        flags,
                        buf_alloc: OUR_BUF_ALLOC,
                        fwd_cnt: 0,
                    },
                    data,
                };
                push_rxq_and_kick(&rxq, &kick, pkt);
            });
            // Hand the TCP stream to the single muxer-io thread.
            // No per-conn pthread_create — see the single-epoll-
            // thread design in muxer_thread.rs.
            let _ = self.io_tx.send(MuxerCmd::Register {
                host_src_port,
                stream,
                on_data,
            });
            let _ = self.io_waker.wake();
            self.inbound_conns.lock().unwrap().insert(
                key,
                InboundState {
                    our_fwd_cnt: 0,
                    last_credit_fwd_cnt: 0,
                    guest_dst_port,
                },
            );
            if vsock_trace_enabled() {
                eprintln!("[muxer] inbound conn established cid={cid} host_src={host_src_port} guest_dst={guest_dst_port}");
            }
        }
        Vec::new()
    }

    fn handle_rw(&self, hdr: &Header, payload: &[u8]) -> Vec<RxPacket> {
        // Inbound vs outbound routing:
        //   inbound  RW: dst_port = our_host_src_port (we registered under it)
        //   outbound RW: dst_port = TSI_PROXY_PORT, src_port = peer_port
        //                (we registered under peer_port)
        let conn_key = if hdr.dst_port == TSI_PROXY_PORT {
            (hdr.src_cid, hdr.src_port)
        } else {
            (hdr.src_cid, hdr.dst_port)
        };
        let needs_credit_update = {
            let mut conns = self.inbound_conns.lock().unwrap();
            if let Some(st) = conns.get_mut(&conn_key) {
                st.our_fwd_cnt = st.our_fwd_cnt.wrapping_add(payload.len() as u32);
                let consumed_since_last = st.our_fwd_cnt.wrapping_sub(st.last_credit_fwd_cnt);
                if vsock_trace_enabled()
                    && st.our_fwd_cnt % 50000 < 1500
                {
                    eprintln!(
                        "[muxer-rw-fc] our_fwd_cnt={} last_credit={} consumed_since_last={} thr={}",
                        st.our_fwd_cnt,
                        st.last_credit_fwd_cnt,
                        consumed_since_last,
                        CREDIT_UPDATE_THRESHOLD
                    );
                }
                if consumed_since_last >= CREDIT_UPDATE_THRESHOLD {
                    let fwd_cnt = st.our_fwd_cnt;
                    let guest_dst = st.guest_dst_port;
                    st.last_credit_fwd_cnt = fwd_cnt;
                    Some((fwd_cnt, guest_dst))
                } else {
                    None
                }
            } else {
                // Not an inbound conn — try fallback proxy.
                drop(conns);
                let key = proxy_key(hdr.dst_port, hdr.src_port);
                let mut proxies = self.proxies.lock().unwrap();
                if let Some(p) = proxies.get_mut(&key) {
                    return p.handle_packet(hdr, payload);
                }
                return vec![RxPacket::rst_for(hdr)];
            }
        };

        if !payload.is_empty() {
            let t0 = Instant::now();
            let _ = self.io_tx.send(MuxerCmd::Write {
                host_src_port: conn_key.1,
                bytes: payload.to_vec(),
            });
            let _ = self.io_waker.wake();
            mux_profile::record(
                Stage::GuestToTcpSend,
                payload.len(),
                t0.elapsed().as_micros() as u64,
            );
        }

        if let Some((fwd_cnt, guest_dst_port)) = needs_credit_update {
            return vec![RxPacket {
                hdr: Header {
                    src_cid: VSOCK_HOST_CID,
                    dst_cid: hdr.src_cid,
                    src_port: hdr.dst_port,
                    dst_port: guest_dst_port,
                    len: 0,
                    type_: VSOCK_TYPE_STREAM,
                    op: VSOCK_OP_CREDIT_UPDATE,
                    flags: 0,
                    buf_alloc: OUR_BUF_ALLOC,
                    fwd_cnt,
                },
                data: Vec::new(),
            }];
        }
        Vec::new()
    }

    fn handle_close(&self, hdr: &Header) -> Vec<RxPacket> {
        let key = proxy_key(hdr.dst_port, hdr.src_port);
        self.proxies.lock().unwrap().remove(&key);
        let conn_key = if hdr.dst_port == TSI_PROXY_PORT {
            (hdr.src_cid, hdr.src_port)
        } else {
            (hdr.src_cid, hdr.dst_port)
        };
        if self
            .inbound_conns
            .lock()
            .unwrap()
            .remove(&conn_key)
            .is_some()
        {
            let _ = self.io_tx.send(MuxerCmd::Close {
                host_src_port: conn_key.1,
            });
            let _ = self.io_waker.wake();
        }
        Vec::new()
    }

    fn handle_credit(&self, _hdr: &Header) -> Vec<RxPacket> {
        // CREDIT_UPDATE / CREDIT_REQUEST update flow control state
        // on the proxy. No-op for now.
        Vec::new()
    }

    /// TSI control DGRAM dispatcher. Each control op is a separate
    /// DGRAM packet whose dst_port IS the opcode (1024..1031).
    /// Replies are DGRAM RW packets pushed back via the same
    /// (local_port, peer_port) — guest reads the result from a
    /// recvmsg on the AF_TSI control socket.
    fn handle_tsi_control(&self, hdr: &Header, body: &[u8]) -> Vec<RxPacket> {
        let mk_resp = |result: i32| -> RxPacket {
            // TSI replies ride on RW DGRAMs.
            let bytes = result.to_le_bytes().to_vec();
            RxPacket {
                hdr: Header {
                    src_cid: hdr.dst_cid,
                    dst_cid: hdr.src_cid,
                    src_port: hdr.dst_port,
                    dst_port: hdr.src_port,
                    len: bytes.len() as u32,
                    type_: VSOCK_TYPE_DGRAM,
                    op: VSOCK_OP_RW,
                    flags: 0,
                    buf_alloc: OUR_BUF_ALLOC,
                    fwd_cnt: 0,
                },
                data: bytes,
            }
        };

        match hdr.dst_port {
            TSI_PROXY_CREATE if body.len() >= 8 => {
                // struct { u32 peer_port; u16 family; u16 type; }
                let (Some(peer_port), Some(family), Some(socktype)) = (
                    body_le_u32(body, 0),
                    body_le_u16(body, 4),
                    body_le_u16(body, 6),
                ) else {
                    return Vec::new();
                };
                eprintln!(
                    "[muxer] PROXY_CREATE peer_port={peer_port} family={family} type={socktype}"
                );
                self.tsi.lock().unwrap().insert(
                    (hdr.src_cid, peer_port),
                    TsiState {
                        family,
                        socktype,
                        vm_port: None,
                        listener: None,
                    },
                );
                // No reply: the AF_TSI control op doesn't emit a
                // recvmsg on the guest side for this op.
                Vec::new()
            }
            TSI_LISTEN if body.len() >= 8 => {
                // struct { u32 peer_port; u32 vm_port; ... }
                let (Some(peer_port), Some(vm_port)) = (body_le_u32(body, 0), body_le_u32(body, 4))
                else {
                    return vec![mk_resp(-22)];
                };
                // Accept-thread closure must hold weak refs to our
                // own state. We can't get Arc<Self> from inside an
                // &self method, so we capture the inner Arc-able
                // pieces directly.
                let pending = self.pending_inbound_arc();
                let rxq = self.rxq.clone();
                let kick = self.kick_device.lock().unwrap().clone();
                let on_accept: Arc<dyn Fn(u64, u32, u32, TcpStream) + Send + Sync> =
                    Arc::new(move |cid, host_src_port, vm_port, tcp| {
                        // Stash the TcpStream until guest sends RESPONSE.
                        pending.lock().unwrap().insert(
                            (cid, host_src_port),
                            PendingInbound {
                                cid,
                                host_src_port,
                                vm_port,
                                stream: Some(MuxerStream::Tcp(tcp)),
                            },
                        );
                        // Push vsock REQUEST to the guest.
                        let req = RxPacket {
                            hdr: Header {
                                src_cid: VSOCK_HOST_CID,
                                dst_cid: cid,
                                src_port: host_src_port,
                                dst_port: vm_port,
                                len: 0,
                                type_: VSOCK_TYPE_STREAM,
                                op: VSOCK_OP_REQUEST,
                                flags: 0,
                                buf_alloc: OUR_BUF_ALLOC,
                                fwd_cnt: 0,
                            },
                            data: Vec::new(),
                        };
                        push_rxq_and_kick(&rxq, &kick, req);
                    });
                let listener = TsiListener::bind(hdr.src_cid, vm_port, on_accept).ok();
                let host = listener.as_ref().map(|l| l.host_addr);
                eprintln!(
                    "[muxer] LISTEN peer_port={peer_port} vm_port={vm_port} -> host {host:?}"
                );
                if let Some(s) = self.tsi.lock().unwrap().get_mut(&(hdr.src_cid, peer_port)) {
                    s.vm_port = Some(vm_port);
                    s.listener = listener;
                }
                vec![mk_resp(if host.is_some() { 0 } else { -22 })]
            }
            TSI_ACCEPT if body.len() >= 4 => {
                let Some(peer_port) = body_le_u32(body, 0) else {
                    return vec![mk_resp(-22)];
                };
                let s = self.tsi.lock().unwrap();
                let r = if s.contains_key(&(hdr.src_cid, peer_port)) {
                    0
                } else {
                    -22
                };
                vec![mk_resp(r)]
            }
            TSI_GETNAME if body.len() >= 12 => {
                // Build a fake AF_INET sockaddr response — addr_len=16,
                // family=AF_INET, port=0, ip=127.0.0.1.
                let mut buf = vec![0u8; 4 + 4 + 128];
                buf[0..4].copy_from_slice(&0i32.to_le_bytes()); // result=0
                buf[4..8].copy_from_slice(&16u32.to_le_bytes()); // addr_len=16
                buf[8..10].copy_from_slice(&2u16.to_le_bytes()); // AF_INET
                buf[12..16].copy_from_slice(&[127, 0, 0, 1]); // 127.0.0.1
                vec![RxPacket {
                    hdr: Header {
                        src_cid: hdr.dst_cid,
                        dst_cid: hdr.src_cid,
                        src_port: hdr.dst_port,
                        dst_port: hdr.src_port,
                        len: buf.len() as u32,
                        type_: VSOCK_TYPE_DGRAM,
                        op: VSOCK_OP_RW,
                        flags: 0,
                        buf_alloc: OUR_BUF_ALLOC,
                        fwd_cnt: 0,
                    },
                    data: buf,
                }]
            }
            TSI_CONNECT if body.len() >= 8 => {
                // struct tsi_connect_req {
                //   u32 peer_port; u32 addr_len; sockaddr addr;
                // }
                // sockaddr_in: u16 family; u16 port_be; u32 addr_be4;
                let (Some(peer_port), Some(addr_len)) =
                    (body_le_u32(body, 0), body_le_u32(body, 4))
                else {
                    return vec![mk_resp(-22)];
                };
                let addr_len = addr_len as usize;
                if body.len() < 8 + addr_len.min(128) || addr_len < 16 {
                    return vec![mk_resp(-22)]; // -EINVAL
                }
                let Some(family) = body_le_u16(body, 8) else {
                    return vec![mk_resp(-22)];
                };
                if family != 2 {
                    // AF_INET only for now (no IPv6).
                    return vec![mk_resp(-97)]; // -EAFNOSUPPORT
                }
                let Some(port) = body_be_u16(body, 10) else {
                    return vec![mk_resp(-22)];
                };
                let ip = std::net::Ipv4Addr::new(body[12], body[13], body[14], body[15]);
                let target = std::net::SocketAddr::from((ip, port));
                let socktype = self
                    .tsi
                    .lock()
                    .unwrap()
                    .get(&(hdr.src_cid, peer_port))
                    .map(|s| s.socktype);
                if socktype == Some(libc::SOCK_DGRAM as u16) {
                    if let Err(why) = crate::vmm::egress_policy::check_addr(target) {
                        eprintln!("[muxer] UDP CONNECT cid={} peer_port={peer_port} -> {target} BLOCKED: {why}",
                            hdr.src_cid);
                        return vec![mk_resp(-13)]; // -EACCES
                    }
                    self.udp_dst
                        .lock()
                        .unwrap()
                        .insert((hdr.src_cid, peer_port), target);
                    eprintln!(
                        "[muxer] UDP CONNECT cid={} peer_port={peer_port} -> {target} OK",
                        hdr.src_cid
                    );
                    return vec![mk_resp(0)];
                }
                if let Err(why) = crate::vmm::egress_policy::check_addr(target) {
                    eprintln!(
                        "[muxer] CONNECT cid={} peer_port={peer_port} -> {target} BLOCKED: {why}",
                        hdr.src_cid
                    );
                    return vec![mk_resp(-13)]; // -EACCES
                }
                let res = std::net::TcpStream::connect_timeout(
                    &target,
                    std::time::Duration::from_secs(2),
                );
                match res {
                    Ok(tcp) => {
                        let _ = tcp.set_nodelay(true);
                        let _ = tcp.set_nonblocking(true);
                        eprintln!(
                            "[muxer] CONNECT cid={} peer_port={peer_port} -> {target} OK",
                            hdr.src_cid
                        );
                        self.pending_outbound.lock().unwrap().insert(
                            (hdr.src_cid, peer_port),
                            PendingOutbound {
                                cid: hdr.src_cid,
                                peer_port,
                                tcp: Some(tcp),
                            },
                        );
                        vec![mk_resp(0)]
                    }
                    Err(e) => {
                        eprintln!(
                            "[muxer] CONNECT cid={} peer_port={peer_port} -> {target} ERR: {e}",
                            hdr.src_cid
                        );
                        vec![mk_resp(-111)] // -ECONNREFUSED
                    }
                }
            }
            TSI_PROXY_RELEASE => {
                // No-op: accepted connections share the listener's
                // peer_port, so honoring a release on conn close would
                // tear the listener down for good. The listener stays
                // alive for the lifetime of the VM (dropped in reset()).
                Vec::new()
            }
            TSI_SENDTO_ADDR if body.len() >= 8 => {
                // struct tsi_sendto_addr { u32 peer_port; u32 addr_len;
                //                          sockaddr_in addr; }
                let (Some(peer_port), Some(addr_len)) =
                    (body_le_u32(body, 0), body_le_u32(body, 4))
                else {
                    return Vec::new();
                };
                let addr_len = addr_len as usize;
                if body.len() < 8 + addr_len.min(128) || addr_len < 16 {
                    return Vec::new();
                }
                let Some(family) = body_le_u16(body, 8) else {
                    return Vec::new();
                };
                if family == 2 {
                    let Some(port) = body_be_u16(body, 10) else {
                        return Vec::new();
                    };
                    let ip = std::net::Ipv4Addr::new(body[12], body[13], body[14], body[15]);
                    let addr = SocketAddr::from((ip, port));
                    if let Err(why) = crate::vmm::egress_policy::check_addr(addr) {
                        eprintln!(
                            "[muxer] SENDTO_ADDR cid={} -> {addr} BLOCKED: {why}",
                            hdr.src_cid
                        );
                    } else {
                        self.udp_dst
                            .lock()
                            .unwrap()
                            .insert((hdr.src_cid, peer_port), addr);
                    }
                }
                Vec::new()
            }
            TSI_SENDTO_DATA => {
                // The DGRAM socket sends data here directly; peer_port
                // is in hdr.src_port (the AF_INET socket's local TSI
                // port). Body is just the datagram payload.
                let peer_port = hdr.src_port;
                self.send_udp_payload(hdr.src_cid, peer_port, hdr.src_port, body);
                Vec::new()
            }
            _ => Vec::new(),
        }
    }

    fn send_udp_payload(&self, cid: u64, peer_port: u32, guest_dst_port: u32, data: &[u8]) {
        if data.is_empty() {
            return;
        }
        let key = (cid, peer_port);
        let dst = match self.udp_dst.lock().unwrap().get(&key).cloned() {
            Some(a) => a,
            None => {
                if vsock_trace_enabled() {
                    eprintln!("[muxer] UDP DATA cid={cid} peer_port={peer_port} with no dst");
                }
                return;
            }
        };
        let sock_arc = {
            let mut s = self.udp_sockets.lock().unwrap();
            if let Some(s) = s.get(&key) {
                s.clone()
            } else {
                let udp = match UdpSocket::bind("0.0.0.0:0") {
                    Ok(u) => u,
                    Err(e) => {
                        eprintln!("[muxer] UDP bind cid={cid} peer_port={peer_port}: {e}");
                        return;
                    }
                };
                let recv_fd = match udp.try_clone() {
                    Ok(fd) => fd,
                    Err(e) => {
                        eprintln!("[muxer] UDP clone cid={cid} peer_port={peer_port}: {e}");
                        return;
                    }
                };
                let arc = Arc::new(udp);
                s.insert(key, arc.clone());

                let rxq = self.rxq.clone();
                let kick = self.kick_device.lock().unwrap().clone();
                let on_data: Arc<dyn Fn(Vec<u8>) + Send + Sync> = Arc::new(move |data| {
                    let pkt = RxPacket {
                        hdr: Header {
                            src_cid: VSOCK_HOST_CID,
                            dst_cid: cid,
                            src_port: peer_port,
                            dst_port: guest_dst_port,
                            len: data.len() as u32,
                            type_: VSOCK_TYPE_DGRAM,
                            op: VSOCK_OP_RW,
                            flags: 0,
                            buf_alloc: OUR_BUF_ALLOC,
                            fwd_cnt: 0,
                        },
                        data,
                    };
                    push_rxq_and_kick(&rxq, &kick, pkt);
                });
                let _ = self.io_tx.send(MuxerCmd::RegisterUdp {
                    key: peer_port,
                    udp: recv_fd,
                    on_data,
                });
                let _ = self.io_waker.wake();
                arc
            }
        };
        if vsock_trace_enabled() {
            eprintln!(
                "[muxer] UDP DATA cid={cid} peer_port={peer_port} guest_dst={guest_dst_port} -> {dst} {}B",
                data.len()
            );
        }
        let _ = sock_arc.send_to(data, dst);
    }
}

fn push_rxq_and_kick(
    rxq: &Arc<Mutex<MuxerRxQ>>,
    kick: &Option<Arc<dyn Fn() + Send + Sync>>,
    pkt: RxPacket,
) {
    let bytes = pkt.data.len();
    let t0 = Instant::now();
    // Coalesce kicks: only raise the SPI when we transition the
    // queue from empty → non-empty. Subsequent pushes ride the
    // in-flight drain. Saves one `hv_gic_set_spi` syscall
    // (~3.6% of muxer-io non-syscall CPU under load) per
    // coalesced packet during bursts. See
    // docs/design/concurrency-floor-2026-05-04.md.
    //
    // Note: `push_was_empty` returns None when the queue is
    // saturated (back-pressure). In that case we don't kick
    // either — the existing in-flight drain has more than
    // enough work to do; one more SPI won't help.
    let was_empty = rxq.lock().unwrap().push_was_empty(pkt);
    if was_empty == Some(true) {
        if let Some(k) = kick.as_ref() {
            k();
        }
    }
    mux_profile::record(Stage::RxqKick, bytes, t0.elapsed().as_micros() as u64);
}

// Use HOST_CID so the unused-import warning goes away.
const _: u64 = VSOCK_HOST_CID;

#[cfg(test)]
mod tests {
    use super::*;

    fn control_header(dst_port: u32) -> Header {
        Header {
            src_cid: 3,
            dst_cid: VSOCK_HOST_CID,
            src_port: 40_000,
            dst_port,
            len: 0,
            type_: VSOCK_TYPE_DGRAM,
            op: VSOCK_OP_RW,
            flags: 0,
            buf_alloc: OUR_BUF_ALLOC,
            fwd_cnt: 0,
        }
    }

    fn response_i32(resp: &[RxPacket]) -> Option<i32> {
        let bytes = resp.first()?.data.get(0..4)?.try_into().ok()?;
        Some(i32::from_le_bytes(bytes))
    }
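
    // A minimal sketch of the default routing in handle_request: a stream
    // REQUEST whose dst_port matches neither the env service nor
    // TSI_PROXY_PORT (and has no pending outbound) is answered with RST.
    #[test]
    fn unknown_stream_request_gets_rst() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;
        let hdr = Header {
            src_cid: 3,
            dst_cid: VSOCK_HOST_CID,
            src_port: 41_000,
            dst_port: 9_999,
            len: 0,
            type_: VSOCK_TYPE_STREAM,
            op: VSOCK_OP_REQUEST,
            flags: 0,
            buf_alloc: OUR_BUF_ALLOC,
            fwd_cnt: 0,
        };
        let resp = muxer.handle_tx(&hdr, &[]);
        assert_eq!(resp.len(), 1);
        assert_eq!(resp[0].hdr.op, VSOCK_OP_RST);
        Ok(())
    }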

    #[test]
    fn malformed_tsi_connect_returns_einval() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;
        let hdr = control_header(TSI_CONNECT);

        let mut body = Vec::new();
        body.extend_from_slice(&1234u32.to_le_bytes());
        body.extend_from_slice(&16u32.to_le_bytes());

        let resp = muxer.handle_tsi_control(&hdr, &body);
        assert_eq!(response_i32(&resp), Some(-22));
        Ok(())
    }
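
    // A small sketch of the env-service handshake described in
    // handle_request: a stream REQUEST to VSOCK_ENV_PORT is answered with
    // RESPONSE + RW(json) + SHUTDOWN, using only this file's own types.
    #[test]
    fn env_port_request_returns_json_handshake() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;
        let hdr = Header {
            src_cid: 3,
            dst_cid: VSOCK_HOST_CID,
            src_port: 50_000,
            dst_port: VSOCK_ENV_PORT,
            len: 0,
            type_: VSOCK_TYPE_STREAM,
            op: VSOCK_OP_REQUEST,
            flags: 0,
            buf_alloc: OUR_BUF_ALLOC,
            fwd_cnt: 0,
        };

        let resp = muxer.handle_tx(&hdr, &[]);
        assert_eq!(resp.len(), 3);
        assert_eq!(resp[0].hdr.op, VSOCK_OP_RESPONSE);
        assert_eq!(resp[1].hdr.op, VSOCK_OP_RW);
        assert_eq!(resp[2].hdr.op, VSOCK_OP_SHUTDOWN);
        // Replies are addressed back to the requesting socket, and the
        // RW packet's header length matches its payload.
        assert_eq!(resp[0].hdr.dst_port, hdr.src_port);
        assert_eq!(resp[1].hdr.len as usize, resp[1].data.len());
        Ok(())
    }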

    #[test]
    fn malformed_tsi_sendto_addr_is_ignored() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;
        let hdr = control_header(TSI_SENDTO_ADDR);

        let mut body = Vec::new();
        body.extend_from_slice(&1234u32.to_le_bytes());
        body.extend_from_slice(&16u32.to_le_bytes());

        let resp = muxer.handle_tsi_control(&hdr, &body);
        assert!(resp.is_empty());
        Ok(())
    }
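
    // Sketches of the TSI control-DGRAM bookkeeping, driven through the
    // public handle_tx dispatcher (DGRAMs with dst_port 1024..=1031 are
    // routed to handle_tsi_control).
    #[test]
    fn proxy_create_then_accept_round_trip() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;

        // PROXY_CREATE registers (cid, peer_port) but sends no reply.
        let mut body = Vec::new();
        body.extend_from_slice(&4242u32.to_le_bytes());
        body.extend_from_slice(&(libc::AF_INET as u16).to_le_bytes());
        body.extend_from_slice(&(libc::SOCK_STREAM as u16).to_le_bytes());
        let resp = muxer.handle_tx(&control_header(TSI_PROXY_CREATE), &body);
        assert!(resp.is_empty());

        // No TSI_LISTEN yet, so nothing is captured as a listener.
        assert_eq!(muxer.listener_count(), 0);
        assert!(muxer.capture_tsi_listeners().is_empty());

        // ACCEPT succeeds for the registered peer_port, fails otherwise.
        let resp = muxer.handle_tx(&control_header(TSI_ACCEPT), &4242u32.to_le_bytes());
        assert_eq!(response_i32(&resp), Some(0));
        let resp = muxer.handle_tx(&control_header(TSI_ACCEPT), &9999u32.to_le_bytes());
        assert_eq!(response_i32(&resp), Some(-22));
        Ok(())
    }

    #[test]
    fn non_inet_tsi_connect_returns_eafnosupport() -> Result<(), Box<dyn std::error::Error>> {
        let muxer = VsockMuxer::new(3)?;

        // Well-formed TSI_CONNECT body, but with a non-AF_INET family.
        let mut body = Vec::new();
        body.extend_from_slice(&1234u32.to_le_bytes()); // peer_port
        body.extend_from_slice(&16u32.to_le_bytes()); // addr_len
        body.extend_from_slice(&10u16.to_le_bytes()); // family != AF_INET
        body.extend_from_slice(&[0u8; 14]); // rest of the sockaddr bytes

        let resp = muxer.handle_tx(&control_header(TSI_CONNECT), &body);
        assert_eq!(response_i32(&resp), Some(-97)); // -EAFNOSUPPORT
        Ok(())
    }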
}