// rusty_enet/c/host.rs

1use core::{alloc::Layout, mem::MaybeUninit, ptr::write_bytes, time::Duration};
2
3use crate::{
4    consts::*, enet_free, enet_list_clear, enet_malloc, enet_packet_destroy,
5    enet_peer_queue_outgoing_command, enet_peer_reset, enet_peer_send, enet_time_get, Box,
6    Compressor, ENetBuffer, ENetChannel, ENetList, ENetPacket, ENetPeer, ENetProtocol,
7    ENetProtocolCommandHeader, Socket, SocketOptions, ENET_PEER_STATE_CONNECTED,
8    ENET_PEER_STATE_CONNECTING, ENET_PEER_STATE_DISCONNECTED, ENET_PEER_STATE_DISCONNECT_LATER,
9    ENET_PROTOCOL_COMMAND_BANDWIDTH_LIMIT, ENET_PROTOCOL_COMMAND_CONNECT,
10    ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE,
11};
12
/// Internal host state: the transport socket, the peer table, bandwidth
/// accounting, and the scratch buffers used while assembling outgoing
/// protocol packets.
///
/// `MaybeUninit` fields are written in `enet_host_create` and dropped in
/// `enet_host_destroy`; this type appears to be a direct port of the C
/// `ENetHost` struct (field-for-field), so the layout should not be reordered
/// casually.
#[allow(clippy::type_complexity)]
pub(crate) struct ENetHost<S: Socket> {
    // Transport socket; written by `enet_host_create` after `init` succeeds.
    pub(crate) socket: MaybeUninit<S>,
    // Bandwidth limits in bytes/interval; 0 is treated as unlimited
    // (see the window-size logic in `enet_host_connect`).
    pub(crate) incoming_bandwidth: u32,
    pub(crate) outgoing_bandwidth: u32,
    // Timestamp of the last `enet_host_bandwidth_throttle` run.
    pub(crate) bandwidth_throttle_epoch: u32,
    pub(crate) mtu: u32,
    // PRNG state consumed/advanced by `enet_host_random`.
    pub(crate) random_seed: u32,
    // Non-zero requests the bandwidth-limit recalculation phase of
    // `enet_host_bandwidth_throttle`.
    pub(crate) recalculate_bandwidth_limits: i32,
    // Heap array of `peer_count` entries, allocated in `enet_host_create`
    // and freed in `enet_host_destroy`.
    pub(crate) peers: *mut ENetPeer<S>,
    pub(crate) peer_count: usize,
    // Channel count clamped to the protocol min/max range.
    pub(crate) channel_limit: usize,
    pub(crate) service_time: u32,
    pub(crate) dispatch_queue: ENetList,
    pub(crate) total_queued: u32,
    pub(crate) packet_size: usize,
    pub(crate) header_flags: u16,
    // Scratch command/buffer arrays used while building an outgoing packet.
    pub(crate) commands: [ENetProtocol; PROTOCOL_MAXIMUM_PACKET_COMMANDS as usize],
    pub(crate) command_count: usize,
    pub(crate) buffers: [ENetBuffer; BUFFER_MAXIMUM as usize],
    pub(crate) buffer_count: usize,
    // Optional packet checksum callback (None disables checksumming).
    pub(crate) checksum: MaybeUninit<Option<Box<dyn Fn(&[&[u8]]) -> u32>>>,
    // Clock source; sampled by `enet_time_get`.
    pub(crate) time: MaybeUninit<Box<dyn Fn() -> Duration>>,
    // Optional packet compressor (None disables compression).
    pub(crate) compressor: MaybeUninit<Option<Box<dyn Compressor>>>,
    // Two MTU-sized receive/scratch buffers.
    pub(crate) packet_data: [[u8; PROTOCOL_MAXIMUM_MTU]; 2],
    // Source address / payload of the most recently received datagram.
    pub(crate) received_address: MaybeUninit<Option<S::Address>>,
    pub(crate) received_data: *mut u8,
    pub(crate) received_data_length: usize,
    // Lifetime traffic statistics.
    pub(crate) total_sent_data: u32,
    pub(crate) total_sent_packets: u32,
    pub(crate) total_received_data: u32,
    pub(crate) total_received_packets: u32,
    pub(crate) connected_peers: usize,
    pub(crate) bandwidth_limited_peers: usize,
    // Maximum peers allowed from the same address (defaults to the protocol
    // peer-id maximum, i.e. effectively unlimited).
    pub(crate) duplicate_peers: usize,
    pub(crate) maximum_packet_size: usize,
    pub(crate) maximum_waiting_data: usize,
}
51pub(crate) unsafe fn enet_host_create<S: Socket>(
52    mut socket: S,
53    peer_count: usize,
54    mut channel_limit: usize,
55    incoming_bandwidth: u32,
56    outgoing_bandwidth: u32,
57    time: Box<dyn Fn() -> Duration>,
58    seed: Option<u32>,
59) -> Result<*mut ENetHost<S>, S::Error> {
60    let mut current_peer: *mut ENetPeer<S>;
61    let host: *mut ENetHost<S> = enet_malloc(Layout::new::<ENetHost<S>>()).cast();
62    write_bytes(host, 0, 1);
63    (*host).peers = enet_malloc(Layout::array::<ENetPeer<S>>(peer_count).unwrap()).cast();
64    write_bytes((*host).peers, 0, peer_count);
65    socket.init(SocketOptions {
66        receive_buffer: HOST_RECEIVE_BUFFER_SIZE as usize,
67        send_buffer: HOST_SEND_BUFFER_SIZE as usize,
68    })?;
69    (*host).socket.write(socket);
70    if channel_limit == 0 || channel_limit > PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize {
71        channel_limit = PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize;
72    } else if channel_limit < PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize {
73        channel_limit = PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize;
74    }
75    (*host).time.write(time);
76    if let Some(seed) = seed {
77        (*host).random_seed = seed;
78    } else {
79        (*host).random_seed = host as usize as u32;
80        (*host).random_seed =
81            ((*host).random_seed as u32).wrapping_add(enet_time_get(host)) as u32 as u32;
82        (*host).random_seed = (*host).random_seed.rotate_right(16);
83    }
84    (*host).channel_limit = channel_limit;
85    (*host).incoming_bandwidth = incoming_bandwidth;
86    (*host).outgoing_bandwidth = outgoing_bandwidth;
87    (*host).bandwidth_throttle_epoch = 0_i32 as u32;
88    (*host).recalculate_bandwidth_limits = 0_i32;
89    (*host).mtu = HOST_DEFAULT_MTU as i32 as u32;
90    (*host).peer_count = peer_count;
91    (*host).command_count = 0_i32 as usize;
92    (*host).buffer_count = 0_i32 as usize;
93    (*host).checksum.write(None);
94    (*host).received_address.write(None);
95    (*host).received_data = core::ptr::null_mut();
96    (*host).received_data_length = 0_i32 as usize;
97    (*host).total_sent_data = 0_i32 as u32;
98    (*host).total_sent_packets = 0_i32 as u32;
99    (*host).total_received_data = 0_i32 as u32;
100    (*host).total_received_packets = 0_i32 as u32;
101    (*host).total_queued = 0_i32 as u32;
102    (*host).connected_peers = 0_i32 as usize;
103    (*host).bandwidth_limited_peers = 0_i32 as usize;
104    (*host).duplicate_peers = PROTOCOL_MAXIMUM_PEER_ID as i32 as usize;
105    (*host).maximum_packet_size = HOST_DEFAULT_MAXIMUM_PACKET_SIZE as i32 as usize;
106    (*host).maximum_waiting_data = HOST_DEFAULT_MAXIMUM_WAITING_DATA as i32 as usize;
107    (*host).compressor.write(None);
108    enet_list_clear(&mut (*host).dispatch_queue);
109    current_peer = (*host).peers;
110    while current_peer < ((*host).peers).add((*host).peer_count) {
111        (*current_peer).host = host;
112        (*current_peer).incoming_peer_id = current_peer.offset_from((*host).peers) as i64 as u16;
113        (*current_peer).incoming_session_id = 0xff_i32 as u8;
114        (*current_peer).outgoing_session_id = (*current_peer).incoming_session_id;
115        (*current_peer).address.write(None);
116        (*current_peer).data = core::ptr::null_mut();
117        enet_list_clear(&mut (*current_peer).acknowledgements);
118        enet_list_clear(&mut (*current_peer).sent_reliable_commands);
119        enet_list_clear(&mut (*current_peer).outgoing_commands);
120        enet_list_clear(&mut (*current_peer).outgoing_send_reliable_commands);
121        enet_list_clear(&mut (*current_peer).dispatched_commands);
122        enet_peer_reset(current_peer);
123        current_peer = current_peer.offset(1);
124    }
125    Ok(host)
126}
127pub(crate) unsafe fn enet_host_destroy<S: Socket>(host: *mut ENetHost<S>) {
128    let mut current_peer: *mut ENetPeer<S>;
129    if host.is_null() {
130        return;
131    }
132    (*host).socket.assume_init_drop();
133    current_peer = (*host).peers;
134    while current_peer < ((*host).peers).add((*host).peer_count) {
135        enet_peer_reset(current_peer);
136        (*current_peer).address.assume_init_drop();
137        current_peer = current_peer.offset(1);
138    }
139    (*host).checksum.assume_init_drop();
140    (*host).time.assume_init_drop();
141    (*host).compressor.assume_init_drop();
142    (*host).received_address.assume_init_drop();
143    enet_free(
144        (*host).peers.cast(),
145        Layout::array::<ENetPeer<S>>((*host).peer_count).unwrap(),
146    );
147    enet_free(host.cast(), Layout::new::<ENetHost<S>>());
148}
149pub(crate) unsafe fn enet_host_random<S: Socket>(host: *mut ENetHost<S>) -> u32 {
150    (*host).random_seed = (*host).random_seed.wrapping_add(0x6d2b79f5_u32);
151    let mut n: u32 = (*host).random_seed;
152    n = (n ^ n >> 15_i32).wrapping_mul(n | 1_u32);
153    n ^= n.wrapping_add((n ^ n >> 7_i32).wrapping_mul(n | 61_u32));
154    n ^ n >> 14_i32
155}
/// Initiates a connection to `address`, returning the peer slot used for the
/// handshake, or a null pointer when every peer slot is in use.
///
/// `channel_count` is clamped to the protocol range; `data` is an opaque
/// user value carried inside the CONNECT command.
pub(crate) unsafe fn enet_host_connect<S: Socket>(
    host: *mut ENetHost<S>,
    address: S::Address,
    mut channel_count: usize,
    data: u32,
) -> *mut ENetPeer<S> {
    let mut current_peer: *mut ENetPeer<S>;
    let mut channel: *mut ENetChannel;
    // Zero-initialized protocol command, filled in as a CONNECT below.
    let mut command: ENetProtocol = ENetProtocol {
        header: ENetProtocolCommandHeader {
            command: 0,
            channel_id: 0,
            reliable_sequence_number: 0,
        },
    };
    // Clamp the requested channel count to the protocol's valid range.
    if channel_count < PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize {
        channel_count = PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize;
    } else if channel_count > PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize {
        channel_count = PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize;
    }
    // Scan for the first disconnected (free) peer slot.
    current_peer = (*host).peers;
    while current_peer < ((*host).peers).add((*host).peer_count) {
        if (*current_peer).state == ENET_PEER_STATE_DISCONNECTED as i32 as u32 {
            break;
        }
        current_peer = current_peer.offset(1);
    }
    // All slots occupied: the connection cannot be started.
    if current_peer >= ((*host).peers).add((*host).peer_count) {
        return core::ptr::null_mut();
    }
    // Allocate per-channel state and move the peer into CONNECTING.
    (*current_peer).channels =
        enet_malloc(Layout::array::<ENetChannel>(channel_count).unwrap()).cast();
    (*current_peer).channel_count = channel_count;
    (*current_peer).state = ENET_PEER_STATE_CONNECTING;
    *(*current_peer).address.assume_init_mut() = Some(address);
    (*current_peer).connect_id = enet_host_random(host);
    (*current_peer).mtu = (*host).mtu;
    // Initial window size: maximum when outgoing bandwidth is unlimited
    // (zero), otherwise scaled from the host bandwidth, then clamped.
    if (*host).outgoing_bandwidth == 0_i32 as u32 {
        (*current_peer).window_size = PROTOCOL_MAXIMUM_WINDOW_SIZE as i32 as u32;
    } else {
        (*current_peer).window_size = ((*host).outgoing_bandwidth)
            .wrapping_div(PEER_WINDOW_SIZE_SCALE as i32 as u32)
            .wrapping_mul(PROTOCOL_MINIMUM_WINDOW_SIZE as i32 as u32);
    }
    if (*current_peer).window_size < PROTOCOL_MINIMUM_WINDOW_SIZE as i32 as u32 {
        (*current_peer).window_size = PROTOCOL_MINIMUM_WINDOW_SIZE as i32 as u32;
    } else if (*current_peer).window_size > PROTOCOL_MAXIMUM_WINDOW_SIZE as i32 as u32 {
        (*current_peer).window_size = PROTOCOL_MAXIMUM_WINDOW_SIZE as i32 as u32;
    }
    // Reset sequence numbers, queues and window tracking on every channel.
    channel = (*current_peer).channels;
    while channel < ((*current_peer).channels).add(channel_count) {
        (*channel).outgoing_reliable_sequence_number = 0_i32 as u16;
        (*channel).outgoing_unreliable_sequence_number = 0_i32 as u16;
        (*channel).incoming_reliable_sequence_number = 0_i32 as u16;
        (*channel).incoming_unreliable_sequence_number = 0_i32 as u16;
        enet_list_clear(&mut (*channel).incoming_reliable_commands);
        enet_list_clear(&mut (*channel).incoming_unreliable_commands);
        (*channel).used_reliable_windows = 0_i32 as u16;
        write_bytes(((*channel).reliable_windows).as_mut_ptr(), 0, 16);
        channel = channel.offset(1);
    }
    // Build the CONNECT command; multi-byte fields go on the wire big-endian
    // (hence the `.to_be()` conversions).
    command.header.command = (ENET_PROTOCOL_COMMAND_CONNECT as i32
        | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE as i32) as u8;
    // 0xff marks a command not bound to a specific channel.
    command.header.channel_id = 0xff_i32 as u8;
    command.connect.outgoing_peer_id = (*current_peer).incoming_peer_id.to_be();
    command.connect.incoming_session_id = (*current_peer).incoming_session_id;
    command.connect.outgoing_session_id = (*current_peer).outgoing_session_id;
    command.connect.mtu = (*current_peer).mtu.to_be();
    command.connect.window_size = (*current_peer).window_size.to_be();
    command.connect.channel_count = (channel_count as u32).to_be();
    command.connect.incoming_bandwidth = (*host).incoming_bandwidth.to_be();
    command.connect.outgoing_bandwidth = (*host).outgoing_bandwidth.to_be();
    command.connect.packet_throttle_interval = (*current_peer).packet_throttle_interval.to_be();
    command.connect.packet_throttle_acceleration =
        (*current_peer).packet_throttle_acceleration.to_be();
    command.connect.packet_throttle_deceleration =
        (*current_peer).packet_throttle_deceleration.to_be();
    command.connect.connect_id = (*current_peer).connect_id;
    command.connect.data = data.to_be();
    // Queue the handshake for transmission on the chosen peer.
    enet_peer_queue_outgoing_command(
        current_peer,
        &command,
        core::ptr::null_mut(),
        0_i32 as u32,
        0_i32 as u16,
    );
    current_peer
}
244pub(crate) unsafe fn enet_host_broadcast<S: Socket>(
245    host: *mut ENetHost<S>,
246    channel_id: u8,
247    packet: *mut ENetPacket,
248) {
249    let mut current_peer: *mut ENetPeer<S>;
250    current_peer = (*host).peers;
251    while current_peer < ((*host).peers).add((*host).peer_count) {
252        if (*current_peer).state == ENET_PEER_STATE_CONNECTED as i32 as u32 {
253            // TODO: do we really want to ignore the result type here?
254            _ = enet_peer_send(current_peer, channel_id, packet);
255        }
256        current_peer = current_peer.offset(1);
257    }
258    if (*packet).reference_count == 0_i32 as usize {
259        enet_packet_destroy(packet);
260    }
261}
262pub(crate) unsafe fn enet_host_compress<S: Socket>(
263    host: *mut ENetHost<S>,
264    compressor: Option<Box<dyn Compressor>>,
265) {
266    *(*host).compressor.assume_init_mut() = compressor;
267}
268pub(crate) unsafe fn enet_host_channel_limit<S: Socket>(
269    host: *mut ENetHost<S>,
270    mut channel_limit: usize,
271) {
272    if channel_limit == 0 || channel_limit > PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize {
273        channel_limit = PROTOCOL_MAXIMUM_CHANNEL_COUNT as i32 as usize;
274    } else if channel_limit < PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize {
275        channel_limit = PROTOCOL_MINIMUM_CHANNEL_COUNT as i32 as usize;
276    }
277    (*host).channel_limit = channel_limit;
278}
279pub(crate) unsafe fn enet_host_bandwidth_limit<S: Socket>(
280    host: *mut ENetHost<S>,
281    incoming_bandwidth: u32,
282    outgoing_bandwidth: u32,
283) {
284    (*host).incoming_bandwidth = incoming_bandwidth;
285    (*host).outgoing_bandwidth = outgoing_bandwidth;
286    (*host).recalculate_bandwidth_limits = 1_i32;
287}
/// Periodically redistributes the host's outgoing bandwidth between connected
/// peers, and — when `recalculate_bandwidth_limits` is set — recomputes
/// per-peer incoming bandwidth limits and sends each connected peer a
/// BANDWIDTH_LIMIT command.
pub(crate) unsafe fn enet_host_bandwidth_throttle<S: Socket>(host: *mut ENetHost<S>) {
    let time_current: u32 = enet_time_get(host);
    // Wrapping subtraction: the clock is a free-running u32 counter
    // (presumably milliseconds, given the `/ 1000` scaling below).
    let elapsed_time: u32 = time_current.wrapping_sub((*host).bandwidth_throttle_epoch);
    let mut peers_remaining: u32 = (*host).connected_peers as u32;
    // "Unlimited" sentinels (all-ones) until a finite outgoing bandwidth
    // replaces them below.
    let mut data_total: u32 = !0_i32 as u32;
    let mut bandwidth: u32 = !0_i32 as u32;
    let mut throttle: u32;
    let mut bandwidth_limit: u32 = 0_i32 as u32;
    let mut needs_adjustment = (*host).bandwidth_limited_peers > 0_usize;
    let mut peer: *mut ENetPeer<S>;
    let mut command: ENetProtocol = ENetProtocol {
        header: ENetProtocolCommandHeader {
            command: 0,
            channel_id: 0,
            reliable_sequence_number: 0,
        },
    };
    // Run at most once per throttle interval.
    if elapsed_time < HOST_BANDWIDTH_THROTTLE_INTERVAL as i32 as u32 {
        return;
    }
    (*host).bandwidth_throttle_epoch = time_current;
    if peers_remaining == 0_i32 as u32 {
        return;
    }
    // With a finite outgoing bandwidth, total what connected peers queued
    // this interval and compute the byte budget for the elapsed time.
    if (*host).outgoing_bandwidth != 0_i32 as u32 {
        data_total = 0_i32 as u32;
        bandwidth = ((*host).outgoing_bandwidth)
            .wrapping_mul(elapsed_time)
            .wrapping_div(1000_i32 as u32);
        peer = (*host).peers;
        while peer < ((*host).peers).add((*host).peer_count) {
            if !((*peer).state != ENET_PEER_STATE_CONNECTED as i32 as u32
                && (*peer).state != ENET_PEER_STATE_DISCONNECT_LATER as i32 as u32)
            {
                data_total = data_total.wrapping_add((*peer).outgoing_data_total);
            }
            peer = peer.offset(1);
        }
    }
    // Pass 1: peers whose own incoming bandwidth is the bottleneck get an
    // individual throttle limit. Each such peer leaves the pool and shrinks
    // both the budget and the demand, so the loop repeats until stable.
    while peers_remaining > 0_i32 as u32 && needs_adjustment {
        needs_adjustment = false;
        if data_total <= bandwidth {
            throttle = PEER_PACKET_THROTTLE_SCALE as i32 as u32;
        } else {
            throttle = bandwidth
                .wrapping_mul(PEER_PACKET_THROTTLE_SCALE as i32 as u32)
                .wrapping_div(data_total);
        }
        peer = (*host).peers;
        while peer < ((*host).peers).add((*host).peer_count) {
            let peer_bandwidth: u32;
            // Skip peers that are not connected, have unlimited incoming
            // bandwidth, or were already handled this epoch.
            if !((*peer).state != ENET_PEER_STATE_CONNECTED as i32 as u32
                && (*peer).state != ENET_PEER_STATE_DISCONNECT_LATER as i32 as u32
                || (*peer).incoming_bandwidth == 0_i32 as u32
                || (*peer).outgoing_bandwidth_throttle_epoch == time_current)
            {
                peer_bandwidth = ((*peer).incoming_bandwidth)
                    .wrapping_mul(elapsed_time)
                    .wrapping_div(1000_i32 as u32);
                // Only limit peers whose throttled share still exceeds what
                // their own incoming bandwidth can absorb.
                if throttle
                    .wrapping_mul((*peer).outgoing_data_total)
                    .wrapping_div(PEER_PACKET_THROTTLE_SCALE as i32 as u32)
                    > peer_bandwidth
                {
                    (*peer).packet_throttle_limit = peer_bandwidth
                        .wrapping_mul(PEER_PACKET_THROTTLE_SCALE as i32 as u32)
                        .wrapping_div((*peer).outgoing_data_total);
                    if (*peer).packet_throttle_limit == 0_i32 as u32 {
                        (*peer).packet_throttle_limit = 1_i32 as u32;
                    }
                    if (*peer).packet_throttle > (*peer).packet_throttle_limit {
                        (*peer).packet_throttle = (*peer).packet_throttle_limit;
                    }
                    // Mark the peer handled for this epoch and take its share
                    // out of both the budget and the demand.
                    (*peer).outgoing_bandwidth_throttle_epoch = time_current;
                    (*peer).incoming_data_total = 0_i32 as u32;
                    (*peer).outgoing_data_total = 0_i32 as u32;
                    needs_adjustment = true;
                    peers_remaining = peers_remaining.wrapping_sub(1);
                    bandwidth = bandwidth.wrapping_sub(peer_bandwidth);
                    data_total = data_total.wrapping_sub(peer_bandwidth);
                }
            }
            peer = peer.offset(1);
        }
    }
    // Pass 2: every peer not individually limited above shares the same
    // throttle derived from the remaining budget.
    if peers_remaining > 0_i32 as u32 {
        if data_total <= bandwidth {
            throttle = PEER_PACKET_THROTTLE_SCALE as i32 as u32;
        } else {
            throttle = bandwidth
                .wrapping_mul(PEER_PACKET_THROTTLE_SCALE as i32 as u32)
                .wrapping_div(data_total);
        }
        peer = (*host).peers;
        while peer < ((*host).peers).add((*host).peer_count) {
            if !((*peer).state != ENET_PEER_STATE_CONNECTED as i32 as u32
                && (*peer).state != ENET_PEER_STATE_DISCONNECT_LATER as i32 as u32
                || (*peer).outgoing_bandwidth_throttle_epoch == time_current)
            {
                (*peer).packet_throttle_limit = throttle;
                if (*peer).packet_throttle > (*peer).packet_throttle_limit {
                    (*peer).packet_throttle = (*peer).packet_throttle_limit;
                }
                (*peer).incoming_data_total = 0_i32 as u32;
                (*peer).outgoing_data_total = 0_i32 as u32;
            }
            peer = peer.offset(1);
        }
    }
    // Phase 3 (only when requested): split the host's incoming bandwidth
    // across peers and advertise the result via BANDWIDTH_LIMIT commands.
    if (*host).recalculate_bandwidth_limits != 0 {
        (*host).recalculate_bandwidth_limits = 0_i32;
        peers_remaining = (*host).connected_peers as u32;
        bandwidth = (*host).incoming_bandwidth;
        needs_adjustment = true;
        if bandwidth == 0_i32 as u32 {
            // Unlimited incoming bandwidth: advertise 0 (no limit).
            bandwidth_limit = 0_i32 as u32;
        } else {
            // Iteratively carve out peers whose outgoing bandwidth is already
            // below an even split; the remainder is re-split among the rest.
            while peers_remaining > 0_i32 as u32 && needs_adjustment {
                needs_adjustment = false;
                bandwidth_limit = bandwidth.wrapping_div(peers_remaining);
                peer = (*host).peers;
                while peer < ((*host).peers).add((*host).peer_count) {
                    if !((*peer).incoming_bandwidth_throttle_epoch == time_current
                        || (*peer).state != ENET_PEER_STATE_CONNECTED as i32 as u32
                            && (*peer).state != ENET_PEER_STATE_DISCONNECT_LATER as i32 as u32
                        || (*peer).outgoing_bandwidth > 0_i32 as u32
                            && (*peer).outgoing_bandwidth >= bandwidth_limit)
                    {
                        (*peer).incoming_bandwidth_throttle_epoch = time_current;
                        needs_adjustment = true;
                        peers_remaining = peers_remaining.wrapping_sub(1);
                        bandwidth = bandwidth.wrapping_sub((*peer).outgoing_bandwidth);
                    }
                    peer = peer.offset(1);
                }
            }
        }
        // Notify every connected peer of its (possibly individual) limit.
        peer = (*host).peers;
        while peer < ((*host).peers).add((*host).peer_count) {
            if !((*peer).state != ENET_PEER_STATE_CONNECTED as i32 as u32
                && (*peer).state != ENET_PEER_STATE_DISCONNECT_LATER as i32 as u32)
            {
                command.header.command = (ENET_PROTOCOL_COMMAND_BANDWIDTH_LIMIT as i32
                    | ENET_PROTOCOL_COMMAND_FLAG_ACKNOWLEDGE as i32)
                    as u8;
                command.header.channel_id = 0xff_i32 as u8;
                command.bandwidth_limit.outgoing_bandwidth = (*host).outgoing_bandwidth.to_be();
                // Peers carved out above keep their own outgoing bandwidth as
                // the advertised limit; everyone else gets the even split.
                if (*peer).incoming_bandwidth_throttle_epoch == time_current {
                    command.bandwidth_limit.incoming_bandwidth = (*peer).outgoing_bandwidth.to_be();
                } else {
                    command.bandwidth_limit.incoming_bandwidth = bandwidth_limit.to_be();
                }
                enet_peer_queue_outgoing_command(
                    peer,
                    &command,
                    core::ptr::null_mut(),
                    0_i32 as u32,
                    0_i32 as u16,
                );
            }
            peer = peer.offset(1);
        }
    }
}