use mio::{Poll, PollOpt, Ready, Registration, SetReadiness, Token};
use std::io;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
/// Tracks which `Poll` (selector) a handle has been registered with.
///
/// mio 0.6 handles may only ever be registered with a single `Poll`.
/// This guard records the selector's id on first registration and
/// rejects any later attempt to associate the handle with a different
/// selector.
#[derive(Debug)]
pub struct SelectorId {
    // 0 is the "not yet associated" sentinel — assumes mio never hands
    // out 0 as a real selector id (TODO: confirm against the pinned mio
    // version; its id counter appears to start at 1).
    id: AtomicUsize,
}

impl SelectorId {
    /// Creates a `SelectorId` not yet associated with any selector.
    pub fn new() -> SelectorId {
        SelectorId {
            id: AtomicUsize::new(0),
        }
    }

    /// Records `poll`'s selector id, or returns an error if a *different*
    /// selector was already recorded. Re-associating with the same
    /// selector is a no-op `Ok(())`.
    ///
    /// # Errors
    ///
    /// `io::ErrorKind::Other` ("socket already registered") when the
    /// handle is already bound to another selector.
    pub fn associate_selector(&self, poll: &Poll) -> io::Result<()> {
        let poll_id = skinny::selector_id(poll);
        // A single compare-exchange closes the race the previous
        // load-then-store sequence had, where two threads could both pass
        // the check and record different selector ids.
        match self
            .id
            .compare_exchange(0, poll_id, Ordering::SeqCst, Ordering::SeqCst)
        {
            // First association succeeded.
            Ok(_) => Ok(()),
            // Already associated with this very selector — fine.
            Err(existing) if existing == poll_id => Ok(()),
            // Associated with some other selector — reject.
            Err(_) => Err(io::Error::new(
                io::ErrorKind::Other,
                "socket already registered",
            )),
        }
    }
}

impl Clone for SelectorId {
    // `AtomicUsize` is not `Clone`, so snapshot the current value into a
    // fresh atomic. The clone tracks the same association independently.
    fn clone(&self) -> SelectorId {
        SelectorId {
            id: AtomicUsize::new(self.id.load(Ordering::SeqCst)),
        }
    }
}
/// Creates a paired `Registration` / `SetReadiness`, registering the
/// `Registration` half with `poll` under `token` for the given interest
/// set and polling options.
pub fn new_registration(
    poll: &Poll,
    token: Token,
    ready: Ready,
    opt: PollOpt,
) -> (Registration, SetReadiness) {
    // `Registration::new(poll, ...)` is deprecated in mio 0.6, but it is
    // the variant that registers with the `Poll` at construction time,
    // which is exactly what callers of this helper rely on.
    #[allow(deprecated)]
    let pair = Registration::new(poll, token, ready, opt);
    pair
}
/// Back-door accessors for private internals of mio 0.6 types.
///
/// mio does not expose its selector id or the Windows completion-port
/// buffer pool, so this module re-declares mio's private structs with an
/// (assumed) identical memory layout and pointer-casts references across
/// via [`reinterpret_cast`]. This is inherently fragile: neither the
/// mirror structs here nor mio's originals are `#[repr(C)]`, so the
/// default-repr layouts matching is an unchecked assumption — it must be
/// revalidated whenever the pinned mio version changes. The `#[cfg(test)]`
/// modules below only verify that the cast preserves the address, not
/// that field offsets actually agree.
pub mod skinny {
    use mio;
    use std::sync::{atomic::AtomicUsize, Arc, Condvar, Mutex};

    /// Reinterprets `&T` as `&U` through a raw-pointer cast.
    ///
    /// SAFETY (caller's obligation): sound only when `T` and `U` have
    /// identical size, alignment and field layout. That is precisely the
    /// unchecked premise of this module.
    fn reinterpret_cast<T, U>(obj: &T) -> &U {
        unsafe { &*(obj as *const T as *const U) }
    }

    /// Mirrors of mio's Windows-specific internals (the types behind
    /// `mio::windows::Binding`).
    pub mod sys {
        use super::reinterpret_cast;
        use lazycell::AtomicLazyCell;
        use mio::windows::Binding as MiowBinding;
        use miow::iocp::CompletionPort;
        use std::sync::{Arc, Mutex};

        /// Mirror of mio's private `Binding`: lazily populated with the
        /// selector the handle was bound to.
        // NOTE(review): field order/types must match mio's private
        // definition exactly — confirm on every mio version bump.
        #[derive(Debug)]
        struct Binding {
            selector: AtomicLazyCell<Arc<SelectorInner>>,
        }

        /// Mirror of mio's private buffer pool: a LIFO free list of
        /// reusable byte buffers.
        #[derive(Debug)]
        struct BufferPool {
            pool: Vec<Vec<u8>>,
        }

        impl BufferPool {
            /// Creates a pool that pre-reserves room for `cap` buffers.
            /// (`put` uses the `Vec`'s capacity as the retention limit.)
            #[allow(dead_code)]
            pub fn new(cap: usize) -> BufferPool {
                BufferPool {
                    pool: Vec::with_capacity(cap),
                }
            }

            /// Pops a pooled buffer, or allocates a fresh one with
            /// `default_cap` capacity when the pool is empty. A reused
            /// buffer keeps whatever capacity it already had.
            pub fn get(&mut self, default_cap: usize) -> Vec<u8> {
                self.pool
                    .pop()
                    .unwrap_or_else(|| Vec::with_capacity(default_cap))
            }

            /// Returns `buf` to the pool — emptied but with its capacity
            /// retained — or drops it when the pool is already at its
            /// retention limit.
            pub fn put(&mut self, mut buf: Vec<u8>) {
                if self.pool.len() < self.pool.capacity() {
                    // SAFETY: shrinking a `Vec<u8>`'s length to 0 is
                    // always valid — `u8` needs no drop and no elements
                    // beyond the new length are exposed.
                    unsafe {
                        buf.set_len(0);
                    }
                    self.pool.push(buf);
                }
            }
        }

        /// Mirror of mio's Windows `Selector`: a handle on the shared
        /// selector state.
        #[derive(Debug)]
        pub struct Selector {
            inner: Arc<SelectorInner>,
        }

        impl Selector {
            /// Unique id of this selector, as read from the mirrored
            /// inner state.
            pub fn id(&self) -> usize {
                self.inner.id
            }
        }

        /// Mirror of mio's Windows `SelectorInner`. Only `id` and
        /// `buffers` are read here; `port` exists solely to keep the
        /// layout aligned with mio's definition.
        #[derive(Debug)]
        struct SelectorInner {
            id: usize,
            #[allow(dead_code)]
            port: CompletionPort,
            buffers: Mutex<BufferPool>,
        }

        // Reinterpret mio's public `Binding` as our mirror so its private
        // `selector` cell becomes readable. Relies on the layout premise
        // documented on the module.
        impl AsRef<Binding> for MiowBinding {
            fn as_ref(&self) -> &Binding {
                reinterpret_cast(self)
            }
        }

        /// Fetches a buffer from the selector's pool, falling back to a
        /// fresh allocation of `size` capacity when the binding has not
        /// been associated with a selector yet.
        pub fn get_buffer(binding: &MiowBinding, size: usize) -> Vec<u8> {
            match binding.as_ref().selector.borrow() {
                Some(i) => i.buffers.lock().unwrap().get(size),
                None => Vec::with_capacity(size),
            }
        }

        /// Returns `buf` to the selector's pool; silently drops it when
        /// the binding has no selector yet (best-effort recycling).
        pub fn put_buffer(binding: &MiowBinding, buf: Vec<u8>) {
            if let Some(i) = binding.as_ref().selector.borrow() {
                i.buffers.lock().unwrap().put(buf);
            }
        }

        #[cfg(test)]
        mod tests {
            use mio;

            // Address of a reference target, for comparing identity
            // across the reinterpreting cast.
            fn mem_addr<T>(obj: &T) -> usize {
                obj as *const T as usize
            }

            // Sanity check only: the cast preserves the address. It does
            // NOT prove the field layouts of the two types agree.
            #[test]
            fn binding_as_ref_returns_ref_to_same_memory() {
                let binding = &mio::windows::Binding::new();
                let mybinding = binding.as_ref();
                assert_eq!(mem_addr(binding), mem_addr(mybinding));
            }
        }
    }

    /// Mirror of mio's private `Poll`. Only the leading `selector` field
    /// is read; the trailing fields exist to keep the struct's layout in
    /// step with mio's definition.
    // NOTE(review): field order must track mio 0.6's `Poll` exactly —
    // confirm on every mio version bump.
    #[allow(dead_code)]
    #[derive(Debug)]
    struct Poll {
        selector: sys::Selector,
        readiness_queue: ReadinessQueue,
        lock_state: AtomicUsize,
        lock: Mutex<()>,
        condvar: Condvar,
    }

    /// Layout stand-in for mio's `ReadinessQueue`; never dereferenced
    /// beyond existing as a field of the mirrored `Poll`.
    #[derive(Debug)]
    struct ReadinessQueue {
        #[allow(dead_code)]
        inner: Arc<ReadinessQueueInner>,
    }

    // Intentionally empty: only the `Arc` pointer's size matters for the
    // `Poll` mirror's layout, not the pointee's contents.
    #[derive(Debug)]
    struct ReadinessQueueInner {}

    // Reinterpret mio's public `Poll` as our mirror so the private
    // selector becomes reachable. Relies on the layout premise documented
    // on the module.
    impl AsRef<Poll> for mio::Poll {
        fn as_ref(&self) -> &Poll {
            reinterpret_cast(self)
        }
    }

    /// Unique id of the selector backing `poll`, read via the mirrored
    /// layout.
    pub fn selector_id(poll: &mio::Poll) -> usize {
        poll.as_ref().selector.id()
    }

    /// Public wrapper over [`sys::get_buffer`]: borrow a pooled buffer
    /// (or a fresh one of `size` capacity) for this binding.
    pub fn get_buffer(binding: &mio::windows::Binding, size: usize) -> Vec<u8> {
        sys::get_buffer(binding, size)
    }

    /// Public wrapper over [`sys::put_buffer`]: recycle `buf` into the
    /// binding's pool, if it has one.
    pub fn put_buffer(binding: &mio::windows::Binding, buf: Vec<u8>) {
        sys::put_buffer(binding, buf)
    }

    #[cfg(test)]
    mod tests {
        use mio;

        // Address of a reference target, for comparing identity across
        // the reinterpreting cast.
        fn mem_addr<T>(obj: &T) -> usize {
            obj as *const T as usize
        }

        // Sanity check only: the cast preserves the address. It does NOT
        // prove the field layouts of the two types agree.
        #[test]
        fn poll_as_ref_returns_ref_to_same_memory() {
            let poll = &mio::Poll::new().unwrap();
            let mypoll = poll.as_ref();
            assert_eq!(mem_addr(poll), mem_addr(mypoll));
        }
    }
}