use std::sync::atomic::{AtomicUsize, Ordering};
use hibitset::{AtomicBitSet, BitSet, BitSetOr};
use shred::Read;
use error::WrongGeneration;
use join::{Join, ParJoin};
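/// An index is basically the id of an `Entity`.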
pub type Index = u32;
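/// A wrapper for a read-only fetch of the `EntitiesRes` resource; use this
/// in systems to create, delete and look up entities.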
pub type Entities<'a> = Read<'a, EntitiesRes>;
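/// Internal entity allocator. It keeps track of the generation of every
/// index and of which indices are alive, atomically created or atomically
/// killed.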
#[derive(Default, Debug)]
pub(crate) struct Allocator {
    /// The generation stored for every index that has ever been allocated.
    pub(crate) generations: Vec<Generation>,
    /// Indices that are alive (as of the last `merge`).
    alive: BitSet,
    /// Indices that were created atomically and are not yet merged.
    raised: AtomicBitSet,
    /// Indices that were killed atomically and are not yet merged.
    killed: AtomicBitSet,
    /// A hint for where to start searching for a free index.
    start_from: AtomicUsize,
}
impl Allocator {
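    /// Kills a list of entities immediately, requiring exclusive access.
    /// Returns a `WrongGeneration` error if any entity in the list is not
    /// alive.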
pub fn kill(&mut self, delete: &[Entity]) -> Result<(), WrongGeneration> {
for &entity in delete {
let id = entity.id() as usize;
if !self.is_alive(entity) {
return self.del_err(entity);
}
self.alive.remove(entity.id());
self.raised.remove(entity.id());
self.generations[id].die();
            // Lower the allocation cursor so the freed index can be reused.
            if id < self.start_from.load(Ordering::Relaxed) {
                self.start_from.store(id, Ordering::Relaxed);
            }
}
Ok(())
}
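    /// Marks an entity to be killed on the next `merge`, using only shared
    /// access. Returns a `WrongGeneration` error if the entity is not alive.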
pub fn kill_atomic(&self, e: Entity) -> Result<(), WrongGeneration> {
if !self.is_alive(e) {
return self.del_err(e);
}
self.killed.add_atomic(e.id());
Ok(())
}
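    /// Builds the `WrongGeneration` error for a failed deletion.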
pub(crate) fn del_err(&self, e: Entity) -> Result<(), WrongGeneration> {
Err(WrongGeneration {
action: "delete",
actual_gen: self.generations[e.id() as usize],
entity: e,
})
}
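    /// Returns `true` if the entity's generation matches the one currently
    /// stored for its index, taking atomically created entities into account.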
pub fn is_alive(&self, e: Entity) -> bool {
e.gen() == match self.generations.get(e.id() as usize) {
Some(g) if !g.is_alive() && self.raised.contains(e.id()) => g.raised(),
Some(g) => *g,
None => Generation(1),
}
}
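    /// Returns the `Entity` for the given index with its current generation
    /// (including not-yet-merged atomic allocations).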
pub fn entity(&self, id: Index) -> Entity {
let gen = match self.generations.get(id as usize) {
Some(g) if !g.is_alive() && self.raised.contains(id) => g.raised(),
Some(g) => *g,
None => Generation(1),
};
Entity(id, gen)
}
    /// Attempts to raise `start_from` to the given value; does nothing if it
    /// is already at least that high.
    pub fn update_start_from(&self, start_from: usize) {
        loop {
            let current = self.start_from.load(Ordering::Relaxed);
            // Another thread (or we) already moved the cursor far enough.
            if current >= start_from {
                return;
            }
            // Try to publish our value; on failure another thread changed
            // `start_from`, so loop and re-check against the new value.
            if self
                .start_from
                .compare_exchange(current, start_from, Ordering::Relaxed, Ordering::Relaxed)
                .is_ok()
            {
                return;
            }
        }
}
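    /// Allocates a new entity using only shared access. The entity becomes
    /// part of the persistent `alive` set on the next `merge`.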
pub fn allocate_atomic(&self) -> Entity {
let idx = self.start_from.load(Ordering::Relaxed);
        for i in idx.. {
            // `add_atomic` returns the previous value, so `!` means this
            // thread is the one that actually claimed the free bit.
            if !self.alive.contains(i as Index) && !self.raised.add_atomic(i as Index) {
self.update_start_from(i + 1);
let gen = self.generations
.get(i as usize)
.map(|&gen| {
if gen.is_alive() {
gen
} else {
gen.raised()
}
})
.unwrap_or(Generation(1));
return Entity(i as Index, gen);
}
}
panic!("No entities left to allocate")
}
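    /// Allocates a new entity with exclusive access; it is alive immediately.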
pub fn allocate(&mut self) -> Entity {
let idx = self.start_from.load(Ordering::Relaxed);
        for i in idx.. {
            // `BitSet::add` also returns the previous value, so `!` means the
            // index was free and has just been claimed.
            if !self.raised.contains(i as Index) && !self.alive.add(i as Index) {
self.start_from.store(i + 1, Ordering::Relaxed);
while self.generations.len() <= i as usize {
self.generations.push(Generation(0));
}
self.generations[i as usize] = self.generations[i as usize].raised();
return Entity(i as Index, self.generations[i as usize]);
}
}
panic!("No entities left to allocate")
}
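    /// Merges atomically created and killed entities into the persistent
    /// state and returns the entities that were deleted.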
pub fn merge(&mut self) -> Vec<Entity> {
use hibitset::BitSetLike;
        let mut deleted = vec![];
        // Promote atomically created entities: bump their generation and
        // mark them as alive.
        for i in (&self.raised).iter() {
while self.generations.len() <= i as usize {
self.generations.push(Generation(0));
}
self.generations[i as usize] = self.generations[i as usize].raised();
self.alive.add(i);
}
self.raised.clear();
        // If anything was killed, move the allocation cursor back so the
        // freed indices can be reused.
        if let Some(lowest) = (&self.killed).iter().next() {
            if lowest < self.start_from.load(Ordering::Relaxed) as Index {
                self.start_from.store(lowest as usize, Ordering::Relaxed);
            }
        }
        for i in (&self.killed).iter() {
            self.alive.remove(i);
            // Report the entity with its still-alive generation before
            // bumping it to the dead state.
            deleted.push(Entity(i, self.generations[i as usize]));
            self.generations[i as usize].die();
        }
self.killed.clear();
deleted
}
}
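/// An iterator that allocates a new entity on every `next` call. It uses
/// atomic allocation, so it works with shared access to the allocator.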
pub struct CreateIterAtomic<'a>(&'a Allocator);
impl<'a> Iterator for CreateIterAtomic<'a> {
type Item = Entity;
fn next(&mut self) -> Option<Entity> {
Some(self.0.allocate_atomic())
}
}
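/// An `Entity` is an index paired with a generation; the generation
/// distinguishes it from entities that previously used the same index.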
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Entity(Index, Generation);
impl Entity {
#[cfg(test)]
pub fn new(index: Index, gen: Generation) -> Entity {
Entity(index, gen)
}
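    /// Returns the index of the entity.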
#[inline]
pub fn id(&self) -> Index {
self.0
}
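    /// Returns the generation of the entity.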
#[inline]
pub fn gen(&self) -> Generation {
self.1
}
}
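/// The entity resource: create, delete and look up entities through it.
/// It is usually fetched via the `Entities` alias.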
#[derive(Debug, Default)]
pub struct EntitiesRes {
pub(crate) alloc: Allocator,
}
impl EntitiesRes {
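    /// Creates a new entity atomically; it becomes part of the persistent
    /// `alive` set on the next `merge`.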
pub fn create(&self) -> Entity {
self.alloc.allocate_atomic()
}
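    /// Returns an iterator which allocates a new entity on each `next` call.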
pub fn create_iter(&self) -> CreateIterAtomic {
CreateIterAtomic(&self.alloc)
}
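    /// Marks an entity for deletion; the deletion is applied on the next
    /// `merge`.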
pub fn delete(&self, e: Entity) -> Result<(), WrongGeneration> {
self.alloc.kill_atomic(e)
}
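    /// Returns the `Entity` for the given index with its current generation.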
pub fn entity(&self, id: Index) -> Entity {
self.alloc.entity(id)
}
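    /// Returns `true` if the given entity is alive.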
#[inline]
pub fn is_alive(&self, e: Entity) -> bool {
self.alloc.is_alive(e)
}
}
impl<'a> Join for &'a EntitiesRes {
type Type = Entity;
type Value = Self;
type Mask = BitSetOr<&'a BitSet, &'a AtomicBitSet>;
    fn open(self) -> (Self::Mask, Self) {
        // An entity is part of the join if it is alive or was created
        // atomically and not yet merged.
        (BitSetOr(&self.alloc.alive, &self.alloc.raised), self)
    }
unsafe fn get(v: &mut &'a EntitiesRes, idx: Index) -> Entity {
let gen = v.alloc
.generations
.get(idx as usize)
.map(|&gen| {
if gen.is_alive() {
gen
} else {
gen.raised()
}
})
.unwrap_or(Generation(1));
Entity(idx, gen)
}
}
unsafe impl<'a> ParJoin for &'a EntitiesRes {}
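/// The generation of an index. Positive values mean the index is currently
/// alive; non-positive values mean it is dead or was never allocated.
/// Re-allocating an index bumps its generation so stale `Entity` handles can
/// be detected.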
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
pub struct Generation(pub(crate) i32);
impl Generation {
#[cfg(test)]
pub fn new(v: i32) -> Self {
Generation(v)
}
#[inline]
pub fn id(&self) -> i32 {
self.0
}
#[inline]
pub fn is_alive(&self) -> bool {
self.0 > 0
}
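    /// Marks this generation as dead by negating its value.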
fn die(&mut self) {
debug_assert!(self.is_alive());
self.0 = -self.0;
}
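    /// Returns the next alive generation after a dead one (`-n` becomes
    /// `n + 1`).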
fn raised(self) -> Generation {
debug_assert!(!self.is_alive());
Generation(1 - self.0)
}
}