use core::f32;
use std::{
array,
cell::{RefCell, UnsafeCell},
fmt::Debug,
mem,
sync::Arc,
};
use azalea_block::{
BlockState,
properties::{self, SlabKind, StairShape},
};
use azalea_core::{
bitset::FastFixedBitSet,
position::{BlockPos, ChunkPos, ChunkSectionBlockPos},
};
use azalea_physics::collision::BlockWithShape;
use azalea_registry::builtin::BlockKind;
use azalea_world::{World, palette::PalettedContainer};
use parking_lot::RwLock;
use rustc_hash::FxHashMap;
use super::{mining::MiningCache, positions::RelBlockPos};
use crate::pathfinder::positions::SmallChunkSectionPos;
/// Side length, in chunks, of the fixed-size bounded chunk cache.
const MAX_VIEW_DISTANCE: usize = 32;
/// A read-only snapshot cache over the world used by the pathfinder, so
/// repeated block lookups don't have to take the world lock every time.
pub struct CachedWorld {
    /// All `RelBlockPos` lookups are resolved relative to this position.
    origin: BlockPos,
    /// Lowest buildable world y, read once at construction.
    min_y: i32,
    world_lock: Arc<RwLock<World>>,
    /// Fast fixed-size chunk cache, indexed by chunk position modulo
    /// `MAX_VIEW_DISTANCE`. Only used while `unbounded_chunk_cache` is empty;
    /// an empty `CachedChunk` marks an unused slot.
    bounded_chunk_cache: RefCell<[(ChunkPos, CachedChunk); MAX_VIEW_DISTANCE * MAX_VIEW_DISTANCE]>,
    /// Fallback hashmap chunk cache, populated once the bounded cache has had
    /// a slot collision.
    unbounded_chunk_cache: RefCell<FxHashMap<ChunkPos, CachedChunk>>,
    /// Per-section bitsets of passable/solid/standable/water blocks.
    // NOTE(review): both UnsafeCells below are mutated through &self with no
    // synchronization, which is only sound if a CachedWorld is never shared
    // across threads — confirm that invariant at the call sites.
    cached_blocks: UnsafeCell<CachedSections>,
    /// Lazily-allocated direct-mapped memo table of mining costs, indexed by
    /// `calculate_cached_mining_costs_index`.
    #[allow(clippy::type_complexity)]
    cached_mining_costs: UnsafeCell<Option<Box<[(RelBlockPos, f32)]>>>,
}
/// The copied-out palette containers for every section of a chunk, bottom to
/// top. An empty box is used as the "no chunk here" sentinel in the caches.
type CachedChunk = Box<[PalettedContainer<BlockState>]>;
/// Two-level cache of computed [`CachedSection`]s.
pub struct CachedSections {
    /// Direct-mapped fast path, indexed by `fast_section_idx`; checked first.
    pub fast_sections: Box<[Option<CachedSection>; FAST_SECTIONS_CACHE_SIZE]>,
    /// Sorted overflow list for sections whose fast slot was already taken.
    pub fallback_sections: Vec<CachedSection>,
}
/// 16x16x16 slots in the fast direct-mapped section cache.
const FAST_SECTIONS_CACHE_SIZE: usize = 16 * 16 * 16;
/// Direct-mapped cache slot for a section position: 16 residues per axis,
/// packed as `y + 16*x + 256*z` (always < 4096).
fn fast_section_idx(pos: SmallChunkSectionPos) -> usize {
    let x = pos.x as usize % 16;
    let y = pos.y as usize % 16;
    let z = pos.z as usize % 16;
    y + 16 * (x + 16 * z)
}
impl CachedSections {
    /// Looks up the cached section at `pos`, trying the direct-mapped fast
    /// slot first and falling back to a linear scan of the overflow list.
    pub fn get_mut(&mut self, pos: SmallChunkSectionPos) -> Option<&mut CachedSection> {
        let slot = fast_section_idx(pos);
        match &mut self.fast_sections[slot] {
            Some(cached) if cached.pos == pos => return Some(cached),
            _ => {}
        }
        self.fallback_sections.iter_mut().find(|s| s.pos == pos)
    }
    /// Inserts a freshly computed section, preferring the fast slot when it's
    /// free and otherwise keeping the overflow list sorted by position.
    #[inline]
    pub fn insert(&mut self, section: CachedSection) {
        let slot = &mut self.fast_sections[fast_section_idx(section.pos)];
        if slot.is_none() {
            *slot = Some(section);
            return;
        }
        // fast slot is occupied by a different section; binary-search the
        // sorted fallback list for the insertion point
        let insert_at = match self
            .fallback_sections
            .binary_search_by(|s| s.pos.cmp(&section.pos))
        {
            Ok(i) | Err(i) => i,
        };
        self.fallback_sections.insert(insert_at, section);
    }
}
impl Default for CachedSections {
    fn default() -> Self {
        // A Box<[T; N]> this large can't be built on the stack first, so
        // collect a boxed slice of Nones and convert it to the fixed-size form.
        let fast_sections = std::iter::repeat_with(|| None)
            .take(FAST_SECTIONS_CACHE_SIZE)
            .collect::<Box<[_]>>()
            .try_into()
            .unwrap();
        Self {
            fast_sections,
            fallback_sections: Vec::new(),
        }
    }
}
/// The precomputed pathfinding bitsets for a single 16x16x16 chunk section.
pub struct CachedSection {
    /// Which section these bitsets were computed for.
    pub pos: SmallChunkSectionPos,
    /// Boxed because the four 4096-bit sets are large.
    pub bitsets: Box<SectionBitsets>,
}
impl Debug for CachedSection {
    /// Only shows the position; the bitsets are too large to be useful in
    /// debug output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut builder = f.debug_struct("CachedSection");
        builder.field("pos", &self.pos);
        builder.finish()
    }
}
/// One bit per block (16x16x16 = 4096) for each pathfinding predicate,
/// indexed by `u16::from(ChunkSectionBlockPos)`.
#[derive(Default)]
pub struct SectionBitsets {
    // set if the block can be moved through
    pub passable: FastFixedBitSet<4096>,
    // set if the block can be stood on
    pub solid: FastFixedBitSet<4096>,
    // set if the block is solid OR a slab/stairs
    pub standable: FastFixedBitSet<4096>,
    // set if the block is water
    pub water: FastFixedBitSet<4096>,
}
impl CachedWorld {
pub fn new(world_lock: Arc<RwLock<World>>, origin: BlockPos) -> Self {
let min_y = world_lock.read().chunks.min_y();
Self {
origin,
min_y,
world_lock,
bounded_chunk_cache: RefCell::new(array::from_fn(|_| {
(ChunkPos::new(i32::MAX, i32::MAX), Default::default())
})),
unbounded_chunk_cache: Default::default(),
cached_blocks: Default::default(),
cached_mining_costs: UnsafeCell::new(None),
}
}
fn with_section<T>(
&self,
section_pos: SmallChunkSectionPos,
f: impl FnOnce(&azalea_world::palette::PalettedContainer<BlockState>) -> T,
) -> Option<T> {
if section_pos.y * 16 < self.min_y {
return None;
}
let chunk_pos = ChunkPos::new(section_pos.x as i32, section_pos.z as i32);
let section_index =
azalea_world::chunk::section_index(section_pos.y * 16, self.min_y) as usize;
let mut cache_idx = 0;
let mut unbounded_chunk_cache = self.unbounded_chunk_cache.borrow_mut();
let mut bounded_chunk_cache = self.bounded_chunk_cache.borrow_mut();
if unbounded_chunk_cache.is_empty() {
const D: i32 = MAX_VIEW_DISTANCE as i32;
let cache_x = i32::rem_euclid(chunk_pos.x, D) * D;
let cache_z = i32::rem_euclid(chunk_pos.z, D);
cache_idx = (cache_x + cache_z) as usize;
if !bounded_chunk_cache[cache_idx].1.is_empty() {
if bounded_chunk_cache[cache_idx].0 != chunk_pos {
for (moving_chunk_pos, moving_chunk) in bounded_chunk_cache.iter_mut() {
if !moving_chunk.is_empty() {
unbounded_chunk_cache
.insert(*moving_chunk_pos, mem::take(moving_chunk));
}
}
}
let sections = &bounded_chunk_cache[cache_idx].1;
if section_index >= sections.len() {
return None;
};
let section = §ions[section_index];
return Some(f(section));
}
} else if let Some(sections) = unbounded_chunk_cache.get(&chunk_pos) {
if section_index >= sections.len() {
return None;
};
let section = §ions[section_index];
return Some(f(section));
}
let world = self.world_lock.read();
let chunk = world.chunks.get(&chunk_pos)?;
let chunk = chunk.read();
let sections = chunk
.sections
.iter()
.map(|section| section.states.clone())
.collect::<Box<[PalettedContainer<BlockState>]>>();
if section_index >= sections.len() {
return None;
};
let section = §ions[section_index];
let r = f(section);
if unbounded_chunk_cache.is_empty() {
bounded_chunk_cache[cache_idx] = (chunk_pos, sections);
} else {
unbounded_chunk_cache.insert(chunk_pos, sections);
}
Some(r)
}
fn calculate_bitsets_for_section(&self, section_pos: SmallChunkSectionPos) -> CachedSection {
let bitsets = self
.with_section(section_pos, |section| {
let mut bitsets = SectionBitsets {
passable: FastFixedBitSet::<4096>::new(),
solid: FastFixedBitSet::<4096>::new(),
standable: FastFixedBitSet::<4096>::new(),
water: FastFixedBitSet::<4096>::new(),
};
for i in 0..4096 {
let block_state = section.get_at_index(i);
if is_block_state_passable(block_state) {
bitsets.passable.set(i);
}
if is_block_state_solid(block_state) {
bitsets.solid.set(i);
}
if is_block_state_standable(block_state) {
bitsets.standable.set(i);
}
if is_block_state_water(block_state) {
bitsets.water.set(i);
}
}
Box::new(bitsets)
})
.unwrap_or_default();
CachedSection {
pos: section_pos,
bitsets,
}
}
fn check_bitset_for_block(
&self,
pos: BlockPos,
cb: impl FnOnce(&SectionBitsets, usize) -> bool,
) -> bool {
let (section_pos, section_block_pos) = (
SmallChunkSectionPos::from(pos),
ChunkSectionBlockPos::from(pos),
);
let index = u16::from(section_block_pos) as usize;
let cached_blocks = unsafe { &mut *self.cached_blocks.get() };
if let Some(cached) = cached_blocks.get_mut(section_pos) {
return cb(&cached.bitsets, index);
}
let cached = self.calculate_bitsets_for_section(section_pos);
let passable = cb(&cached.bitsets, index);
cached_blocks.insert(cached);
passable
}
pub fn is_block_passable(&self, pos: RelBlockPos) -> bool {
self.is_block_pos_passable(pos.apply(self.origin))
}
fn is_block_pos_passable(&self, pos: BlockPos) -> bool {
self.check_bitset_for_block(pos, |bitsets, index| bitsets.passable.index(index))
}
pub fn is_block_water(&self, pos: RelBlockPos) -> bool {
self.is_block_pos_water(pos.apply(self.origin))
}
fn is_block_pos_water(&self, pos: BlockPos) -> bool {
self.check_bitset_for_block(pos, |bitsets, index| bitsets.water.index(index))
}
pub fn get_block_state(&self, pos: RelBlockPos) -> BlockState {
self.get_block_state_at_pos(pos.apply(self.origin))
}
fn get_block_state_at_pos(&self, pos: BlockPos) -> BlockState {
let (section_pos, section_block_pos) = (
SmallChunkSectionPos::from(pos),
ChunkSectionBlockPos::from(pos),
);
let index = u16::from(section_block_pos) as usize;
self.with_section(section_pos, |section| section.get_at_index(index))
.unwrap_or_default()
}
pub fn is_block_solid(&self, pos: RelBlockPos) -> bool {
self.is_block_pos_solid(pos.apply(self.origin))
}
pub fn is_block_standable(&self, pos: RelBlockPos) -> bool {
self.is_block_pos_standable(pos.apply(self.origin))
}
fn is_block_pos_solid(&self, pos: BlockPos) -> bool {
self.check_bitset_for_block(pos, |bitsets, index| bitsets.solid.index(index))
}
fn is_block_pos_standable(&self, pos: BlockPos) -> bool {
self.check_bitset_for_block(pos, |bitsets, index| bitsets.standable.index(index))
}
pub fn cost_for_breaking_block(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
let cached_mining_costs = self.cached_mining_costs();
let hash_index = calculate_cached_mining_costs_index(pos);
let &(cached_pos, potential_cost) =
unsafe { cached_mining_costs.get_unchecked(hash_index) };
if cached_pos == pos {
return potential_cost;
}
let cost = self.uncached_cost_for_breaking_block(pos, mining_cache);
unsafe {
*cached_mining_costs.get_unchecked_mut(hash_index) = (pos, cost);
};
cost
}
#[allow(clippy::mut_from_ref)]
fn cached_mining_costs(&self) -> &mut [(RelBlockPos, f32)] {
let cached_mining_costs = unsafe { &mut *self.cached_mining_costs.get() };
if let Some(cached_mining_costs) = cached_mining_costs {
return cached_mining_costs;
}
*cached_mining_costs = Some(
vec![(RelBlockPos::new(i16::MAX, i32::MAX, i16::MAX), 0.); CACHED_MINING_COSTS_SIZE]
.into(),
);
cached_mining_costs.as_mut().unwrap()
}
fn uncached_cost_for_breaking_block(
&self,
pos: RelBlockPos,
mining_cache: &MiningCache,
) -> f32 {
if self.is_block_passable(pos) {
return 0.;
}
let rel_pos = pos;
let pos = pos.apply(self.origin);
let (section_pos, section_block_pos) = (
SmallChunkSectionPos::from(pos),
ChunkSectionBlockPos::from(pos),
);
let up_is_in_same_section = section_block_pos.y != 15;
let north_is_in_same_section = section_block_pos.z != 0;
let east_is_in_same_section = section_block_pos.x != 15;
let south_is_in_same_section = section_block_pos.z != 15;
let west_is_in_same_section = section_block_pos.x != 0;
let mut is_falling_block_above = false;
let Some(mut mining_cost) = self.with_section(section_pos, |section| {
let block_state = section.get_at_index(u16::from(section_block_pos) as usize);
let mining_cost = mining_cache.cost_for(block_state);
if mining_cost == f32::INFINITY {
return f32::INFINITY;
}
if up_is_in_same_section {
let up_block = section.get_at_index(u16::from(section_block_pos.up(1)) as usize);
if mining_cache.is_liquid(up_block) {
return f32::INFINITY;
}
if mining_cache.is_falling_block(up_block) {
is_falling_block_above = true;
}
}
if north_is_in_same_section {
let north_block =
section.get_at_index(u16::from(section_block_pos.north(1)) as usize);
if mining_cache.is_liquid(north_block) {
return f32::INFINITY;
}
}
if east_is_in_same_section {
let east_block =
section.get_at_index(u16::from(section_block_pos.east(1)) as usize);
if mining_cache.is_liquid(east_block) {
return f32::INFINITY;
}
}
if south_is_in_same_section {
let south_block =
section.get_at_index(u16::from(section_block_pos.south(1)) as usize);
if mining_cache.is_liquid(south_block) {
return f32::INFINITY;
}
}
if west_is_in_same_section {
let west_block =
section.get_at_index(u16::from(section_block_pos.west(1)) as usize);
if mining_cache.is_liquid(west_block) {
return f32::INFINITY;
}
}
mining_cost
}) else {
let cost = if self.is_block_pos_solid(pos) {
f32::INFINITY
} else {
0.
};
return cost;
};
if mining_cost == f32::INFINITY {
return f32::INFINITY;
}
fn check_should_avoid_this_block(
world: &CachedWorld,
pos: BlockPos,
check: impl FnOnce(BlockState) -> bool,
) -> bool {
let block_state = world
.with_section(SmallChunkSectionPos::from(pos), |section| {
section.get_at_index(u16::from(ChunkSectionBlockPos::from(pos)) as usize)
})
.unwrap_or_default();
check(block_state)
}
if !up_is_in_same_section
&& check_should_avoid_this_block(self, pos.up(1), |b| {
if mining_cache.is_falling_block(b) {
is_falling_block_above = true;
}
mining_cache.is_liquid(b)
})
{
return f32::INFINITY;
}
if !north_is_in_same_section
&& check_should_avoid_this_block(self, pos.north(1), |b| mining_cache.is_liquid(b))
{
return f32::INFINITY;
}
if !east_is_in_same_section
&& check_should_avoid_this_block(self, pos.east(1), |b| mining_cache.is_liquid(b))
{
return f32::INFINITY;
}
if !south_is_in_same_section
&& check_should_avoid_this_block(self, pos.south(1), |b| mining_cache.is_liquid(b))
{
return f32::INFINITY;
}
if !west_is_in_same_section
&& check_should_avoid_this_block(self, pos.west(1), |b| mining_cache.is_liquid(b))
{
return f32::INFINITY;
}
if is_falling_block_above {
mining_cost += self.cost_for_breaking_block(rel_pos.up(1), mining_cache);
}
mining_cost
}
pub fn is_passable(&self, pos: RelBlockPos) -> bool {
self.is_passable_at_block_pos(pos.apply(self.origin))
}
fn is_passable_at_block_pos(&self, pos: BlockPos) -> bool {
self.is_block_pos_passable(pos) && self.is_block_pos_passable(pos.up(1))
}
pub fn cost_for_passing(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
self.cost_for_breaking_block(pos, mining_cache)
+ self.cost_for_breaking_block(pos.up(1), mining_cache)
}
pub fn is_standable(&self, pos: RelBlockPos) -> bool {
self.is_standable_at_block_pos(pos.apply(self.origin))
}
fn is_standable_at_block_pos(&self, pos: BlockPos) -> bool {
self.is_block_pos_standable(pos.down(1)) && self.is_passable_at_block_pos(pos)
}
pub fn cost_for_standing(&self, pos: RelBlockPos, mining_cache: &MiningCache) -> f32 {
if !self.is_block_standable(pos.down(1)) {
return f32::INFINITY;
}
self.cost_for_passing(pos, mining_cache)
}
pub fn fall_distance(&self, pos: RelBlockPos) -> u32 {
let mut distance = 0;
let mut current_pos = pos.down(1);
while self.is_block_passable(current_pos) {
distance += 1;
current_pos = current_pos.down(1);
if current_pos.y < self.min_y {
return u32::MAX;
}
}
distance
}
pub fn origin(&self) -> BlockPos {
self.origin
}
}
/// Number of slots in the mining-cost memo table: 2^18 (6 bits per axis).
const CACHED_MINING_COSTS_SIZE: usize = 2usize.pow(18);
/// Direct-mapped table index for `pos`: the low 6 bits of each coordinate,
/// packed as x (high), z (middle), y (low). Distinct positions can collide;
/// the table stores the position alongside the cost to detect that.
fn calculate_cached_mining_costs_index(pos: RelBlockPos) -> usize {
    const AXIS_BITS: u32 = 6;
    const AXIS_MASK: usize = (1 << AXIS_BITS) - 1;
    let x = pos.x as usize & AXIS_MASK;
    let y = pos.y as usize & AXIS_MASK;
    let z = pos.z as usize & AXIS_MASK;
    let hash_index = (x << (2 * AXIS_BITS)) | (z << AXIS_BITS) | y;
    debug_assert!(hash_index < CACHED_MINING_COSTS_SIZE);
    hash_index
}
/// Whether an entity can move through this block state.
///
/// A block must have an empty collision shape AND not be one of the dangerous
/// or movement-impeding exceptions (liquids, fire, powder snow, berry bushes).
pub fn is_block_state_passable(block_state: BlockState) -> bool {
    // air is the common case
    if block_state.is_air() {
        return true;
    }
    // anything with a collision box blocks movement
    if !block_state.is_collision_shape_empty() {
        return false;
    }
    // waterlogged blocks count as water
    if block_state
        .property::<azalea_block::properties::Waterlogged>()
        .unwrap_or_default()
    {
        return false;
    }
    // seagrass is compared as a full state rather than by registry kind,
    // matching the original behavior
    if block_state == BlockKind::Seagrass.into() {
        return false;
    }
    let registry_block = BlockKind::from(block_state);
    // collisionless blocks we still refuse to walk through
    !matches!(
        registry_block,
        BlockKind::Water
            | BlockKind::Lava
            | BlockKind::Fire
            | BlockKind::SoulFire
            | BlockKind::PowderSnow
            | BlockKind::SweetBerryBush
    )
}
/// Whether this block state is a safe, full-enough surface to stand on.
#[inline]
pub fn is_block_state_solid(block_state: BlockState) -> bool {
    // quick rejection for the common case
    if block_state.is_air() {
        return false;
    }
    if block_state.is_collision_shape_full() {
        // magma blocks are full cubes but damage anything standing on them
        return block_state != BlockState::from(BlockKind::MagmaBlock);
    }
    // top and double slabs present a full walkable top face
    if let Some(properties::SlabKind::Top | properties::SlabKind::Double) =
        block_state.property::<properties::SlabKind>()
    {
        return true;
    }
    // almost-full blocks that are still fine to walk on
    let block = BlockKind::from(block_state);
    matches!(block, BlockKind::DirtPath | BlockKind::Farmland)
}
/// Whether this block state can be stood on: solid blocks, plus slabs and
/// stairs (which aren't "solid" in every orientation but still support us).
pub fn is_block_state_standable(block_state: BlockState) -> bool {
    if block_state.is_air() {
        return false;
    }
    if is_block_state_solid(block_state) {
        return true;
    }
    // having a slab-type or stair-shape property identifies slabs/stairs
    let is_slab = block_state.property::<SlabKind>().is_some();
    let is_stairs = block_state.property::<StairShape>().is_some();
    is_slab || is_stairs
}
/// Whether this block state is water.
///
/// Compares by registry kind rather than full state equality so that flowing
/// water (any non-default `level`) is also detected, not just the source
/// block state.
pub fn is_block_state_water(block_state: BlockState) -> bool {
    BlockKind::from(block_state) == BlockKind::Water
}
#[cfg(test)]
mod tests {
    use azalea_world::{Chunk, ChunkStorage, PartialWorld};
    use super::*;
    // A stone block is not passable; the air directly above it is.
    #[test]
    fn test_is_passable() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();
        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);
        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(!ctx.is_block_pos_passable(BlockPos::new(0, 0, 0)));
        assert!(ctx.is_block_pos_passable(BlockPos::new(0, 1, 0)));
    }
    // A stone block is solid; air is not.
    #[test]
    fn test_is_solid() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();
        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);
        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(ctx.is_block_pos_solid(BlockPos::new(0, 0, 0)));
        assert!(!ctx.is_block_pos_solid(BlockPos::new(0, 1, 0)));
    }
    // Standable requires a solid floor below and two passable blocks of
    // headroom: only y=1 (on top of the stone at y=0) qualifies here.
    #[test]
    fn test_is_standable() {
        let mut partial_world = PartialWorld::default();
        let mut world = ChunkStorage::default();
        partial_world
            .chunks
            .set(&ChunkPos { x: 0, z: 0 }, Some(Chunk::default()), &mut world);
        partial_world.chunks.set_block_state(
            BlockPos::new(0, 0, 0),
            BlockKind::Stone.into(),
            &world,
        );
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 1, 0), BlockState::AIR, &world);
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 2, 0), BlockState::AIR, &world);
        partial_world
            .chunks
            .set_block_state(BlockPos::new(0, 3, 0), BlockState::AIR, &world);
        let ctx = CachedWorld::new(Arc::new(RwLock::new(world.into())), BlockPos::default());
        assert!(ctx.is_standable_at_block_pos(BlockPos::new(0, 1, 0)));
        assert!(!ctx.is_standable_at_block_pos(BlockPos::new(0, 0, 0)));
        assert!(!ctx.is_standable_at_block_pos(BlockPos::new(0, 2, 0)));
    }
}