use crate::cache::ShapeKey;
use crate::ShapedTextResult as BaseShapedTextResult;
use astrelis_core::alloc::HashMap;
use std::sync::Arc;
/// Monotonically increasing identifier the pipeline assigns to each request.
pub type RequestId = u64;
/// All parameters needed to shape one run of text.
#[derive(Debug, Clone)]
pub struct TextShapeRequest {
// Pipeline-assigned unique id for correlating results.
pub id: RequestId,
// The text to shape.
pub text: String,
// Identifier of the font to shape with.
pub font_id: u32,
// Font size in the same units the shaper expects (presumably px — TODO confirm).
pub font_size: f32,
// Optional wrapping width; `None` means no wrapping.
pub wrap_width: Option<f32>,
}
impl TextShapeRequest {
/// Bundles the shaping parameters into a request tagged with `id`.
pub fn new(
id: RequestId,
text: String,
font_id: u32,
font_size: f32,
wrap_width: Option<f32>,
) -> Self {
Self { id, text, font_id, font_size, wrap_width }
}
/// Derives the cache key from everything that affects shaping output:
/// font, size, text content, and wrap width. The request id is
/// deliberately excluded so identical requests share a key.
pub fn shape_key(&self) -> ShapeKey {
let Self { font_id, font_size, text, wrap_width, .. } = self;
ShapeKey::new(*font_id, *font_size, text.as_str(), *wrap_width)
}
}
/// A shaped-text result produced for a specific request, with a counter
/// tracking how many times it has been rendered.
#[derive(Debug, Clone)]
pub struct ShapedTextResult {
// Id of the request that originally produced this result.
pub request_id: RequestId,
// The underlying shaping output (glyphs, bounds, ...).
pub inner: BaseShapedTextResult,
// Number of times this result has been rendered; starts at 0.
pub render_count: u64,
}
impl ShapedTextResult {
/// Wraps a freshly shaped `inner` result for `request_id`,
/// starting with a render count of zero.
pub fn new(request_id: RequestId, inner: BaseShapedTextResult) -> Self {
Self { request_id, inner, render_count: 0 }
}
/// Width/height bounds, forwarded from the base shaping result.
pub fn bounds(&self) -> (f32, f32) {
let (width, height) = self.inner.bounds;
(width, height)
}
/// Bumps the render counter, clamping at `u64::MAX` instead of wrapping.
pub fn increment_render_count(&mut self) {
if self.render_count < u64::MAX {
self.render_count += 1;
}
}
}
/// A text shaper usable from multiple threads (`Send + Sync`).
pub trait TextShaper: Send + Sync {
/// Shapes `request` and returns the result tagged with its id.
fn shape(&mut self, request: TextShapeRequest) -> ShapedTextResult;
}
/// Stateless shaper that performs shaping synchronously on the calling thread.
pub struct SyncTextShaper;
impl Default for SyncTextShaper {
fn default() -> Self {
Self::new()
}
}
impl SyncTextShaper {
/// Creates a new synchronous shaper; there is no state to initialize.
pub fn new() -> Self {
Self
}
/// Runs `shape_fn` over the request's text, size, and wrap width, and
/// packages the output together with the request id.
pub fn shape_with_measurer<F>(request: &TextShapeRequest, shape_fn: F) -> ShapedTextResult
where
F: FnOnce(&str, f32, Option<f32>) -> BaseShapedTextResult,
{
ShapedTextResult::new(
request.id,
shape_fn(request.text.as_str(), request.font_size, request.wrap_width),
)
}
}
/// Request/response pipeline for text shaping with a result cache.
pub struct TextPipeline {
// Requests awaiting processing, keyed by request id.
pending: HashMap<RequestId, TextShapeRequest>,
// Finished results awaiting pickup, keyed by request id.
completed: HashMap<RequestId, Arc<ShapedTextResult>>,
// Next id to hand out; starts at 1 so 0 can serve as a sentinel if needed.
next_request_id: RequestId,
// Shaped results keyed by content (font, size, text, wrap width).
cache: HashMap<ShapeKey, Arc<ShapedTextResult>>,
// Number of requests satisfied from the cache.
pub cache_hits: u64,
// Number of requests that required fresh shaping.
pub cache_misses: u64,
// Total requests ever made (never reset by `clear_cache`).
pub total_requests: u64,
}
impl TextPipeline {
/// Creates an empty pipeline with pre-sized tables.
pub fn new() -> Self {
Self {
pending: HashMap::with_capacity(64),
completed: HashMap::with_capacity(64),
next_request_id: 1,
cache: HashMap::with_capacity(256),
cache_hits: 0,
cache_misses: 0,
total_requests: 0,
}
}
/// Registers a shaping request and returns its id.
///
/// On a cache hit the cached result is placed directly into the completed
/// table and no pending work is queued. NOTE(review): a cached result keeps
/// the `request_id` of the request that originally produced it, so
/// `ShapedTextResult::request_id` may differ from the id returned here —
/// callers should key off the returned id, not the field.
pub fn request_shape(
&mut self,
text: String,
font_id: u32,
font_size: f32,
wrap_width: Option<f32>,
) -> RequestId {
self.total_requests += 1;
let request_id = self.next_request_id;
self.next_request_id += 1;
let request = TextShapeRequest::new(request_id, text, font_id, font_size, wrap_width);
let shape_key = request.shape_key();
if let Some(cached) = self.cache.get(&shape_key).cloned() {
self.cache_hits += 1;
self.completed.insert(request_id, cached);
} else {
self.cache_misses += 1;
self.pending.insert(request_id, request);
}
request_id
}
/// Shapes every pending request with `shape_fn`, storing each result in
/// both the cache and the completed table.
pub fn process_pending<F>(&mut self, shape_fn: F)
where
F: Fn(&str, f32, Option<f32>) -> BaseShapedTextResult,
{
// Draining borrows only the `pending` field, so `cache` and `completed`
// can be updated in the same loop — no intermediate buffer is needed.
for (request_id, request) in self.pending.drain() {
let result = Arc::new(SyncTextShaper::shape_with_measurer(&request, &shape_fn));
self.cache.insert(request.shape_key(), Arc::clone(&result));
self.completed.insert(request_id, result);
}
}
/// Removes and returns the completed result for `request_id`, if present.
pub fn take_completed(&mut self, request_id: RequestId) -> Option<Arc<ShapedTextResult>> {
self.completed.remove(&request_id)
}
/// Returns the completed result for `request_id` without removing it.
pub fn get_completed(&self, request_id: RequestId) -> Option<Arc<ShapedTextResult>> {
self.completed.get(&request_id).cloned()
}
/// True while the request is queued but not yet processed.
pub fn is_pending(&self, request_id: RequestId) -> bool {
self.pending.contains_key(&request_id)
}
/// Returns `(cache_hits, cache_misses, current cache entry count)`.
pub fn cache_stats(&self) -> (u64, u64, usize) {
(self.cache_hits, self.cache_misses, self.cache.len())
}
/// Cache hit rate as a percentage of all requests ever made;
/// 0.0 when no requests have been issued.
pub fn cache_hit_rate(&self) -> f32 {
if self.total_requests == 0 {
return 0.0;
}
(self.cache_hits as f32 / self.total_requests as f32) * 100.0
}
/// Drops every cache entry and resets the hit/miss counters.
/// NOTE(review): `total_requests` is NOT reset, so `cache_hit_rate` after a
/// clear still reflects pre-clear requests — confirm this is intended.
pub fn clear_cache(&mut self) {
self.cache.clear();
self.cache_hits = 0;
self.cache_misses = 0;
}
/// Evicts cache entries that are neither externally referenced
/// (`Arc::strong_count > 1`) nor rendered at least `min_render_count` times.
/// NOTE(review): nothing in this pipeline ever increments `render_count` on
/// cached results (they sit behind shared `Arc`s), so unless callers mutate
/// it before caching, every unreferenced entry is evicted regardless of the
/// threshold — verify against call sites.
pub fn prune_cache(&mut self, min_render_count: u64) {
self.cache.retain(|_, result| {
Arc::strong_count(result) > 1 || result.render_count >= min_render_count
});
}
}
impl Default for TextPipeline {
fn default() -> Self {
Self::new()
}
}
#[cfg(test)]
mod tests {
use super::*;
// Stand-in shaper: ignores its inputs and always reports 100x20 bounds
// with no glyphs, so tests can assert on pipeline mechanics alone.
fn mock_shape(_text: &str, _font_size: f32, _wrap_width: Option<f32>) -> BaseShapedTextResult {
BaseShapedTextResult::new((100.0, 20.0), Vec::new())
}
// A request goes pending, then processing completes it and the result
// carries the mock shaper's bounds.
#[test]
fn test_request_and_process() {
let mut pipeline = TextPipeline::new();
let req_id = pipeline.request_shape("Hello".to_string(), 0, 16.0, None);
assert!(pipeline.is_pending(req_id));
pipeline.process_pending(mock_shape);
assert!(!pipeline.is_pending(req_id));
let result = pipeline.take_completed(req_id);
assert!(result.is_some());
assert_eq!(result.unwrap().bounds(), (100.0, 20.0));
}
// A second identical request is served from the cache: it is completed
// immediately (never pending) and counted as a hit.
#[test]
fn test_cache_hit() {
let mut pipeline = TextPipeline::new();
let req_id1 = pipeline.request_shape("Hello".to_string(), 0, 16.0, None);
pipeline.process_pending(mock_shape);
let _ = pipeline.take_completed(req_id1);
assert_eq!(pipeline.cache_hits, 0);
assert_eq!(pipeline.cache_misses, 1);
let req_id2 = pipeline.request_shape("Hello".to_string(), 0, 16.0, None);
assert_eq!(pipeline.cache_hits, 1);
assert_eq!(pipeline.cache_misses, 1);
assert!(!pipeline.is_pending(req_id2));
let result = pipeline.take_completed(req_id2);
assert!(result.is_some());
}
// Different text must miss the cache and queue fresh work.
#[test]
fn test_content_invalidation() {
let mut pipeline = TextPipeline::new();
let req_id1 = pipeline.request_shape("Hello".to_string(), 0, 16.0, None);
pipeline.process_pending(mock_shape);
let _ = pipeline.take_completed(req_id1);
assert_eq!(pipeline.cache_misses, 1);
let req_id2 = pipeline.request_shape("Hello World".to_string(), 0, 16.0, None);
assert_eq!(pipeline.cache_misses, 2);
assert!(pipeline.is_pending(req_id2));
}
// Nearby wrap widths (402 vs 404) should map to the same ShapeKey bucket
// and therefore hit the cache. NOTE: this depends on ShapeKey's bucketing
// granularity, which is defined in the cache module, not here.
#[test]
fn test_width_bucketing() {
let mut pipeline = TextPipeline::new();
let req_id1 = pipeline.request_shape("Hello".to_string(), 0, 16.0, Some(402.0));
pipeline.process_pending(mock_shape);
let _ = pipeline.take_completed(req_id1);
let _req_id2 = pipeline.request_shape("Hello".to_string(), 0, 16.0, Some(404.0));
assert_eq!(
pipeline.cache_hits, 1,
"Width bucketing should allow cache hit"
);
}
// With no outstanding Arc references and render_count 0 (< threshold 10),
// pruning removes every cache entry.
#[test]
fn test_cache_prune() {
let mut pipeline = TextPipeline::new();
for i in 0..5 {
let req_id = pipeline.request_shape(format!("Text {}", i), 0, 16.0, None);
pipeline.process_pending(mock_shape);
let _ = pipeline.take_completed(req_id);
}
assert_eq!(pipeline.cache.len(), 5);
pipeline.prune_cache(10);
assert_eq!(pipeline.cache.len(), 0, "All entries should be pruned");
}
// Hit rate is hits / total_requests * 100: 0% after one miss,
// 50% after one miss plus one hit.
#[test]
fn test_hit_rate_calculation() {
let mut pipeline = TextPipeline::new();
let req_id = pipeline.request_shape("A".to_string(), 0, 16.0, None);
pipeline.process_pending(mock_shape);
let _ = pipeline.take_completed(req_id);
assert_eq!(pipeline.cache_hit_rate(), 0.0);
let req_id2 = pipeline.request_shape("A".to_string(), 0, 16.0, None);
let _ = pipeline.take_completed(req_id2);
assert_eq!(pipeline.cache_hit_rate(), 50.0);
}
}