// pipe_it/ext.rs
use crate::{handler::Handler, Context, Pipeline};
use std::future::Future;
use std::marker::PhantomData;

5/// Extension trait for any type that implements the Handler trait.
6/// Provides a way to convert a Handler into a Pipeline and combinators.
7pub trait HandlerExt<I, O, Args>: Handler<I, O, Args> {
8 fn pipe(self) -> Pipe<Self, Args>
9 where
10 Self: Sized,
11 {
12 Pipe {
13 p: self,
14 _marker: PhantomData,
15 }
16 }
17
18 /// Connects two pipelines together. Output of the first becomes the input of the second.
19 ///
20 /// # Example
21 ///
22 /// ```rust
23 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
24 ///
25 /// async fn add_one(n: Input<i32>) -> i32 { *n + 1 }
26 /// async fn to_string(n: Input<i32>) -> String { n.to_string() }
27 ///
28 /// # #[tokio::main]
29 /// # async fn main() {
30 /// let pipe = add_one.pipe().connect(to_string);
31 /// let result = pipe.apply(Context::empty(1)).await;
32 /// assert_eq!(result, "2");
33 /// # }
34 /// ```
35 fn connect<O2, G, Args2>(self, g: G) -> Connect<Pipe<Self, Args>, Pipe<G, Args2>, I, O, O2>
36 where
37 G: Handler<O, O2, Args2>,
38 Self: Sized,
39 {
40 Connect {
41 f: self.pipe(),
42 g: g.pipe(),
43 _marker: PhantomData,
44 }
45 }
46
47 /// Pulls back the domain of the pipeline.
48 /// This allows a pipeline defined on `I` to be used for input `I2` given a mapping `I2 -> I`.
49 /// The mapping function is now a full Handler (async, supports DI).
50 ///
51 /// # Example
52 ///
53 /// ```rust
54 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
55 ///
56 /// #[derive(Clone)]
57 /// struct User { age: i32 }
58 ///
59 /// // Pre-processing step: extract age from User.
60 /// // Supports DI and ownership extraction.
61 /// async fn get_age(user: Input<User>) -> i32 {
62 /// // We can try_unwrap to get ownership if needed (optimization)
63 /// user.try_unwrap().map(|u| u.age).unwrap_or_else(|u| u.age)
64 /// }
65 ///
66 /// async fn check_adult(age: Input<i32>) -> bool { *age >= 18 }
67 ///
68 /// # #[tokio::main]
69 /// # async fn main() {
70 /// // Input: User -> (get_age) -> i32 -> (check_adult) -> bool
71 /// let pipe = check_adult.pipe().pullback(get_age);
72 /// let result = pipe.apply(Context::empty(User { age: 20 })).await;
73 /// assert_eq!(result, true);
74 /// # }
75 /// ```
76 fn pullback<I2, H, Args2>(self, handler: H) -> Pullback<Pipe<Self, Args>, Pipe<H, Args2>, I2, I, O>
77 where
78 H: Handler<I2, I, Args2>,
79 Self: Sized,
80 {
81 Pullback {
82 p: self.pipe(),
83 map: handler.pipe(),
84 _marker: PhantomData,
85 }
86 }
87
88 /// Lifts the domain requirement to anything that can be converted into the original input.
89 /// Uses `From` / `Into` trait.
90 ///
91 /// # Example
92 ///
93 /// ```rust
94 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
95 ///
96 /// async fn process_string(s: Input<String>) -> usize { s.len() }
97 ///
98 /// # #[tokio::main]
99 /// # async fn main() {
100 /// // Accepts &str because String implements From<&str>
101 /// let pipe = process_string.pipe().lift::<&str>();
102 /// let result = pipe.apply(Context::empty("hello")).await;
103 /// assert_eq!(result, 5);
104 /// # }
105 /// ```
106 fn lift<I2>(self) -> Pullback<Pipe<Self, Args>, Pipe<LiftHandler<I, I2>, (crate::Input<I2>,)>, I2, I, O>
107 where
108 I: From<I2> + Send + Sync + 'static,
109 I2: Clone + Send + Sync + 'static,
110 Self: Sized,
111 {
112 self.pullback(LiftHandler(PhantomData))
113 }
114
115 /// Extends the output of the pipeline by applying a transformation.
116 ///
117 /// # Example
118 ///
119 /// ```rust
120 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
121 ///
122 /// async fn compute(n: Input<i32>) -> i32 { *n * 2 }
123 ///
124 /// # #[tokio::main]
125 /// # async fn main() {
126 /// // Changes output from i32 to String
127 /// let pipe = compute.pipe().extend(|n| format!("Result: {}", n));
128 /// let result = pipe.apply(Context::empty(10)).await;
129 /// assert_eq!(result, "Result: 20");
130 /// # }
131 /// ```
132 fn extend<O2, F>(self, map: F) -> Extend<Pipe<Self, Args>, F, I, O, O2>
133 where
134 F: Fn(O) -> O2 + Send + Sync + 'static,
135 Self: Sized,
136 {
137 Extend {
138 p: self.pipe(),
139 map,
140 _marker: PhantomData,
141 }
142 }
143
144 /// Repeats the pipeline operation n times.
145 /// Input and output types must be the same.
146 ///
147 /// # Example
148 ///
149 /// ```rust
150 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
151 ///
152 /// async fn add_one(n: Input<i32>) -> i32 { *n + 1 }
153 ///
154 /// # #[tokio::main]
155 /// # async fn main() {
156 /// // Input: 0 -> 1 -> 2 -> 3
157 /// let pipe = add_one.pipe().repeat(3);
158 /// let result = pipe.apply(Context::empty(0)).await;
159 /// assert_eq!(result, 3);
160 /// # }
161 /// ```
162 fn repeat(self, times: usize) -> Repeat<Pipe<Self, Args>, I>
163 where
164 Self: Handler<I, I, Args> + Sized,
165 I: Clone + Send + Sync + 'static,
166 {
167 Repeat {
168 p: self.pipe(),
169 times,
170 _marker: PhantomData,
171 }
172 }
173
174 /// Caches the results of the pipeline using an LRU strategy.
175 /// Requires the input to implement `Hash + Eq + Clone` and output to implement `Clone`.
176 ///
177 /// # Example
178 ///
179 /// ```rust
180 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
181 /// use std::sync::atomic::{AtomicUsize, Ordering};
182 /// use std::sync::Arc;
183 ///
184 /// static CALL_COUNT: AtomicUsize = AtomicUsize::new(0);
185 ///
186 /// async fn heavy_calc(n: Input<i32>) -> i32 {
187 /// CALL_COUNT.fetch_add(1, Ordering::SeqCst);
188 /// *n * *n
189 /// }
190 ///
191 /// # #[tokio::main]
192 /// # async fn main() {
193 /// let pipe = heavy_calc.pipe().cache(100);
194 /// let result1 = pipe.apply(Context::empty(10)).await;
195 /// assert_eq!(result1, 100);
196 /// let result2 = pipe.apply(Context::empty(10)).await; // Should hit cache
197 /// assert_eq!(result2, 100);
198 /// assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
199 /// # }
200 /// ```
201 #[cfg(feature = "cache")]
202 fn cache(self, capacity: usize) -> Cache<Pipe<Self, Args>, I, O>
203 where
204 Self: Sized,
205 I: std::hash::Hash + Eq + Clone + Send + Sync + 'static,
206 O: Clone + Send + Sync + 'static,
207 {
208 Cache {
209 p: self.pipe(),
210 cache: quick_cache::sync::Cache::new(capacity),
211 }
212 }
213
214 /// Maps the pipeline over a collection of inputs concurrently.
215 /// Input for the new pipeline will be `Vec<I>` and output will be `Vec<O>`.
216 ///
217 /// # Example
218 ///
219 /// ```rust
220 /// use pipe_it::{Context, Pipeline, Input, ext::HandlerExt};
221 ///
222 /// async fn process_item(n: Input<i32>) -> String { n.to_string() }
223 ///
224 /// # #[tokio::main]
225 /// # async fn main() {
226 /// // Input: Vec<i32> -> Output: Vec<String>
227 /// let pipe = process_item.pipe().map();
228 /// let result = pipe.apply(Context::empty(vec![1, 2, 3])).await;
229 /// assert_eq!(result, vec!["1", "2", "3"]);
230 /// # }
231 /// ```
232 fn map(self) -> crate::concurrency::Map<Pipe<Self, Args>, I, O>
233 where
234 Self: Sized,
235 {
236 crate::concurrency::Map {
237 p: self.pipe(),
238 _marker: PhantomData,
239 }
240 }
241}
242
243impl<F, I, O, Args> HandlerExt<I, O, Args> for F where F: Handler<I, O, Args> {}
244
/// Wrapper struct to adapt a Handler into a Pipeline.
#[derive(Clone, Copy)]
pub struct Pipe<P, Args> {
    // The wrapped handler.
    p: P,
    // Ties the handler's argument-extraction type to this wrapper without storing it.
    _marker: PhantomData<Args>,
}

252impl<P, Args, I, O> Pipeline<I, O> for Pipe<P, Args>
253where
254 P: Handler<I, O, Args>,
255 I: Clone + Send + Sync + 'static,
256 O: Send + 'static,
257 Args: Send + Sync + 'static,
258{
259 fn apply(&self, ctx: Context<I>) -> impl Future<Output = O> + Send {
260 self.p.call(ctx)
261 }
262}
263
// PipelineExt removed to avoid ambiguity with HandlerExt:
// pub trait PipelineExt<I, O>: Pipeline<I, O> { ... }
// impl<P, I, O> PipelineExt<I, O> for P where P: Pipeline<I, O> {}

// --- Connect ---

/// Sequential composition: runs `f`, then feeds its output into `g`.
#[derive(Clone, Copy)]
pub struct Connect<F, G, I, O1, O2> {
    // First stage: I -> O1.
    f: F,
    // Second stage: O1 -> O2.
    g: G,
    _marker: PhantomData<(I, O1, O2)>,
}

277impl<F, G, I, O1, O2> Pipeline<I, O2> for Connect<F, G, I, O1, O2>
278where
279 F: Pipeline<I, O1>,
280 G: Pipeline<O1, O2>,
281 I: Clone + Send + Sync + 'static,
282 O1: Send + Sync + 'static,
283 O2: Send + Sync + 'static,
284{
285 fn apply(&self, ctx: Context<I>) -> impl Future<Output = O2> + Send {
286 async move {
287 // Optimized to release input ownership for the second stage
288 let (input, shared) = ctx.into_parts();
289 let ctx_f = Context::from_parts(input, shared.clone());
290 let res = self.f.apply(ctx_f).await;
291 self.g.apply(Context::from_parts(std::sync::Arc::new(res), shared)).await
292 }
293 }
294}
295
// --- Pullback ---
297
298pub struct Pullback<P, H, I2, I, O> {
299 p: P,
300 map: H,
301 _marker: PhantomData<(I2, I, O)>,
302}
303
304impl<P, H, I2, I, O> Pipeline<I2, O> for Pullback<P, H, I2, I, O>
305where
306 P: Pipeline<I, O>,
307 H: Pipeline<I2, I>,
308 I2: Clone + Send + Sync + 'static,
309 I: Send + Sync + 'static,
310 O: Send + Sync + 'static,
311{
312 fn apply(&self, ctx: Context<I2>) -> impl Future<Output = O> + Send {
313 async move {
314 // Effectively H.connect(P)
315 let (input, shared) = ctx.into_parts();
316 // Run Mapper (H) on I2
317 let ctx_h = Context::from_parts(input, shared.clone());
318 let mapped_input = self.map.apply(ctx_h).await;
319 // Run P on mapped input I
320 self.p.apply(Context::from_parts(std::sync::Arc::new(mapped_input), shared)).await
321 }
322 }
323}
324
/// Helper handler for the `lift` operation: converts `I2` into `I` via `From`.
// Pure marker type; derive added for consistency with the other combinators.
#[derive(Clone, Copy)]
pub struct LiftHandler<I, I2>(PhantomData<(I, I2)>);

328impl<I, I2> Handler<I2, I, (crate::Input<I2>,)> for LiftHandler<I, I2>
329where
330 I: From<I2> + Send + Sync + 'static,
331 I2: Clone + Send + Sync + 'static,
332{
333 fn call(&self, ctx: Context<I2>) -> impl Future<Output = I> + Send {
334 async move {
335 let (input, _) = ctx.into_parts();
336 let val = match crate::Input::<I2>(input).try_unwrap() {
337 Ok(v) => v,
338 Err(arc) => (*arc).clone(),
339 };
340 val.into()
341 }
342 }
343}
344
// --- Extend ---

/// Post-composition: runs `p`, then applies the synchronous map `O1 -> O2`.
// Derive added for consistency with `Pipe`/`Connect` (bounds are conditional,
// so non-Clone closures simply opt out).
#[derive(Clone, Copy)]
pub struct Extend<P, F, I, O1, O2> {
    // The inner pipeline producing O1.
    p: P,
    // The output transformation O1 -> O2.
    map: F,
    _marker: PhantomData<(I, O1, O2)>,
}

353impl<P, F, I, O1, O2> Pipeline<I, O2> for Extend<P, F, I, O1, O2>
354where
355 P: Pipeline<I, O1>,
356 F: Fn(O1) -> O2 + Send + Sync + 'static,
357 I: Clone + Send + Sync + 'static,
358 O1: Send + Sync + 'static,
359 O2: Send + Sync + 'static,
360{
361 fn apply(&self, ctx: Context<I>) -> impl Future<Output = O2> + Send {
362 async move {
363 let res = self.p.apply(ctx).await;
364 (self.map)(res)
365 }
366 }
367}
368
// --- Repeat ---

/// Iterated application of an endo-pipeline `I -> I`.
// Derive added for consistency with `Pipe`/`Connect`.
#[derive(Clone, Copy)]
pub struct Repeat<P, I> {
    // The pipeline applied on each iteration.
    p: P,
    // Number of applications; 0 yields the input unchanged.
    times: usize,
    _marker: PhantomData<I>,
}

377impl<P, I> Pipeline<I, I> for Repeat<P, I>
378where
379 P: Pipeline<I, I>,
380 I: Clone + Send + Sync + 'static,
381{
382 fn apply(&self, ctx: Context<I>) -> impl Future<Output = I> + Send {
383 async move {
384 let (input_arc, shared) = ctx.into_parts();
385 let mut val = match std::sync::Arc::try_unwrap(input_arc) {
386 Ok(v) => v,
387 Err(arc) => (*arc).clone(),
388 };
389
390 for _ in 0..self.times {
391 let iter_ctx = Context::from_parts(std::sync::Arc::new(val), shared.clone());
392 val = self.p.apply(iter_ctx).await;
393 }
394 val
395 }
396 }
397}
398
// --- Cache ---
/// Memoizing wrapper around a pipeline, keyed by input value.
// No Clone derive: `quick_cache::sync::Cache` is not clonable.
#[cfg(feature = "cache")]
pub struct Cache<P, I, O> {
    // The inner pipeline computed on cache misses.
    p: P,
    // LRU-style cache of previously computed outputs.
    cache: quick_cache::sync::Cache<I, O>,
}
#[cfg(feature = "cache")]
impl<P, I, O> Pipeline<I, O> for Cache<P, I, O>
where
    P: Pipeline<I, O>,
    I: std::hash::Hash + Eq + Clone + Send + Sync + 'static,
    O: Clone + Send + Sync + 'static,
{
    fn apply(&self, ctx: Context<I>) -> impl Future<Output = O> + Send {
        async move {
            let input_arc = ctx.input();
            // Fast path: return the cached output (cloned) on a hit.
            if let Some(val) = self.cache.get(&*input_arc) {
                return val;
            }

            // Miss: run the inner pipeline, then populate the cache.
            // NOTE(review): concurrent misses on the same key may each run the
            // pipeline (cache stampede) — acceptable only for pure handlers; confirm.
            let res = self.p.apply(ctx).await;
            self.cache.insert((*input_arc).clone(), res.clone());
            res
        }
    }
}

426impl<P, I, O> Handler<I, O, ()> for P
427where
428 P: Pipeline<I, O>,
429 I: Clone + Send + Sync + 'static,
430 O: Send + 'static,
431{
432 fn call(&self, ctx: Context<I>) -> impl Future<Output = O> + Send {
433 self.apply(ctx)
434 }
435}