1use crate::file::UserBuffer;
2use alloc::sync::{Arc, Weak};
3use spin::Mutex;
4
5const RING_BUFFER_SIZE: usize = 32;
10
/// Disambiguates the `head == tail` case of the ring buffer: that index
/// equality means "full" after a write and "empty" after a read, so the
/// state must be tracked explicitly.
///
/// `Eq` and `Debug` are added alongside the existing derives: `PartialEq`
/// on a field-less enum is always a total equivalence, and `Debug` aids
/// kernel diagnostics.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum RingBufferStatus {
    /// `head == tail` and every slot holds unread data.
    Full,
    /// `head == tail` and no slot holds unread data.
    Empty,
    /// `head != tail`; partially filled.
    Normal,
}
21
/// Fixed-capacity byte ring shared by the two ends of a pipe.
pub struct PipeRingBuffer {
    arr: [u8; RING_BUFFER_SIZE],    // backing storage
    head: usize,                    // index of the next byte to read
    tail: usize,                    // index of the next free slot to write
    status: RingBufferStatus,       // disambiguates head == tail (Full vs Empty)
    // Weak so the buffer does not keep the writer alive; used to detect
    // when every write end has been dropped (see all_write_ends_closed).
    write_end: Option<Weak<PipeWriter>>,
}
30
31impl PipeRingBuffer {
32 pub fn new() -> Self {
34 Self {
35 arr: [0; RING_BUFFER_SIZE],
36 head: 0,
37 tail: 0,
38 status: RingBufferStatus::Empty,
39 write_end: None,
40 }
41 }
42
43 fn set_write_end(&mut self, write_end: &Arc<PipeWriter>) {
45 self.write_end = Some(Arc::downgrade(write_end));
46 }
47
48 fn write_byte(&mut self, byte: u8) {
50 self.status = RingBufferStatus::Normal;
51 self.arr[self.tail] = byte;
52 self.tail = (self.tail + 1) % RING_BUFFER_SIZE;
53 if self.tail == self.head {
54 self.status = RingBufferStatus::Full;
55 }
56 }
57
58 fn read_byte(&mut self) -> u8 {
60 self.status = RingBufferStatus::Normal;
61 let c = self.arr[self.head];
62 self.head = (self.head + 1) % RING_BUFFER_SIZE;
63 if self.head == self.tail {
64 self.status = RingBufferStatus::Empty;
65 }
66 c
67 }
68
69 fn available_read(&self) -> usize {
71 if self.status == RingBufferStatus::Empty {
73 0
74 } else if self.tail > self.head {
75 self.tail - self.head
76 } else {
77 self.tail + RING_BUFFER_SIZE - self.head
78 }
79 }
80
81 fn available_write(&self) -> usize {
83 if self.status == RingBufferStatus::Full {
84 0
85 } else {
86 RING_BUFFER_SIZE - self.available_read()
87 }
88 }
89
90 fn all_write_ends_closed(&self) -> bool {
92 self.write_end.as_ref().unwrap().upgrade().is_none()
94 }
95}
96
/// Read end of a pipe: a shared handle to the ring buffer. `Clone` lets
/// multiple readers share the same underlying buffer.
#[derive(Clone)]
pub struct PipeReader {
    buffer: Arc<Mutex<PipeRingBuffer>>,
}
102
/// Write end of a pipe: a shared handle to the ring buffer.
// NOTE(review): unlike PipeReader this does not derive Clone — likely
// intentional, since EOF detection tracks the single Arc<PipeWriter>
// through a Weak and a derived clone would not be tracked. Confirm
// before adding Clone here.
pub struct PipeWriter {
    buffer: Arc<Mutex<PipeRingBuffer>>,
}
107
108impl PipeReader {
109 pub fn read(&self, buf: UserBuffer) -> isize {
116 let want_to_read = buf.len();
117 let mut buf_iter = buf.into_iter();
118 let mut already_read = 0usize;
119 let mut ring_buffer = self.buffer.lock();
120 let loop_read = ring_buffer.available_read();
121 if loop_read == 0 {
122 if ring_buffer.all_write_ends_closed() {
124 return 0; }
126 return -2; }
128 for _ in 0..loop_read {
130 if let Some(byte_ref) = buf_iter.next() {
131 unsafe {
132 *byte_ref = ring_buffer.read_byte();
133 }
134 already_read += 1;
135 if already_read == want_to_read {
136 return want_to_read as _;
137 }
138 } else {
139 return already_read as _;
140 }
141 }
142 already_read as _
144 }
145}
146
147impl PipeWriter {
148 pub fn write(&self, buf: UserBuffer) -> isize {
154 let want_to_write = buf.len();
155 let mut buf_iter = buf.into_iter();
156 let mut already_write = 0usize;
157 let mut ring_buffer = self.buffer.lock();
158 let loop_write = ring_buffer.available_write();
159 if loop_write == 0 {
160 return -2; }
162 for _ in 0..loop_write {
164 if let Some(byte_ref) = buf_iter.next() {
165 ring_buffer.write_byte(unsafe { *byte_ref });
166 already_write += 1;
167 if already_write == want_to_write {
168 return want_to_write as _;
169 }
170 } else {
171 return already_write as _;
172 }
173 }
174 already_write as _
176 }
177}
178
179pub fn make_pipe() -> (PipeReader, Arc<PipeWriter>) {
181 let buffer = Arc::new(Mutex::new(PipeRingBuffer::new()));
183 let read_end = PipeReader {
184 buffer: buffer.clone(),
185 };
186 let write_end = Arc::new(PipeWriter {
187 buffer: buffer.clone(),
188 });
189 buffer.lock().set_write_end(&write_end);
190 (read_end, write_end)
191}