1use std::collections::HashMap;
5use std::fs::File;
6use std::os::raw::c_void;
7use std::path::{Path, PathBuf};
8use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
9use std::sync::{Arc, OnceLock, RwLock};
10use std::thread;
11
12use crate::{binder::*, error::*, proxy::*, sys::binder, thread_state};
13
/// Process-wide policy restricting which kinds of binder transactions a
/// thread may issue. Also mirrored per-thread via `thread_state`
/// (see `strong_proxy_for_handle_stability`, which temporarily lifts it).
#[derive(Debug, Clone, Copy)]
pub enum CallRestriction {
    /// No restriction: all transactions are allowed.
    None,
    /// Synchronous (non-oneway) calls are intended to be rejected with an
    /// error (enforced in `thread_state`, not here).
    ErrorIfNotOneway,
    /// Synchronous (non-oneway) calls are intended to be fatal
    /// (enforced in `thread_state`, not here).
    FatalIfNotOneway,
}
23
/// Default (and effective upper bound — see the clamp in `inner_init`) for the
/// binder thread-pool size; mirrors libbinder's DEFAULT_MAX_BINDER_THREADS.
const DEFAULT_MAX_BINDER_THREADS: u32 = 15;
/// Value passed to the oneway-spam-detection ioctl at open time (1 = enabled).
const DEFAULT_ENABLE_ONEWAY_SPAM_DETECTION: u32 = 1;
26
/// Raw record of the region mmap'ed from the binder driver in `inner_init`:
/// just the (pointer, length) pair, retained so `Drop for ProcessState` can
/// `munmap` it.
struct MemoryMap {
    ptr: *mut c_void,
    size: usize,
}
// SAFETY: `MemoryMap` is a passive descriptor of a process-wide mapping. This
// module never dereferences `ptr` through a `&MemoryMap`; the pair is only
// handed back to `munmap` on drop, so sharing/sending across threads is sound.
unsafe impl Sync for MemoryMap {}
unsafe impl Send for MemoryMap {}
33
/// Process-wide binder state: the open driver fd and its memory mapping, the
/// handle→proxy cache, and thread-pool bookkeeping. Constructed once via
/// [`ProcessState::init`] and accessed afterwards as a `'static` singleton
/// through [`ProcessState::as_self`].
pub struct ProcessState {
    // Thread-pool size requested from the driver (clamped in `inner_init`).
    max_threads: u32,
    // Path the driver was opened from (e.g. /dev/binder); its file name is
    // also used to build pooled-thread names.
    driver_name: PathBuf,
    // Shared handle to the opened binder device node.
    driver: Arc<File>,
    // Read-only mapping created against the driver fd; unmapped on drop.
    mmap: RwLock<MemoryMap>,
    // Set once this process successfully registers as the context manager.
    context_manager: RwLock<Option<SIBinder>>,
    // Cache of weak proxies keyed by kernel handle.
    handle_to_proxy: RwLock<HashMap<u32, WIBinder>>,
    disable_background_scheduling: AtomicBool,
    // Process-wide default; threads mirror it via `thread_state`.
    call_restriction: RwLock<CallRestriction>,
    // Flipped exactly once by `start_thread_pool` (compare_exchange).
    thread_pool_started: AtomicBool,
    // Monotonic counter used to suffix pooled-thread names (starts at 1).
    thread_pool_seq: AtomicUsize,
    // Number of pooled threads this process has spawned.
    kernel_started_threads: AtomicUsize,
    pub(crate) current_threads: AtomicUsize,
}
48
impl ProcessState {
    /// Returns the process-wide singleton cell holding the `ProcessState`.
    fn instance() -> &'static OnceLock<ProcessState> {
        static INSTANCE: OnceLock<ProcessState> = OnceLock::new();
        &INSTANCE
    }

    /// Returns the initialized singleton.
    ///
    /// # Panics
    /// Panics if [`ProcessState::init`] / [`ProcessState::init_default`]
    /// has not been called yet.
    pub fn as_self() -> &'static ProcessState {
        Self::instance()
            .get()
            .expect("ProcessState is not initialized!")
    }

    /// Sets the process-wide call restriction.
    ///
    /// # Panics
    /// Panics if the internal lock is poisoned.
    pub fn set_call_restriction(&self, call_restriction: CallRestriction) {
        let mut self_call_restriction = self
            .call_restriction
            .write()
            .expect("Call restriction lock poisoned");
        *self_call_restriction = call_restriction;
    }

    /// Returns the current process-wide call restriction.
    pub(crate) fn call_restriction(&self) -> CallRestriction {
        *self
            .call_restriction
            .read()
            .expect("Call restriction lock poisoned")
    }

    /// Opens the driver, maps its shared region, and builds the state object.
    ///
    /// `max_threads == 0` or any value >= `DEFAULT_MAX_BINDER_THREADS` falls
    /// back to the default, so the default also acts as an upper bound.
    fn inner_init(
        driver_name: &str,
        max_threads: u32,
    ) -> std::result::Result<ProcessState, Box<dyn std::error::Error>> {
        let max_threads = if max_threads != 0 && max_threads < DEFAULT_MAX_BINDER_THREADS {
            max_threads
        } else {
            DEFAULT_MAX_BINDER_THREADS
        };

        let driver_name = PathBuf::from(driver_name);

        let driver = open_driver(&driver_name, max_threads)?;

        // 1 MiB minus two pages — matches libbinder's BINDER_VM_SIZE.
        let vm_size = (1024 * 1024) - rustix::param::page_size() * 2;
        // SAFETY: a fresh, kernel-chosen (null hint) private read-only mapping
        // over the just-opened driver fd; the pointer/size pair is recorded so
        // it can be unmapped exactly once in `Drop`.
        let mmap = unsafe {
            let vm_start = rustix::mm::mmap(
                std::ptr::null_mut(),
                vm_size,
                rustix::mm::ProtFlags::READ,
                rustix::mm::MapFlags::PRIVATE | rustix::mm::MapFlags::NORESERVE,
                &driver,
                0,
            )?;

            (vm_start, vm_size)
        };

        Ok(ProcessState {
            max_threads,
            driver_name,
            driver: driver.into(),
            mmap: RwLock::new(MemoryMap {
                ptr: mmap.0,
                size: mmap.1,
            }),
            context_manager: RwLock::new(None),
            handle_to_proxy: RwLock::new(HashMap::new()),
            disable_background_scheduling: AtomicBool::new(false),
            call_restriction: RwLock::new(CallRestriction::None),
            thread_pool_started: AtomicBool::new(false),
            thread_pool_seq: AtomicUsize::new(1),
            kernel_started_threads: AtomicUsize::new(0),
            current_threads: AtomicUsize::new(0),
        })
    }

    /// Initializes (on first call) and returns the singleton.
    ///
    /// Only the first caller's arguments take effect: later calls with
    /// different `driver_name`/`max_threads` silently return the existing
    /// instance (a consequence of `OnceLock::get_or_init`).
    ///
    /// # Panics
    /// Panics if opening or mapping the driver fails.
    pub fn init(driver_name: &str, max_threads: u32) -> &'static ProcessState {
        Self::instance().get_or_init(|| match Self::inner_init(driver_name, max_threads) {
            Ok(instance) => instance,
            Err(e) => {
                panic!("Error in init(): {e}");
            }
        })
    }

    /// Initializes with the crate's default driver path and default thread
    /// count (0 → `DEFAULT_MAX_BINDER_THREADS`).
    pub fn init_default() -> &'static ProcessState {
        Self::init(crate::DEFAULT_BINDER_PATH, 0)
    }

    /// Registers this process as the binder context manager (handle 0 owner).
    ///
    /// Tries the extended ioctl first (which passes flags such as
    /// FLAT_BINDER_FLAG_ACCEPTS_FDS) and falls back to the legacy ioctl if
    /// the kernel rejects it. Idempotent: a second call is a no-op once a
    /// manager is recorded.
    pub fn become_context_manager(
        &self,
        binder: SIBinder,
    ) -> std::result::Result<(), Box<dyn std::error::Error>> {
        let mut context_manager = self
            .context_manager
            .write()
            .expect("Context manager lock poisoned");

        if context_manager.is_none() {
            let obj = binder::flat_binder_object::new_binder_with_flags(
                binder::FLAT_BINDER_FLAG_ACCEPTS_FDS,
            );

            if binder::set_context_mgr_ext(&self.driver, obj).is_err() {
                // Fallback for kernels without the _ext ioctl.
                if let Err(e) = binder::set_context_mgr(&self.driver, 0) {
                    return Err(
                        format!("Binder ioctl to become context manager failed: {e}").into(),
                    );
                }
            }
            *context_manager = Some(binder);
        }

        Ok(())
    }

    /// Returns the locally-registered context manager binder, if any.
    pub(crate) fn context_manager(&self) -> Option<SIBinder> {
        self.context_manager
            .read()
            .expect("Context manager lock poisoned")
            .clone()
    }

    /// Returns a proxy for the context manager (well-known handle 0).
    pub fn context_object(&self) -> Result<SIBinder> {
        self.strong_proxy_for_handle(0)
    }

    /// Returns a strong proxy for `handle` with the default stability.
    pub fn strong_proxy_for_handle(&self, handle: u32) -> Result<SIBinder> {
        self.strong_proxy_for_handle_stability(handle, Default::default())
    }

    /// Returns a strong proxy for `handle`, creating and caching one if
    /// needed.
    ///
    /// Uses a double-checked read-then-write lock pattern: the common cached
    /// case takes only the read lock; the write path re-checks because
    /// another thread may have inserted between the two lock acquisitions.
    pub(crate) fn strong_proxy_for_handle_stability(
        &self,
        handle: u32,
        stability: Stability,
    ) -> Result<SIBinder> {
        if let Some(weak) = self
            .handle_to_proxy
            .read()
            .expect("Handle to proxy lock poisoned")
            .get(&handle)
        {
            return weak.upgrade();
        }

        let mut handle_to_proxy = self
            .handle_to_proxy
            .write()
            .expect("Handle to proxy lock poisoned");
        if let Some(weak) = handle_to_proxy.get(&handle) {
            return weak.upgrade();
        }

        if handle == 0 {
            // Verify the context manager is alive before building a proxy for
            // it; the restriction is lifted because ping is a synchronous call.
            // NOTE(review): if ping_binder returns Err via `?`, the original
            // restriction is not restored on this thread — confirm intended.
            let original_call_restriction = thread_state::call_restriction();
            thread_state::set_call_restriction(CallRestriction::None);

            thread_state::ping_binder(handle)?;

            thread_state::set_call_restriction(original_call_restriction);
        }

        let interface: String = thread_state::query_interface(handle).unwrap_or_default();

        let proxy: Arc<dyn IBinder> = ProxyHandle::new(handle, interface, stability);
        let weak = WIBinder::new(proxy)?;

        handle_to_proxy.insert(handle, weak.clone());

        weak.upgrade()
    }

    /// Removes `handle` from the proxy cache and, if the proxy is still
    /// alive, delivers its death obituary.
    pub(crate) fn send_obituary_for_handle(&self, handle: u32) -> Result<()> {
        // Take the entry out under the write lock, then drop the lock before
        // calling back into the proxy.
        let weak = {
            let mut handle_to_proxy = self
                .handle_to_proxy
                .write()
                .expect("Handle to proxy lock poisoned");
            handle_to_proxy.remove(&handle)
        };
        if let Some(weak) = weak {
            match weak.upgrade() {
                Ok(strong) => {
                    if let Some(proxy) = strong.as_proxy() {
                        proxy.send_obituary(&weak)?;
                    } else {
                        log::debug!("Handle {} is not a proxy during obituary", handle);
                    }
                }
                Err(_) => {
                    // Already destroyed: nothing to notify.
                    log::trace!("Object for handle {} already destroyed", handle);
                }
            }
        } else {
            log::trace!("Handle {} was not in cache during obituary", handle);
        }

        Ok(())
    }

    /// Drops `handle` from the proxy cache without sending an obituary.
    pub(crate) fn expunge_handle(&self, handle: u32) {
        let mut handle_to_proxy = self
            .handle_to_proxy
            .write()
            .expect("Handle to proxy lock poisoned");
        handle_to_proxy.remove(&handle);
        log::trace!("expunge_handle: removed handle {}", handle);
    }

    /// Enables/disables background scheduling for binder threads
    /// (flag only; consumers read it via `background_scheduling_disabled`).
    pub fn disable_background_scheduling(&self, disable: bool) {
        self.disable_background_scheduling
            .store(disable, Ordering::Relaxed);
    }

    /// Returns whether background scheduling has been disabled.
    pub fn background_scheduling_disabled(&self) -> bool {
        self.disable_background_scheduling.load(Ordering::Relaxed)
    }

    /// Returns a shared handle to the open binder device.
    pub fn driver(&self) -> Arc<File> {
        self.driver.clone()
    }

    /// Starts the thread pool exactly once; later calls are no-ops
    /// (guaranteed by the compare_exchange on `thread_pool_started`).
    pub fn start_thread_pool() {
        let this = Self::as_self();
        if this
            .thread_pool_started
            .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
            .is_ok()
        {
            if this.max_threads == 0 {
                log::warn!("Extra binder thread started, but 0 threads requested.\nDo not use *start_thread_pool when zero threads are requested.");
            }
            this.spawn_pooled_thread(true);
        }
    }

    /// Builds a thread name of the form `<driver>:<pid>_<seq hex>`,
    /// e.g. `binder:1234_2`, falling back to `BINDER` if the driver path
    /// has no usable file name.
    fn make_binder_thread_name(&self) -> String {
        let seq = self.thread_pool_seq.fetch_add(1, Ordering::SeqCst);
        let pid = std::process::id();
        let driver_name = self
            .driver_name
            .file_name()
            .and_then(|name| name.to_str())
            .map(|name| name.to_owned())
            .unwrap_or("BINDER".to_owned());
        format!("{driver_name}:{pid}_{seq:X}")
    }

    /// Spawns one pooled binder thread (no-op until `start_thread_pool`).
    /// `is_main` is forwarded to `thread_state::join_thread_pool`.
    ///
    /// NOTE(review): the spawn result is discarded, and
    /// `kernel_started_threads` is incremented even if `Builder::spawn`
    /// failed — confirm whether failures should be logged and not counted.
    pub(crate) fn spawn_pooled_thread(&self, is_main: bool) {
        if self.thread_pool_started.load(Ordering::Relaxed) {
            let name = self.make_binder_thread_name();
            log::info!("Spawning new pooled thread, name={name}");
            let _ = thread::Builder::new()
                .name(name)
                .spawn(move || thread_state::join_thread_pool(is_main));

            self.kernel_started_threads.fetch_add(1, Ordering::SeqCst);
        }
    }

    /// Queries the driver for the strong reference count of the node behind
    /// `node`'s handle (BINDER_GET_NODE_INFO_FOR_REF ioctl).
    pub fn strong_ref_count_for_node(&self, node: &ProxyHandle) -> Result<usize> {
        let mut info = binder::binder_node_info_for_ref {
            handle: node.handle(),
            strong_count: 0,
            weak_count: 0,
            reserved1: 0,
            reserved2: 0,
            reserved3: 0,
        };

        binder::get_node_info_for_ref(&self.driver, &mut info).inspect_err(|&e| {
            log::error!("Binder ioctl(BINDER_GET_NODE_INFO_FOR_REF) failed: {e:?}");
        })?;
        Ok(info.strong_count as usize)
    }

    /// Blocks the calling thread in the binder thread pool (as a main thread).
    pub fn join_thread_pool() -> Result<()> {
        thread_state::join_thread_pool(true)
    }
}
376
377fn open_driver(
378 driver: &Path,
379 max_threads: u32,
380) -> std::result::Result<File, Box<dyn std::error::Error>> {
381 let fd = File::options()
382 .read(true)
383 .write(true)
384 .open(driver)
385 .map_err(|e| format!("Opening '{}' failed: {}\n", driver.to_string_lossy(), e))?;
386
387 let mut vers = binder::binder_version {
388 protocol_version: 0,
389 };
390
391 binder::version(&fd, &mut vers)
392 .map_err(|e| format!("Binder ioctl to obtain version failed: {e}"))?;
393 log::info!("Binder driver protocol version: {}", vers.protocol_version);
394
395 if vers.protocol_version != binder::BINDER_CURRENT_PROTOCOL_VERSION as i32 {
396 return Err(format!(
397 "Binder driver protocol({}) does not match user space protocol({})!",
398 vers.protocol_version,
399 binder::BINDER_CURRENT_PROTOCOL_VERSION
400 )
401 .into());
402 }
403
404 binder::set_max_threads(&fd, max_threads)
405 .map_err(|e| format!("Binder ioctl to set max threads failed: {e}"))?;
406 log::info!("Binder driver max threads set to {max_threads}");
407
408 let enable = DEFAULT_ENABLE_ONEWAY_SPAM_DETECTION;
409 if let Err(e) = binder::enable_oneway_spam_detection(&fd, enable) {
410 log::warn!("Binder ioctl to enable oneway spam detection failed: {e}")
411 }
412
413 Ok(fd)
414}
415
416impl Drop for ProcessState {
417 fn drop(self: &mut ProcessState) {
418 let mmap = self.mmap.read().expect("Mmap lock poisoned");
419 unsafe {
420 rustix::mm::munmap(mmap.ptr, mmap.size).expect("Failed to unmap memory");
421 }
422 }
423}
424
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): these tests require a real binder device at
    // `crate::DEFAULT_BINDER_PATH` and all share the process-wide singleton,
    // so they are order-sensitive when run in one test process.

    // Initializing with max_threads = 0 must fall back to the defaults.
    #[test]
    fn test_process_state() {
        let process = ProcessState::init_default();
        assert_eq!(process.max_threads, DEFAULT_MAX_BINDER_THREADS);
        assert_eq!(
            process.driver_name,
            PathBuf::from(crate::DEFAULT_BINDER_PATH)
        );
    }

    // Handle 0 (the context manager) should yield a strong proxy.
    #[test]
    fn test_process_state_context_object() {
        let process = ProcessState::init_default();
        assert!(process.context_object().is_ok());
    }

    #[test]
    fn test_process_state_strong_proxy_for_handle() {
        let process = ProcessState::init_default();
        assert!(process.strong_proxy_for_handle(0).is_ok());
    }

    #[test]
    fn test_process_state_disable_background_scheduling() {
        let process = ProcessState::init_default();
        process.disable_background_scheduling(true);
        assert!(process.background_scheduling_disabled());
    }

    // start_thread_pool is once-only; exactly one pooled thread is counted.
    #[test]
    fn test_process_state_start_thread_pool() {
        test_process_state();
        ProcessState::start_thread_pool();
        assert_eq!(
            ProcessState::as_self()
                .kernel_started_threads
                .load(Ordering::SeqCst),
            1
        );
    }
}
469}