tritonserver_rs/macros.rs

#[cfg(feature = "gpu")]
macro_rules! cuda_call {
    ($expr: expr) => {{
        #[allow(clippy::macro_metavars_in_unsafe)]
        let res = unsafe { $expr };

        if res != cuda_driver_sys::CUresult::CUDA_SUCCESS {
            Err($crate::error::Error::new(
                $crate::error::ErrorCode::Internal,
                format!("Cuda result: {:?}", res),
            ))
        } else {
            std::result::Result::<_, $crate::error::Error>::Ok(())
        }
    }};
    ($expr: expr, $val: expr) => {{
        #[allow(clippy::macro_metavars_in_unsafe)]
        let res = unsafe { $expr };

        if res != cuda_driver_sys::CUresult::CUDA_SUCCESS {
            Err($crate::error::Error::new(
                $crate::error::ErrorCode::Internal,
                format!("Cuda result: {:?}", res),
            ))
        } else {
            std::result::Result::<_, $crate::error::Error>::Ok($val)
        }
    }};
}

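// A minimal usage sketch for `cuda_call!` (not part of the original file):
// it wraps a raw CUDA driver call and converts any non-`CUDA_SUCCESS`
// `CUresult` into `Error` with `ErrorCode::Internal`. `cuCtxSynchronize` is
// only an illustrative entry point from `cuda_driver_sys`.
//
// #[cfg(feature = "gpu")]
// fn sync_current_context() -> Result<(), crate::error::Error> {
//     // One-argument form: yields Ok(()) on CUDA_SUCCESS.
//     cuda_call!(cuda_driver_sys::cuCtxSynchronize())
// }
//
// The two-argument form, `cuda_call!(expr, val)`, returns `Ok(val)` instead
// of `Ok(())` when the call succeeds.
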
macro_rules! triton_call {
    ($expr: expr) => {{
        #[allow(clippy::macro_metavars_in_unsafe)]
        let res = unsafe { $expr };

        if res.is_null() {
            std::result::Result::<(), $crate::error::Error>::Ok(())
        } else {
            std::result::Result::<(), $crate::error::Error>::Err(res.into())
        }
    }};
    ($expr: expr, $val: expr) => {{
        #[allow(clippy::macro_metavars_in_unsafe)]
        let res = unsafe { $expr };

        if res.is_null() {
            std::result::Result::<_, $crate::error::Error>::Ok($val)
        } else {
            std::result::Result::<_, $crate::error::Error>::Err(res.into())
        }
    }};
}

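// A minimal usage sketch for `triton_call!` (not part of the original file):
// the Triton in-process C API signals success by returning a null error
// pointer, which this macro maps to `Ok`; a non-null pointer is converted
// into `Error`. `TRITONSERVER_ApiVersion` is a real C-API function, but the
// module path of the raw bindings (`tritonserver_sys` below) is assumed.
//
// fn api_version() -> Result<(u32, u32), crate::error::Error> {
//     let (mut major, mut minor) = (0u32, 0u32);
//     // Two-argument form: returns Ok((major, minor)) when the error pointer is null.
//     triton_call!(
//         tritonserver_sys::TRITONSERVER_ApiVersion(&mut major, &mut minor),
//         (major, minor)
//     )
// }
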
#[macro_export]
macro_rules! run_in_context {
    ($val: expr, $expr: expr) => {{
        #[cfg(feature = "gpu")]
        {
            tokio::task::spawn_blocking(move || {
                let ctx = $crate::get_context($val)?;
                let _handle = ctx.make_current()?;
                $expr
            })
            .await
            .expect("tokio failed to join thread")
        }
        #[cfg(not(feature = "gpu"))]
        $expr
    }};
}

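// A minimal usage sketch for `run_in_context!` (not part of the original
// file). With the `gpu` feature, the expression runs on a blocking tokio
// thread with the CUDA context for the given device made current; without
// the feature, the expression is evaluated inline. The device-id type, the
// error path, and the body below are illustrative assumptions.
//
// async fn touch_device(device_id: i64) -> Result<(), tritonserver_rs::error::Error> {
//     run_in_context!(device_id, {
//         // The body must evaluate to a Result so the `?` on get_context /
//         // make_current in the GPU branch unifies with it.
//         Ok(())
//     })
// }
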
#[macro_export]
macro_rules! run_in_context_sync {
    ($val: expr, $expr: expr) => {{
        #[cfg(feature = "gpu")]
        {
            let ctx = $crate::get_context($val)?;
            let _handle = ctx.make_current()?;
            $expr
        }
        #[cfg(not(feature = "gpu"))]
        $expr
    }};
}
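
// A minimal usage sketch for `run_in_context_sync!` (not part of the original
// file): the synchronous variant makes the context current on the calling
// thread instead of a spawned blocking task, so it works outside async code.
// Names and types below are illustrative.
//
// fn with_device_ctx(device_id: i64) -> Result<(), tritonserver_rs::error::Error> {
//     run_in_context_sync!(device_id, {
//         // e.g. issue cuda_call!(...) driver operations here
//         Ok(())
//     })
// }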