1#[repr(C)]
4#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
5pub struct __BindgenBitfieldUnit<Storage> {
6 storage: Storage,
7}
8impl<Storage> __BindgenBitfieldUnit<Storage> {
9 #[inline]
10 pub const fn new(storage: Storage) -> Self {
11 Self { storage }
12 }
13}
14impl<Storage> __BindgenBitfieldUnit<Storage>
15where
16 Storage: AsRef<[u8]> + AsMut<[u8]>,
17{
18 #[inline]
19 fn extract_bit(byte: u8, index: usize) -> bool {
20 let bit_index = if cfg!(target_endian = "big") {
21 7 - (index % 8)
22 } else {
23 index % 8
24 };
25 let mask = 1 << bit_index;
26 byte & mask == mask
27 }
28 #[inline]
29 pub fn get_bit(&self, index: usize) -> bool {
30 debug_assert!(index / 8 < self.storage.as_ref().len());
31 let byte_index = index / 8;
32 let byte = self.storage.as_ref()[byte_index];
33 Self::extract_bit(byte, index)
34 }
35 #[inline]
36 pub unsafe fn raw_get_bit(this: *const Self, index: usize) -> bool {
37 debug_assert!(index / 8 < core::mem::size_of::<Storage>());
38 let byte_index = index / 8;
39 let byte = unsafe {
40 *(core::ptr::addr_of!((*this).storage) as *const u8).offset(byte_index as isize)
41 };
42 Self::extract_bit(byte, index)
43 }
44 #[inline]
45 fn change_bit(byte: u8, index: usize, val: bool) -> u8 {
46 let bit_index = if cfg!(target_endian = "big") {
47 7 - (index % 8)
48 } else {
49 index % 8
50 };
51 let mask = 1 << bit_index;
52 if val { byte | mask } else { byte & !mask }
53 }
54 #[inline]
55 pub fn set_bit(&mut self, index: usize, val: bool) {
56 debug_assert!(index / 8 < self.storage.as_ref().len());
57 let byte_index = index / 8;
58 let byte = &mut self.storage.as_mut()[byte_index];
59 *byte = Self::change_bit(*byte, index, val);
60 }
61 #[inline]
62 pub unsafe fn raw_set_bit(this: *mut Self, index: usize, val: bool) {
63 debug_assert!(index / 8 < core::mem::size_of::<Storage>());
64 let byte_index = index / 8;
65 let byte = unsafe {
66 (core::ptr::addr_of_mut!((*this).storage) as *mut u8).offset(byte_index as isize)
67 };
68 unsafe { *byte = Self::change_bit(*byte, index, val) };
69 }
70 #[inline]
71 pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
72 debug_assert!(bit_width <= 64);
73 debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
74 debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
75 let mut val = 0;
76 for i in 0..(bit_width as usize) {
77 if self.get_bit(i + bit_offset) {
78 let index = if cfg!(target_endian = "big") {
79 bit_width as usize - 1 - i
80 } else {
81 i
82 };
83 val |= 1 << index;
84 }
85 }
86 val
87 }
88 #[inline]
89 pub unsafe fn raw_get(this: *const Self, bit_offset: usize, bit_width: u8) -> u64 {
90 debug_assert!(bit_width <= 64);
91 debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
92 debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
93 let mut val = 0;
94 for i in 0..(bit_width as usize) {
95 if unsafe { Self::raw_get_bit(this, i + bit_offset) } {
96 let index = if cfg!(target_endian = "big") {
97 bit_width as usize - 1 - i
98 } else {
99 i
100 };
101 val |= 1 << index;
102 }
103 }
104 val
105 }
106 #[inline]
107 pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
108 debug_assert!(bit_width <= 64);
109 debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
110 debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
111 for i in 0..(bit_width as usize) {
112 let mask = 1 << i;
113 let val_bit_is_set = val & mask == mask;
114 let index = if cfg!(target_endian = "big") {
115 bit_width as usize - 1 - i
116 } else {
117 i
118 };
119 self.set_bit(index + bit_offset, val_bit_is_set);
120 }
121 }
122 #[inline]
123 pub unsafe fn raw_set(this: *mut Self, bit_offset: usize, bit_width: u8, val: u64) {
124 debug_assert!(bit_width <= 64);
125 debug_assert!(bit_offset / 8 < core::mem::size_of::<Storage>());
126 debug_assert!((bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>());
127 for i in 0..(bit_width as usize) {
128 let mask = 1 << i;
129 let val_bit_is_set = val & mask == mask;
130 let index = if cfg!(target_endian = "big") {
131 bit_width as usize - 1 - i
132 } else {
133 i
134 };
135 unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
136 }
137 }
138}
139#[repr(C)]
140#[derive(Default)]
141pub struct __IncompleteArrayField<T>(::core::marker::PhantomData<T>, [T; 0]);
142impl<T> __IncompleteArrayField<T> {
143 #[inline]
144 pub const fn new() -> Self {
145 __IncompleteArrayField(::core::marker::PhantomData, [])
146 }
147 #[inline]
148 pub fn as_ptr(&self) -> *const T {
149 self as *const _ as *const T
150 }
151 #[inline]
152 pub fn as_mut_ptr(&mut self) -> *mut T {
153 self as *mut _ as *mut T
154 }
155 #[inline]
156 pub unsafe fn as_slice(&self, len: usize) -> &[T] {
157 ::core::slice::from_raw_parts(self.as_ptr(), len)
158 }
159 #[inline]
160 pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
161 ::core::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
162 }
163}
164impl<T> ::core::fmt::Debug for __IncompleteArrayField<T> {
165 fn fmt(&self, fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
166 fmt.write_str("__IncompleteArrayField")
167 }
168}
169pub const BPF_LD: u32 = 0;
170pub const BPF_LDX: u32 = 1;
171pub const BPF_ST: u32 = 2;
172pub const BPF_STX: u32 = 3;
173pub const BPF_ALU: u32 = 4;
174pub const BPF_JMP: u32 = 5;
175pub const BPF_RET: u32 = 6;
176pub const BPF_MISC: u32 = 7;
177pub const BPF_W: u32 = 0;
178pub const BPF_H: u32 = 8;
179pub const BPF_B: u32 = 16;
180pub const BPF_IMM: u32 = 0;
181pub const BPF_ABS: u32 = 32;
182pub const BPF_IND: u32 = 64;
183pub const BPF_MEM: u32 = 96;
184pub const BPF_LEN: u32 = 128;
185pub const BPF_MSH: u32 = 160;
186pub const BPF_ADD: u32 = 0;
187pub const BPF_SUB: u32 = 16;
188pub const BPF_MUL: u32 = 32;
189pub const BPF_DIV: u32 = 48;
190pub const BPF_OR: u32 = 64;
191pub const BPF_AND: u32 = 80;
192pub const BPF_LSH: u32 = 96;
193pub const BPF_RSH: u32 = 112;
194pub const BPF_NEG: u32 = 128;
195pub const BPF_MOD: u32 = 144;
196pub const BPF_XOR: u32 = 160;
197pub const BPF_JA: u32 = 0;
198pub const BPF_JEQ: u32 = 16;
199pub const BPF_JGT: u32 = 32;
200pub const BPF_JGE: u32 = 48;
201pub const BPF_JSET: u32 = 64;
202pub const BPF_K: u32 = 0;
203pub const BPF_X: u32 = 8;
204pub const BPF_MAXINSNS: u32 = 4096;
205pub const BPF_JMP32: u32 = 6;
206pub const BPF_ALU64: u32 = 7;
207pub const BPF_DW: u32 = 24;
208pub const BPF_MEMSX: u32 = 128;
209pub const BPF_ATOMIC: u32 = 192;
210pub const BPF_XADD: u32 = 192;
211pub const BPF_MOV: u32 = 176;
212pub const BPF_ARSH: u32 = 192;
213pub const BPF_END: u32 = 208;
214pub const BPF_TO_LE: u32 = 0;
215pub const BPF_TO_BE: u32 = 8;
216pub const BPF_FROM_LE: u32 = 0;
217pub const BPF_FROM_BE: u32 = 8;
218pub const BPF_JNE: u32 = 80;
219pub const BPF_JLT: u32 = 160;
220pub const BPF_JLE: u32 = 176;
221pub const BPF_JSGT: u32 = 96;
222pub const BPF_JSGE: u32 = 112;
223pub const BPF_JSLT: u32 = 192;
224pub const BPF_JSLE: u32 = 208;
225pub const BPF_JCOND: u32 = 224;
226pub const BPF_CALL: u32 = 128;
227pub const BPF_EXIT: u32 = 144;
228pub const BPF_FETCH: u32 = 1;
229pub const BPF_XCHG: u32 = 225;
230pub const BPF_CMPXCHG: u32 = 241;
231pub const BPF_F_ALLOW_OVERRIDE: u32 = 1;
232pub const BPF_F_ALLOW_MULTI: u32 = 2;
233pub const BPF_F_REPLACE: u32 = 4;
234pub const BPF_F_BEFORE: u32 = 8;
235pub const BPF_F_AFTER: u32 = 16;
236pub const BPF_F_ID: u32 = 32;
237pub const BPF_F_STRICT_ALIGNMENT: u32 = 1;
238pub const BPF_F_ANY_ALIGNMENT: u32 = 2;
239pub const BPF_F_TEST_RND_HI32: u32 = 4;
240pub const BPF_F_TEST_STATE_FREQ: u32 = 8;
241pub const BPF_F_SLEEPABLE: u32 = 16;
242pub const BPF_F_XDP_HAS_FRAGS: u32 = 32;
243pub const BPF_F_XDP_DEV_BOUND_ONLY: u32 = 64;
244pub const BPF_F_TEST_REG_INVARIANTS: u32 = 128;
245pub const BPF_F_NETFILTER_IP_DEFRAG: u32 = 1;
246pub const BPF_PSEUDO_MAP_FD: u32 = 1;
247pub const BPF_PSEUDO_MAP_IDX: u32 = 5;
248pub const BPF_PSEUDO_MAP_VALUE: u32 = 2;
249pub const BPF_PSEUDO_MAP_IDX_VALUE: u32 = 6;
250pub const BPF_PSEUDO_BTF_ID: u32 = 3;
251pub const BPF_PSEUDO_FUNC: u32 = 4;
252pub const BPF_PSEUDO_CALL: u32 = 1;
253pub const BPF_PSEUDO_KFUNC_CALL: u32 = 2;
254pub const BPF_F_QUERY_EFFECTIVE: u32 = 1;
255pub const BPF_F_TEST_RUN_ON_CPU: u32 = 1;
256pub const BPF_F_TEST_XDP_LIVE_FRAMES: u32 = 2;
257pub const BPF_BUILD_ID_SIZE: u32 = 20;
258pub const BPF_OBJ_NAME_LEN: u32 = 16;
259pub const BPF_TAG_SIZE: u32 = 8;
260pub const BTF_INT_SIGNED: u32 = 1;
261pub const BTF_INT_CHAR: u32 = 2;
262pub const BTF_INT_BOOL: u32 = 4;
263pub const NLMSG_ALIGNTO: u32 = 4;
264pub const XDP_FLAGS_UPDATE_IF_NOEXIST: u32 = 1;
265pub const XDP_FLAGS_SKB_MODE: u32 = 2;
266pub const XDP_FLAGS_DRV_MODE: u32 = 4;
267pub const XDP_FLAGS_HW_MODE: u32 = 8;
268pub const XDP_FLAGS_REPLACE: u32 = 16;
269pub const XDP_FLAGS_MODES: u32 = 14;
270pub const XDP_FLAGS_MASK: u32 = 31;
271pub const PERF_EVENT_IOC_ENABLE: u32 = 9216;
272pub const PERF_EVENT_IOC_DISABLE: u32 = 9217;
273pub const PERF_EVENT_IOC_REFRESH: u32 = 9218;
274pub const PERF_EVENT_IOC_RESET: u32 = 9219;
275pub const PERF_EVENT_IOC_PERIOD: u32 = 1074275332;
276pub const PERF_EVENT_IOC_SET_OUTPUT: u32 = 9221;
277pub const PERF_EVENT_IOC_SET_FILTER: u32 = 1074275334;
278pub const PERF_EVENT_IOC_ID: u32 = 2148017159;
279pub const PERF_EVENT_IOC_SET_BPF: u32 = 1074013192;
280pub const PERF_EVENT_IOC_PAUSE_OUTPUT: u32 = 1074013193;
281pub const PERF_EVENT_IOC_QUERY_BPF: u32 = 3221758986;
282pub const PERF_EVENT_IOC_MODIFY_ATTRIBUTES: u32 = 1074275339;
283pub const PERF_MAX_STACK_DEPTH: u32 = 127;
284pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8;
285pub const PERF_FLAG_FD_NO_GROUP: u32 = 1;
286pub const PERF_FLAG_FD_OUTPUT: u32 = 2;
287pub const PERF_FLAG_PID_CGROUP: u32 = 4;
288pub const PERF_FLAG_FD_CLOEXEC: u32 = 8;
289pub const TC_H_MAJ_MASK: u32 = 4294901760;
290pub const TC_H_MIN_MASK: u32 = 65535;
291pub const TC_H_UNSPEC: u32 = 0;
292pub const TC_H_ROOT: u32 = 4294967295;
293pub const TC_H_INGRESS: u32 = 4294967281;
294pub const TC_H_CLSACT: u32 = 4294967281;
295pub const TC_H_MIN_PRIORITY: u32 = 65504;
296pub const TC_H_MIN_INGRESS: u32 = 65522;
297pub const TC_H_MIN_EGRESS: u32 = 65523;
298pub const TCA_BPF_FLAG_ACT_DIRECT: u32 = 1;
299pub const SO_ATTACH_BPF: u32 = 50;
300pub const SO_DETACH_BPF: u32 = 27;
301pub type __u8 = ::core::ffi::c_uchar;
302pub type __s16 = ::core::ffi::c_short;
303pub type __u16 = ::core::ffi::c_ushort;
304pub type __s32 = ::core::ffi::c_int;
305pub type __u32 = ::core::ffi::c_uint;
306pub type __s64 = ::core::ffi::c_longlong;
307pub type __u64 = ::core::ffi::c_ulonglong;
308pub const BPF_REG_0: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_0;
309pub const BPF_REG_1: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_1;
310pub const BPF_REG_2: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_2;
311pub const BPF_REG_3: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_3;
312pub const BPF_REG_4: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_4;
313pub const BPF_REG_5: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_5;
314pub const BPF_REG_6: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_6;
315pub const BPF_REG_7: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_7;
316pub const BPF_REG_8: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_8;
317pub const BPF_REG_9: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_9;
318pub const BPF_REG_10: _bindgen_ty_1 = _bindgen_ty_1::BPF_REG_10;
319pub const __MAX_BPF_REG: _bindgen_ty_1 = _bindgen_ty_1::__MAX_BPF_REG;
320#[repr(u32)]
321#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
322pub enum _bindgen_ty_1 {
323 BPF_REG_0 = 0,
324 BPF_REG_1 = 1,
325 BPF_REG_2 = 2,
326 BPF_REG_3 = 3,
327 BPF_REG_4 = 4,
328 BPF_REG_5 = 5,
329 BPF_REG_6 = 6,
330 BPF_REG_7 = 7,
331 BPF_REG_8 = 8,
332 BPF_REG_9 = 9,
333 BPF_REG_10 = 10,
334 __MAX_BPF_REG = 11,
335}
336#[repr(C)]
337#[derive(Debug, Copy, Clone)]
338pub struct bpf_insn {
339 pub code: __u8,
340 pub _bitfield_align_1: [u8; 0],
341 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1usize]>,
342 pub off: __s16,
343 pub imm: __s32,
344}
345impl bpf_insn {
346 #[inline]
347 pub fn dst_reg(&self) -> __u8 {
348 unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) }
349 }
350 #[inline]
351 pub fn set_dst_reg(&mut self, val: __u8) {
352 unsafe {
353 let val: u8 = ::core::mem::transmute(val);
354 self._bitfield_1.set(0usize, 4u8, val as u64)
355 }
356 }
357 #[inline]
358 pub unsafe fn dst_reg_raw(this: *const Self) -> __u8 {
359 unsafe {
360 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
361 ::core::ptr::addr_of!((*this)._bitfield_1),
362 0usize,
363 4u8,
364 ) as u8)
365 }
366 }
367 #[inline]
368 pub unsafe fn set_dst_reg_raw(this: *mut Self, val: __u8) {
369 unsafe {
370 let val: u8 = ::core::mem::transmute(val);
371 <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
372 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
373 0usize,
374 4u8,
375 val as u64,
376 )
377 }
378 }
379 #[inline]
380 pub fn src_reg(&self) -> __u8 {
381 unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) }
382 }
383 #[inline]
384 pub fn set_src_reg(&mut self, val: __u8) {
385 unsafe {
386 let val: u8 = ::core::mem::transmute(val);
387 self._bitfield_1.set(4usize, 4u8, val as u64)
388 }
389 }
390 #[inline]
391 pub unsafe fn src_reg_raw(this: *const Self) -> __u8 {
392 unsafe {
393 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 1usize]>>::raw_get(
394 ::core::ptr::addr_of!((*this)._bitfield_1),
395 4usize,
396 4u8,
397 ) as u8)
398 }
399 }
400 #[inline]
401 pub unsafe fn set_src_reg_raw(this: *mut Self, val: __u8) {
402 unsafe {
403 let val: u8 = ::core::mem::transmute(val);
404 <__BindgenBitfieldUnit<[u8; 1usize]>>::raw_set(
405 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
406 4usize,
407 4u8,
408 val as u64,
409 )
410 }
411 }
412 #[inline]
413 pub fn new_bitfield_1(dst_reg: __u8, src_reg: __u8) -> __BindgenBitfieldUnit<[u8; 1usize]> {
414 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
415 __bindgen_bitfield_unit.set(0usize, 4u8, {
416 let dst_reg: u8 = unsafe { ::core::mem::transmute(dst_reg) };
417 dst_reg as u64
418 });
419 __bindgen_bitfield_unit.set(4usize, 4u8, {
420 let src_reg: u8 = unsafe { ::core::mem::transmute(src_reg) };
421 src_reg as u64
422 });
423 __bindgen_bitfield_unit
424 }
425}
426#[repr(C)]
427#[derive(Debug)]
428pub struct bpf_lpm_trie_key {
429 pub prefixlen: __u32,
430 pub data: __IncompleteArrayField<__u8>,
431}
432#[repr(u32)]
433#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
434pub enum bpf_cgroup_iter_order {
435 BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
436 BPF_CGROUP_ITER_SELF_ONLY = 1,
437 BPF_CGROUP_ITER_DESCENDANTS_PRE = 2,
438 BPF_CGROUP_ITER_DESCENDANTS_POST = 3,
439 BPF_CGROUP_ITER_ANCESTORS_UP = 4,
440}
441impl bpf_cmd {
442 pub const BPF_PROG_RUN: bpf_cmd = bpf_cmd::BPF_PROG_TEST_RUN;
443}
444#[repr(u32)]
445#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
446pub enum bpf_cmd {
447 BPF_MAP_CREATE = 0,
448 BPF_MAP_LOOKUP_ELEM = 1,
449 BPF_MAP_UPDATE_ELEM = 2,
450 BPF_MAP_DELETE_ELEM = 3,
451 BPF_MAP_GET_NEXT_KEY = 4,
452 BPF_PROG_LOAD = 5,
453 BPF_OBJ_PIN = 6,
454 BPF_OBJ_GET = 7,
455 BPF_PROG_ATTACH = 8,
456 BPF_PROG_DETACH = 9,
457 BPF_PROG_TEST_RUN = 10,
458 BPF_PROG_GET_NEXT_ID = 11,
459 BPF_MAP_GET_NEXT_ID = 12,
460 BPF_PROG_GET_FD_BY_ID = 13,
461 BPF_MAP_GET_FD_BY_ID = 14,
462 BPF_OBJ_GET_INFO_BY_FD = 15,
463 BPF_PROG_QUERY = 16,
464 BPF_RAW_TRACEPOINT_OPEN = 17,
465 BPF_BTF_LOAD = 18,
466 BPF_BTF_GET_FD_BY_ID = 19,
467 BPF_TASK_FD_QUERY = 20,
468 BPF_MAP_LOOKUP_AND_DELETE_ELEM = 21,
469 BPF_MAP_FREEZE = 22,
470 BPF_BTF_GET_NEXT_ID = 23,
471 BPF_MAP_LOOKUP_BATCH = 24,
472 BPF_MAP_LOOKUP_AND_DELETE_BATCH = 25,
473 BPF_MAP_UPDATE_BATCH = 26,
474 BPF_MAP_DELETE_BATCH = 27,
475 BPF_LINK_CREATE = 28,
476 BPF_LINK_UPDATE = 29,
477 BPF_LINK_GET_FD_BY_ID = 30,
478 BPF_LINK_GET_NEXT_ID = 31,
479 BPF_ENABLE_STATS = 32,
480 BPF_ITER_CREATE = 33,
481 BPF_LINK_DETACH = 34,
482 BPF_PROG_BIND_MAP = 35,
483 BPF_TOKEN_CREATE = 36,
484 __MAX_BPF_CMD = 37,
485}
486impl bpf_map_type {
487 pub const BPF_MAP_TYPE_CGROUP_STORAGE: bpf_map_type =
488 bpf_map_type::BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED;
489}
490impl bpf_map_type {
491 pub const BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: bpf_map_type =
492 bpf_map_type::BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED;
493}
494#[repr(u32)]
495#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
496pub enum bpf_map_type {
497 BPF_MAP_TYPE_UNSPEC = 0,
498 BPF_MAP_TYPE_HASH = 1,
499 BPF_MAP_TYPE_ARRAY = 2,
500 BPF_MAP_TYPE_PROG_ARRAY = 3,
501 BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4,
502 BPF_MAP_TYPE_PERCPU_HASH = 5,
503 BPF_MAP_TYPE_PERCPU_ARRAY = 6,
504 BPF_MAP_TYPE_STACK_TRACE = 7,
505 BPF_MAP_TYPE_CGROUP_ARRAY = 8,
506 BPF_MAP_TYPE_LRU_HASH = 9,
507 BPF_MAP_TYPE_LRU_PERCPU_HASH = 10,
508 BPF_MAP_TYPE_LPM_TRIE = 11,
509 BPF_MAP_TYPE_ARRAY_OF_MAPS = 12,
510 BPF_MAP_TYPE_HASH_OF_MAPS = 13,
511 BPF_MAP_TYPE_DEVMAP = 14,
512 BPF_MAP_TYPE_SOCKMAP = 15,
513 BPF_MAP_TYPE_CPUMAP = 16,
514 BPF_MAP_TYPE_XSKMAP = 17,
515 BPF_MAP_TYPE_SOCKHASH = 18,
516 BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED = 19,
517 BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20,
518 BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE_DEPRECATED = 21,
519 BPF_MAP_TYPE_QUEUE = 22,
520 BPF_MAP_TYPE_STACK = 23,
521 BPF_MAP_TYPE_SK_STORAGE = 24,
522 BPF_MAP_TYPE_DEVMAP_HASH = 25,
523 BPF_MAP_TYPE_STRUCT_OPS = 26,
524 BPF_MAP_TYPE_RINGBUF = 27,
525 BPF_MAP_TYPE_INODE_STORAGE = 28,
526 BPF_MAP_TYPE_TASK_STORAGE = 29,
527 BPF_MAP_TYPE_BLOOM_FILTER = 30,
528 BPF_MAP_TYPE_USER_RINGBUF = 31,
529 BPF_MAP_TYPE_CGRP_STORAGE = 32,
530 BPF_MAP_TYPE_ARENA = 33,
531 __MAX_BPF_MAP_TYPE = 34,
532}
533#[repr(u32)]
534#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
535pub enum bpf_prog_type {
536 BPF_PROG_TYPE_UNSPEC = 0,
537 BPF_PROG_TYPE_SOCKET_FILTER = 1,
538 BPF_PROG_TYPE_KPROBE = 2,
539 BPF_PROG_TYPE_SCHED_CLS = 3,
540 BPF_PROG_TYPE_SCHED_ACT = 4,
541 BPF_PROG_TYPE_TRACEPOINT = 5,
542 BPF_PROG_TYPE_XDP = 6,
543 BPF_PROG_TYPE_PERF_EVENT = 7,
544 BPF_PROG_TYPE_CGROUP_SKB = 8,
545 BPF_PROG_TYPE_CGROUP_SOCK = 9,
546 BPF_PROG_TYPE_LWT_IN = 10,
547 BPF_PROG_TYPE_LWT_OUT = 11,
548 BPF_PROG_TYPE_LWT_XMIT = 12,
549 BPF_PROG_TYPE_SOCK_OPS = 13,
550 BPF_PROG_TYPE_SK_SKB = 14,
551 BPF_PROG_TYPE_CGROUP_DEVICE = 15,
552 BPF_PROG_TYPE_SK_MSG = 16,
553 BPF_PROG_TYPE_RAW_TRACEPOINT = 17,
554 BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18,
555 BPF_PROG_TYPE_LWT_SEG6LOCAL = 19,
556 BPF_PROG_TYPE_LIRC_MODE2 = 20,
557 BPF_PROG_TYPE_SK_REUSEPORT = 21,
558 BPF_PROG_TYPE_FLOW_DISSECTOR = 22,
559 BPF_PROG_TYPE_CGROUP_SYSCTL = 23,
560 BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24,
561 BPF_PROG_TYPE_CGROUP_SOCKOPT = 25,
562 BPF_PROG_TYPE_TRACING = 26,
563 BPF_PROG_TYPE_STRUCT_OPS = 27,
564 BPF_PROG_TYPE_EXT = 28,
565 BPF_PROG_TYPE_LSM = 29,
566 BPF_PROG_TYPE_SK_LOOKUP = 30,
567 BPF_PROG_TYPE_SYSCALL = 31,
568 BPF_PROG_TYPE_NETFILTER = 32,
569 __MAX_BPF_PROG_TYPE = 33,
570}
571#[repr(u32)]
572#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
573pub enum bpf_attach_type {
574 BPF_CGROUP_INET_INGRESS = 0,
575 BPF_CGROUP_INET_EGRESS = 1,
576 BPF_CGROUP_INET_SOCK_CREATE = 2,
577 BPF_CGROUP_SOCK_OPS = 3,
578 BPF_SK_SKB_STREAM_PARSER = 4,
579 BPF_SK_SKB_STREAM_VERDICT = 5,
580 BPF_CGROUP_DEVICE = 6,
581 BPF_SK_MSG_VERDICT = 7,
582 BPF_CGROUP_INET4_BIND = 8,
583 BPF_CGROUP_INET6_BIND = 9,
584 BPF_CGROUP_INET4_CONNECT = 10,
585 BPF_CGROUP_INET6_CONNECT = 11,
586 BPF_CGROUP_INET4_POST_BIND = 12,
587 BPF_CGROUP_INET6_POST_BIND = 13,
588 BPF_CGROUP_UDP4_SENDMSG = 14,
589 BPF_CGROUP_UDP6_SENDMSG = 15,
590 BPF_LIRC_MODE2 = 16,
591 BPF_FLOW_DISSECTOR = 17,
592 BPF_CGROUP_SYSCTL = 18,
593 BPF_CGROUP_UDP4_RECVMSG = 19,
594 BPF_CGROUP_UDP6_RECVMSG = 20,
595 BPF_CGROUP_GETSOCKOPT = 21,
596 BPF_CGROUP_SETSOCKOPT = 22,
597 BPF_TRACE_RAW_TP = 23,
598 BPF_TRACE_FENTRY = 24,
599 BPF_TRACE_FEXIT = 25,
600 BPF_MODIFY_RETURN = 26,
601 BPF_LSM_MAC = 27,
602 BPF_TRACE_ITER = 28,
603 BPF_CGROUP_INET4_GETPEERNAME = 29,
604 BPF_CGROUP_INET6_GETPEERNAME = 30,
605 BPF_CGROUP_INET4_GETSOCKNAME = 31,
606 BPF_CGROUP_INET6_GETSOCKNAME = 32,
607 BPF_XDP_DEVMAP = 33,
608 BPF_CGROUP_INET_SOCK_RELEASE = 34,
609 BPF_XDP_CPUMAP = 35,
610 BPF_SK_LOOKUP = 36,
611 BPF_XDP = 37,
612 BPF_SK_SKB_VERDICT = 38,
613 BPF_SK_REUSEPORT_SELECT = 39,
614 BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40,
615 BPF_PERF_EVENT = 41,
616 BPF_TRACE_KPROBE_MULTI = 42,
617 BPF_LSM_CGROUP = 43,
618 BPF_STRUCT_OPS = 44,
619 BPF_NETFILTER = 45,
620 BPF_TCX_INGRESS = 46,
621 BPF_TCX_EGRESS = 47,
622 BPF_TRACE_UPROBE_MULTI = 48,
623 BPF_CGROUP_UNIX_CONNECT = 49,
624 BPF_CGROUP_UNIX_SENDMSG = 50,
625 BPF_CGROUP_UNIX_RECVMSG = 51,
626 BPF_CGROUP_UNIX_GETPEERNAME = 52,
627 BPF_CGROUP_UNIX_GETSOCKNAME = 53,
628 BPF_NETKIT_PRIMARY = 54,
629 BPF_NETKIT_PEER = 55,
630 __MAX_BPF_ATTACH_TYPE = 56,
631}
632#[repr(u32)]
633#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
634pub enum bpf_link_type {
635 BPF_LINK_TYPE_UNSPEC = 0,
636 BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
637 BPF_LINK_TYPE_TRACING = 2,
638 BPF_LINK_TYPE_CGROUP = 3,
639 BPF_LINK_TYPE_ITER = 4,
640 BPF_LINK_TYPE_NETNS = 5,
641 BPF_LINK_TYPE_XDP = 6,
642 BPF_LINK_TYPE_PERF_EVENT = 7,
643 BPF_LINK_TYPE_KPROBE_MULTI = 8,
644 BPF_LINK_TYPE_STRUCT_OPS = 9,
645 BPF_LINK_TYPE_NETFILTER = 10,
646 BPF_LINK_TYPE_TCX = 11,
647 BPF_LINK_TYPE_UPROBE_MULTI = 12,
648 BPF_LINK_TYPE_NETKIT = 13,
649 __MAX_BPF_LINK_TYPE = 14,
650}
651#[repr(u32)]
652#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
653pub enum bpf_perf_event_type {
654 BPF_PERF_EVENT_UNSPEC = 0,
655 BPF_PERF_EVENT_UPROBE = 1,
656 BPF_PERF_EVENT_URETPROBE = 2,
657 BPF_PERF_EVENT_KPROBE = 3,
658 BPF_PERF_EVENT_KRETPROBE = 4,
659 BPF_PERF_EVENT_TRACEPOINT = 5,
660 BPF_PERF_EVENT_EVENT = 6,
661}
662pub const BPF_F_KPROBE_MULTI_RETURN: _bindgen_ty_2 = 1;
663pub type _bindgen_ty_2 = ::core::ffi::c_uint;
664pub const BPF_F_UPROBE_MULTI_RETURN: _bindgen_ty_3 = 1;
665pub type _bindgen_ty_3 = ::core::ffi::c_uint;
666pub const BPF_ANY: _bindgen_ty_4 = 0;
667pub const BPF_NOEXIST: _bindgen_ty_4 = 1;
668pub const BPF_EXIST: _bindgen_ty_4 = 2;
669pub const BPF_F_LOCK: _bindgen_ty_4 = 4;
670pub type _bindgen_ty_4 = ::core::ffi::c_uint;
671pub const BPF_F_NO_PREALLOC: _bindgen_ty_5 = 1;
672pub const BPF_F_NO_COMMON_LRU: _bindgen_ty_5 = 2;
673pub const BPF_F_NUMA_NODE: _bindgen_ty_5 = 4;
674pub const BPF_F_RDONLY: _bindgen_ty_5 = 8;
675pub const BPF_F_WRONLY: _bindgen_ty_5 = 16;
676pub const BPF_F_STACK_BUILD_ID: _bindgen_ty_5 = 32;
677pub const BPF_F_ZERO_SEED: _bindgen_ty_5 = 64;
678pub const BPF_F_RDONLY_PROG: _bindgen_ty_5 = 128;
679pub const BPF_F_WRONLY_PROG: _bindgen_ty_5 = 256;
680pub const BPF_F_CLONE: _bindgen_ty_5 = 512;
681pub const BPF_F_MMAPABLE: _bindgen_ty_5 = 1024;
682pub const BPF_F_PRESERVE_ELEMS: _bindgen_ty_5 = 2048;
683pub const BPF_F_INNER_MAP: _bindgen_ty_5 = 4096;
684pub const BPF_F_LINK: _bindgen_ty_5 = 8192;
685pub const BPF_F_PATH_FD: _bindgen_ty_5 = 16384;
686pub const BPF_F_VTYPE_BTF_OBJ_FD: _bindgen_ty_5 = 32768;
687pub const BPF_F_TOKEN_FD: _bindgen_ty_5 = 65536;
688pub const BPF_F_SEGV_ON_FAULT: _bindgen_ty_5 = 131072;
689pub const BPF_F_NO_USER_CONV: _bindgen_ty_5 = 262144;
690pub type _bindgen_ty_5 = ::core::ffi::c_uint;
691#[repr(u32)]
692#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
693pub enum bpf_stats_type {
694 BPF_STATS_RUN_TIME = 0,
695}
696#[repr(C)]
697#[derive(Copy, Clone)]
698pub union bpf_attr {
699 pub __bindgen_anon_1: bpf_attr__bindgen_ty_1,
700 pub __bindgen_anon_2: bpf_attr__bindgen_ty_2,
701 pub batch: bpf_attr__bindgen_ty_3,
702 pub __bindgen_anon_3: bpf_attr__bindgen_ty_4,
703 pub __bindgen_anon_4: bpf_attr__bindgen_ty_5,
704 pub __bindgen_anon_5: bpf_attr__bindgen_ty_6,
705 pub test: bpf_attr__bindgen_ty_7,
706 pub __bindgen_anon_6: bpf_attr__bindgen_ty_8,
707 pub info: bpf_attr__bindgen_ty_9,
708 pub query: bpf_attr__bindgen_ty_10,
709 pub raw_tracepoint: bpf_attr__bindgen_ty_11,
710 pub __bindgen_anon_7: bpf_attr__bindgen_ty_12,
711 pub task_fd_query: bpf_attr__bindgen_ty_13,
712 pub link_create: bpf_attr__bindgen_ty_14,
713 pub link_update: bpf_attr__bindgen_ty_15,
714 pub link_detach: bpf_attr__bindgen_ty_16,
715 pub enable_stats: bpf_attr__bindgen_ty_17,
716 pub iter_create: bpf_attr__bindgen_ty_18,
717 pub prog_bind_map: bpf_attr__bindgen_ty_19,
718 pub token_create: bpf_attr__bindgen_ty_20,
719}
720#[repr(C)]
721#[derive(Debug, Copy, Clone)]
722pub struct bpf_attr__bindgen_ty_1 {
723 pub map_type: __u32,
724 pub key_size: __u32,
725 pub value_size: __u32,
726 pub max_entries: __u32,
727 pub map_flags: __u32,
728 pub inner_map_fd: __u32,
729 pub numa_node: __u32,
730 pub map_name: [::core::ffi::c_char; 16usize],
731 pub map_ifindex: __u32,
732 pub btf_fd: __u32,
733 pub btf_key_type_id: __u32,
734 pub btf_value_type_id: __u32,
735 pub btf_vmlinux_value_type_id: __u32,
736 pub map_extra: __u64,
737 pub value_type_btf_obj_fd: __s32,
738 pub map_token_fd: __s32,
739}
740#[repr(C)]
741#[derive(Copy, Clone)]
742pub struct bpf_attr__bindgen_ty_2 {
743 pub map_fd: __u32,
744 pub key: __u64,
745 pub __bindgen_anon_1: bpf_attr__bindgen_ty_2__bindgen_ty_1,
746 pub flags: __u64,
747}
748#[repr(C)]
749#[derive(Copy, Clone)]
750pub union bpf_attr__bindgen_ty_2__bindgen_ty_1 {
751 pub value: __u64,
752 pub next_key: __u64,
753}
754#[repr(C)]
755#[derive(Debug, Copy, Clone)]
756pub struct bpf_attr__bindgen_ty_3 {
757 pub in_batch: __u64,
758 pub out_batch: __u64,
759 pub keys: __u64,
760 pub values: __u64,
761 pub count: __u32,
762 pub map_fd: __u32,
763 pub elem_flags: __u64,
764 pub flags: __u64,
765}
766#[repr(C)]
767#[derive(Copy, Clone)]
768pub struct bpf_attr__bindgen_ty_4 {
769 pub prog_type: __u32,
770 pub insn_cnt: __u32,
771 pub insns: __u64,
772 pub license: __u64,
773 pub log_level: __u32,
774 pub log_size: __u32,
775 pub log_buf: __u64,
776 pub kern_version: __u32,
777 pub prog_flags: __u32,
778 pub prog_name: [::core::ffi::c_char; 16usize],
779 pub prog_ifindex: __u32,
780 pub expected_attach_type: __u32,
781 pub prog_btf_fd: __u32,
782 pub func_info_rec_size: __u32,
783 pub func_info: __u64,
784 pub func_info_cnt: __u32,
785 pub line_info_rec_size: __u32,
786 pub line_info: __u64,
787 pub line_info_cnt: __u32,
788 pub attach_btf_id: __u32,
789 pub __bindgen_anon_1: bpf_attr__bindgen_ty_4__bindgen_ty_1,
790 pub core_relo_cnt: __u32,
791 pub fd_array: __u64,
792 pub core_relos: __u64,
793 pub core_relo_rec_size: __u32,
794 pub log_true_size: __u32,
795 pub prog_token_fd: __s32,
796}
797#[repr(C)]
798#[derive(Copy, Clone)]
799pub union bpf_attr__bindgen_ty_4__bindgen_ty_1 {
800 pub attach_prog_fd: __u32,
801 pub attach_btf_obj_fd: __u32,
802}
803#[repr(C)]
804#[derive(Debug, Copy, Clone)]
805pub struct bpf_attr__bindgen_ty_5 {
806 pub pathname: __u64,
807 pub bpf_fd: __u32,
808 pub file_flags: __u32,
809 pub path_fd: __s32,
810}
811#[repr(C)]
812#[derive(Copy, Clone)]
813pub struct bpf_attr__bindgen_ty_6 {
814 pub __bindgen_anon_1: bpf_attr__bindgen_ty_6__bindgen_ty_1,
815 pub attach_bpf_fd: __u32,
816 pub attach_type: __u32,
817 pub attach_flags: __u32,
818 pub replace_bpf_fd: __u32,
819 pub __bindgen_anon_2: bpf_attr__bindgen_ty_6__bindgen_ty_2,
820 pub expected_revision: __u64,
821}
822#[repr(C)]
823#[derive(Copy, Clone)]
824pub union bpf_attr__bindgen_ty_6__bindgen_ty_1 {
825 pub target_fd: __u32,
826 pub target_ifindex: __u32,
827}
828#[repr(C)]
829#[derive(Copy, Clone)]
830pub union bpf_attr__bindgen_ty_6__bindgen_ty_2 {
831 pub relative_fd: __u32,
832 pub relative_id: __u32,
833}
834#[repr(C)]
835#[derive(Debug, Copy, Clone)]
836pub struct bpf_attr__bindgen_ty_7 {
837 pub prog_fd: __u32,
838 pub retval: __u32,
839 pub data_size_in: __u32,
840 pub data_size_out: __u32,
841 pub data_in: __u64,
842 pub data_out: __u64,
843 pub repeat: __u32,
844 pub duration: __u32,
845 pub ctx_size_in: __u32,
846 pub ctx_size_out: __u32,
847 pub ctx_in: __u64,
848 pub ctx_out: __u64,
849 pub flags: __u32,
850 pub cpu: __u32,
851 pub batch_size: __u32,
852}
853#[repr(C)]
854#[derive(Copy, Clone)]
855pub struct bpf_attr__bindgen_ty_8 {
856 pub __bindgen_anon_1: bpf_attr__bindgen_ty_8__bindgen_ty_1,
857 pub next_id: __u32,
858 pub open_flags: __u32,
859}
860#[repr(C)]
861#[derive(Copy, Clone)]
862pub union bpf_attr__bindgen_ty_8__bindgen_ty_1 {
863 pub start_id: __u32,
864 pub prog_id: __u32,
865 pub map_id: __u32,
866 pub btf_id: __u32,
867 pub link_id: __u32,
868}
869#[repr(C)]
870#[derive(Debug, Copy, Clone)]
871pub struct bpf_attr__bindgen_ty_9 {
872 pub bpf_fd: __u32,
873 pub info_len: __u32,
874 pub info: __u64,
875}
876#[repr(C)]
877#[derive(Copy, Clone)]
878pub struct bpf_attr__bindgen_ty_10 {
879 pub __bindgen_anon_1: bpf_attr__bindgen_ty_10__bindgen_ty_1,
880 pub attach_type: __u32,
881 pub query_flags: __u32,
882 pub attach_flags: __u32,
883 pub prog_ids: __u64,
884 pub __bindgen_anon_2: bpf_attr__bindgen_ty_10__bindgen_ty_2,
885 pub _bitfield_align_1: [u8; 0],
886 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
887 pub prog_attach_flags: __u64,
888 pub link_ids: __u64,
889 pub link_attach_flags: __u64,
890 pub revision: __u64,
891}
892#[repr(C)]
893#[derive(Copy, Clone)]
894pub union bpf_attr__bindgen_ty_10__bindgen_ty_1 {
895 pub target_fd: __u32,
896 pub target_ifindex: __u32,
897}
898#[repr(C)]
899#[derive(Copy, Clone)]
900pub union bpf_attr__bindgen_ty_10__bindgen_ty_2 {
901 pub prog_cnt: __u32,
902 pub count: __u32,
903}
904impl bpf_attr__bindgen_ty_10 {
905 #[inline]
906 pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> {
907 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
908 __bindgen_bitfield_unit
909 }
910}
911#[repr(C)]
912#[derive(Debug, Copy, Clone)]
913pub struct bpf_attr__bindgen_ty_11 {
914 pub name: __u64,
915 pub prog_fd: __u32,
916 pub _bitfield_align_1: [u8; 0],
917 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
918 pub cookie: __u64,
919}
920impl bpf_attr__bindgen_ty_11 {
921 #[inline]
922 pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> {
923 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
924 __bindgen_bitfield_unit
925 }
926}
927#[repr(C)]
928#[derive(Debug, Copy, Clone)]
929pub struct bpf_attr__bindgen_ty_12 {
930 pub btf: __u64,
931 pub btf_log_buf: __u64,
932 pub btf_size: __u32,
933 pub btf_log_size: __u32,
934 pub btf_log_level: __u32,
935 pub btf_log_true_size: __u32,
936 pub btf_flags: __u32,
937 pub btf_token_fd: __s32,
938}
939#[repr(C)]
940#[derive(Debug, Copy, Clone)]
941pub struct bpf_attr__bindgen_ty_13 {
942 pub pid: __u32,
943 pub fd: __u32,
944 pub flags: __u32,
945 pub buf_len: __u32,
946 pub buf: __u64,
947 pub prog_id: __u32,
948 pub fd_type: __u32,
949 pub probe_offset: __u64,
950 pub probe_addr: __u64,
951}
952#[repr(C)]
953#[derive(Copy, Clone)]
954pub struct bpf_attr__bindgen_ty_14 {
955 pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_1,
956 pub __bindgen_anon_2: bpf_attr__bindgen_ty_14__bindgen_ty_2,
957 pub attach_type: __u32,
958 pub flags: __u32,
959 pub __bindgen_anon_3: bpf_attr__bindgen_ty_14__bindgen_ty_3,
960}
961#[repr(C)]
962#[derive(Copy, Clone)]
963pub union bpf_attr__bindgen_ty_14__bindgen_ty_1 {
964 pub prog_fd: __u32,
965 pub map_fd: __u32,
966}
967#[repr(C)]
968#[derive(Copy, Clone)]
969pub union bpf_attr__bindgen_ty_14__bindgen_ty_2 {
970 pub target_fd: __u32,
971 pub target_ifindex: __u32,
972}
973#[repr(C)]
974#[derive(Copy, Clone)]
975pub union bpf_attr__bindgen_ty_14__bindgen_ty_3 {
976 pub target_btf_id: __u32,
977 pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1,
978 pub perf_event: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2,
979 pub kprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3,
980 pub tracing: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4,
981 pub netfilter: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5,
982 pub tcx: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6,
983 pub uprobe_multi: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7,
984 pub netkit: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8,
985}
986#[repr(C)]
987#[derive(Debug, Copy, Clone)]
988pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_1 {
989 pub iter_info: __u64,
990 pub iter_info_len: __u32,
991}
992#[repr(C)]
993#[derive(Debug, Copy, Clone)]
994pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_2 {
995 pub bpf_cookie: __u64,
996}
997#[repr(C)]
998#[derive(Debug, Copy, Clone)]
999pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_3 {
1000 pub flags: __u32,
1001 pub cnt: __u32,
1002 pub syms: __u64,
1003 pub addrs: __u64,
1004 pub cookies: __u64,
1005}
1006#[repr(C)]
1007#[derive(Debug, Copy, Clone)]
1008pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_4 {
1009 pub target_btf_id: __u32,
1010 pub cookie: __u64,
1011}
1012#[repr(C)]
1013#[derive(Debug, Copy, Clone)]
1014pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_5 {
1015 pub pf: __u32,
1016 pub hooknum: __u32,
1017 pub priority: __s32,
1018 pub flags: __u32,
1019}
1020#[repr(C)]
1021#[derive(Copy, Clone)]
1022pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6 {
1023 pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1,
1024 pub expected_revision: __u64,
1025}
1026#[repr(C)]
1027#[derive(Copy, Clone)]
1028pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_6__bindgen_ty_1 {
1029 pub relative_fd: __u32,
1030 pub relative_id: __u32,
1031}
1032#[repr(C)]
1033#[derive(Debug, Copy, Clone)]
1034pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_7 {
1035 pub path: __u64,
1036 pub offsets: __u64,
1037 pub ref_ctr_offsets: __u64,
1038 pub cookies: __u64,
1039 pub cnt: __u32,
1040 pub flags: __u32,
1041 pub pid: __u32,
1042}
1043#[repr(C)]
1044#[derive(Copy, Clone)]
1045pub struct bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8 {
1046 pub __bindgen_anon_1: bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1,
1047 pub expected_revision: __u64,
1048}
1049#[repr(C)]
1050#[derive(Copy, Clone)]
1051pub union bpf_attr__bindgen_ty_14__bindgen_ty_3__bindgen_ty_8__bindgen_ty_1 {
1052 pub relative_fd: __u32,
1053 pub relative_id: __u32,
1054}
1055#[repr(C)]
1056#[derive(Copy, Clone)]
1057pub struct bpf_attr__bindgen_ty_15 {
1058 pub link_fd: __u32,
1059 pub __bindgen_anon_1: bpf_attr__bindgen_ty_15__bindgen_ty_1,
1060 pub flags: __u32,
1061 pub __bindgen_anon_2: bpf_attr__bindgen_ty_15__bindgen_ty_2,
1062}
1063#[repr(C)]
1064#[derive(Copy, Clone)]
1065pub union bpf_attr__bindgen_ty_15__bindgen_ty_1 {
1066 pub new_prog_fd: __u32,
1067 pub new_map_fd: __u32,
1068}
1069#[repr(C)]
1070#[derive(Copy, Clone)]
1071pub union bpf_attr__bindgen_ty_15__bindgen_ty_2 {
1072 pub old_prog_fd: __u32,
1073 pub old_map_fd: __u32,
1074}
1075#[repr(C)]
1076#[derive(Debug, Copy, Clone)]
1077pub struct bpf_attr__bindgen_ty_16 {
1078 pub link_fd: __u32,
1079}
1080#[repr(C)]
1081#[derive(Debug, Copy, Clone)]
1082pub struct bpf_attr__bindgen_ty_17 {
1083 pub type_: __u32,
1084}
1085#[repr(C)]
1086#[derive(Debug, Copy, Clone)]
1087pub struct bpf_attr__bindgen_ty_18 {
1088 pub link_fd: __u32,
1089 pub flags: __u32,
1090}
1091#[repr(C)]
1092#[derive(Debug, Copy, Clone)]
1093pub struct bpf_attr__bindgen_ty_19 {
1094 pub prog_fd: __u32,
1095 pub map_fd: __u32,
1096 pub flags: __u32,
1097}
1098#[repr(C)]
1099#[derive(Debug, Copy, Clone)]
1100pub struct bpf_attr__bindgen_ty_20 {
1101 pub flags: __u32,
1102 pub bpffs_fd: __u32,
1103}
1104#[repr(u32)]
1105#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1106pub enum bpf_func_id {
1107 BPF_FUNC_unspec = 0,
1108 BPF_FUNC_map_lookup_elem = 1,
1109 BPF_FUNC_map_update_elem = 2,
1110 BPF_FUNC_map_delete_elem = 3,
1111 BPF_FUNC_probe_read = 4,
1112 BPF_FUNC_ktime_get_ns = 5,
1113 BPF_FUNC_trace_printk = 6,
1114 BPF_FUNC_get_prandom_u32 = 7,
1115 BPF_FUNC_get_smp_processor_id = 8,
1116 BPF_FUNC_skb_store_bytes = 9,
1117 BPF_FUNC_l3_csum_replace = 10,
1118 BPF_FUNC_l4_csum_replace = 11,
1119 BPF_FUNC_tail_call = 12,
1120 BPF_FUNC_clone_redirect = 13,
1121 BPF_FUNC_get_current_pid_tgid = 14,
1122 BPF_FUNC_get_current_uid_gid = 15,
1123 BPF_FUNC_get_current_comm = 16,
1124 BPF_FUNC_get_cgroup_classid = 17,
1125 BPF_FUNC_skb_vlan_push = 18,
1126 BPF_FUNC_skb_vlan_pop = 19,
1127 BPF_FUNC_skb_get_tunnel_key = 20,
1128 BPF_FUNC_skb_set_tunnel_key = 21,
1129 BPF_FUNC_perf_event_read = 22,
1130 BPF_FUNC_redirect = 23,
1131 BPF_FUNC_get_route_realm = 24,
1132 BPF_FUNC_perf_event_output = 25,
1133 BPF_FUNC_skb_load_bytes = 26,
1134 BPF_FUNC_get_stackid = 27,
1135 BPF_FUNC_csum_diff = 28,
1136 BPF_FUNC_skb_get_tunnel_opt = 29,
1137 BPF_FUNC_skb_set_tunnel_opt = 30,
1138 BPF_FUNC_skb_change_proto = 31,
1139 BPF_FUNC_skb_change_type = 32,
1140 BPF_FUNC_skb_under_cgroup = 33,
1141 BPF_FUNC_get_hash_recalc = 34,
1142 BPF_FUNC_get_current_task = 35,
1143 BPF_FUNC_probe_write_user = 36,
1144 BPF_FUNC_current_task_under_cgroup = 37,
1145 BPF_FUNC_skb_change_tail = 38,
1146 BPF_FUNC_skb_pull_data = 39,
1147 BPF_FUNC_csum_update = 40,
1148 BPF_FUNC_set_hash_invalid = 41,
1149 BPF_FUNC_get_numa_node_id = 42,
1150 BPF_FUNC_skb_change_head = 43,
1151 BPF_FUNC_xdp_adjust_head = 44,
1152 BPF_FUNC_probe_read_str = 45,
1153 BPF_FUNC_get_socket_cookie = 46,
1154 BPF_FUNC_get_socket_uid = 47,
1155 BPF_FUNC_set_hash = 48,
1156 BPF_FUNC_setsockopt = 49,
1157 BPF_FUNC_skb_adjust_room = 50,
1158 BPF_FUNC_redirect_map = 51,
1159 BPF_FUNC_sk_redirect_map = 52,
1160 BPF_FUNC_sock_map_update = 53,
1161 BPF_FUNC_xdp_adjust_meta = 54,
1162 BPF_FUNC_perf_event_read_value = 55,
1163 BPF_FUNC_perf_prog_read_value = 56,
1164 BPF_FUNC_getsockopt = 57,
1165 BPF_FUNC_override_return = 58,
1166 BPF_FUNC_sock_ops_cb_flags_set = 59,
1167 BPF_FUNC_msg_redirect_map = 60,
1168 BPF_FUNC_msg_apply_bytes = 61,
1169 BPF_FUNC_msg_cork_bytes = 62,
1170 BPF_FUNC_msg_pull_data = 63,
1171 BPF_FUNC_bind = 64,
1172 BPF_FUNC_xdp_adjust_tail = 65,
1173 BPF_FUNC_skb_get_xfrm_state = 66,
1174 BPF_FUNC_get_stack = 67,
1175 BPF_FUNC_skb_load_bytes_relative = 68,
1176 BPF_FUNC_fib_lookup = 69,
1177 BPF_FUNC_sock_hash_update = 70,
1178 BPF_FUNC_msg_redirect_hash = 71,
1179 BPF_FUNC_sk_redirect_hash = 72,
1180 BPF_FUNC_lwt_push_encap = 73,
1181 BPF_FUNC_lwt_seg6_store_bytes = 74,
1182 BPF_FUNC_lwt_seg6_adjust_srh = 75,
1183 BPF_FUNC_lwt_seg6_action = 76,
1184 BPF_FUNC_rc_repeat = 77,
1185 BPF_FUNC_rc_keydown = 78,
1186 BPF_FUNC_skb_cgroup_id = 79,
1187 BPF_FUNC_get_current_cgroup_id = 80,
1188 BPF_FUNC_get_local_storage = 81,
1189 BPF_FUNC_sk_select_reuseport = 82,
1190 BPF_FUNC_skb_ancestor_cgroup_id = 83,
1191 BPF_FUNC_sk_lookup_tcp = 84,
1192 BPF_FUNC_sk_lookup_udp = 85,
1193 BPF_FUNC_sk_release = 86,
1194 BPF_FUNC_map_push_elem = 87,
1195 BPF_FUNC_map_pop_elem = 88,
1196 BPF_FUNC_map_peek_elem = 89,
1197 BPF_FUNC_msg_push_data = 90,
1198 BPF_FUNC_msg_pop_data = 91,
1199 BPF_FUNC_rc_pointer_rel = 92,
1200 BPF_FUNC_spin_lock = 93,
1201 BPF_FUNC_spin_unlock = 94,
1202 BPF_FUNC_sk_fullsock = 95,
1203 BPF_FUNC_tcp_sock = 96,
1204 BPF_FUNC_skb_ecn_set_ce = 97,
1205 BPF_FUNC_get_listener_sock = 98,
1206 BPF_FUNC_skc_lookup_tcp = 99,
1207 BPF_FUNC_tcp_check_syncookie = 100,
1208 BPF_FUNC_sysctl_get_name = 101,
1209 BPF_FUNC_sysctl_get_current_value = 102,
1210 BPF_FUNC_sysctl_get_new_value = 103,
1211 BPF_FUNC_sysctl_set_new_value = 104,
1212 BPF_FUNC_strtol = 105,
1213 BPF_FUNC_strtoul = 106,
1214 BPF_FUNC_sk_storage_get = 107,
1215 BPF_FUNC_sk_storage_delete = 108,
1216 BPF_FUNC_send_signal = 109,
1217 BPF_FUNC_tcp_gen_syncookie = 110,
1218 BPF_FUNC_skb_output = 111,
1219 BPF_FUNC_probe_read_user = 112,
1220 BPF_FUNC_probe_read_kernel = 113,
1221 BPF_FUNC_probe_read_user_str = 114,
1222 BPF_FUNC_probe_read_kernel_str = 115,
1223 BPF_FUNC_tcp_send_ack = 116,
1224 BPF_FUNC_send_signal_thread = 117,
1225 BPF_FUNC_jiffies64 = 118,
1226 BPF_FUNC_read_branch_records = 119,
1227 BPF_FUNC_get_ns_current_pid_tgid = 120,
1228 BPF_FUNC_xdp_output = 121,
1229 BPF_FUNC_get_netns_cookie = 122,
1230 BPF_FUNC_get_current_ancestor_cgroup_id = 123,
1231 BPF_FUNC_sk_assign = 124,
1232 BPF_FUNC_ktime_get_boot_ns = 125,
1233 BPF_FUNC_seq_printf = 126,
1234 BPF_FUNC_seq_write = 127,
1235 BPF_FUNC_sk_cgroup_id = 128,
1236 BPF_FUNC_sk_ancestor_cgroup_id = 129,
1237 BPF_FUNC_ringbuf_output = 130,
1238 BPF_FUNC_ringbuf_reserve = 131,
1239 BPF_FUNC_ringbuf_submit = 132,
1240 BPF_FUNC_ringbuf_discard = 133,
1241 BPF_FUNC_ringbuf_query = 134,
1242 BPF_FUNC_csum_level = 135,
1243 BPF_FUNC_skc_to_tcp6_sock = 136,
1244 BPF_FUNC_skc_to_tcp_sock = 137,
1245 BPF_FUNC_skc_to_tcp_timewait_sock = 138,
1246 BPF_FUNC_skc_to_tcp_request_sock = 139,
1247 BPF_FUNC_skc_to_udp6_sock = 140,
1248 BPF_FUNC_get_task_stack = 141,
1249 BPF_FUNC_load_hdr_opt = 142,
1250 BPF_FUNC_store_hdr_opt = 143,
1251 BPF_FUNC_reserve_hdr_opt = 144,
1252 BPF_FUNC_inode_storage_get = 145,
1253 BPF_FUNC_inode_storage_delete = 146,
1254 BPF_FUNC_d_path = 147,
1255 BPF_FUNC_copy_from_user = 148,
1256 BPF_FUNC_snprintf_btf = 149,
1257 BPF_FUNC_seq_printf_btf = 150,
1258 BPF_FUNC_skb_cgroup_classid = 151,
1259 BPF_FUNC_redirect_neigh = 152,
1260 BPF_FUNC_per_cpu_ptr = 153,
1261 BPF_FUNC_this_cpu_ptr = 154,
1262 BPF_FUNC_redirect_peer = 155,
1263 BPF_FUNC_task_storage_get = 156,
1264 BPF_FUNC_task_storage_delete = 157,
1265 BPF_FUNC_get_current_task_btf = 158,
1266 BPF_FUNC_bprm_opts_set = 159,
1267 BPF_FUNC_ktime_get_coarse_ns = 160,
1268 BPF_FUNC_ima_inode_hash = 161,
1269 BPF_FUNC_sock_from_file = 162,
1270 BPF_FUNC_check_mtu = 163,
1271 BPF_FUNC_for_each_map_elem = 164,
1272 BPF_FUNC_snprintf = 165,
1273 BPF_FUNC_sys_bpf = 166,
1274 BPF_FUNC_btf_find_by_name_kind = 167,
1275 BPF_FUNC_sys_close = 168,
1276 BPF_FUNC_timer_init = 169,
1277 BPF_FUNC_timer_set_callback = 170,
1278 BPF_FUNC_timer_start = 171,
1279 BPF_FUNC_timer_cancel = 172,
1280 BPF_FUNC_get_func_ip = 173,
1281 BPF_FUNC_get_attach_cookie = 174,
1282 BPF_FUNC_task_pt_regs = 175,
1283 BPF_FUNC_get_branch_snapshot = 176,
1284 BPF_FUNC_trace_vprintk = 177,
1285 BPF_FUNC_skc_to_unix_sock = 178,
1286 BPF_FUNC_kallsyms_lookup_name = 179,
1287 BPF_FUNC_find_vma = 180,
1288 BPF_FUNC_loop = 181,
1289 BPF_FUNC_strncmp = 182,
1290 BPF_FUNC_get_func_arg = 183,
1291 BPF_FUNC_get_func_ret = 184,
1292 BPF_FUNC_get_func_arg_cnt = 185,
1293 BPF_FUNC_get_retval = 186,
1294 BPF_FUNC_set_retval = 187,
1295 BPF_FUNC_xdp_get_buff_len = 188,
1296 BPF_FUNC_xdp_load_bytes = 189,
1297 BPF_FUNC_xdp_store_bytes = 190,
1298 BPF_FUNC_copy_from_user_task = 191,
1299 BPF_FUNC_skb_set_tstamp = 192,
1300 BPF_FUNC_ima_file_hash = 193,
1301 BPF_FUNC_kptr_xchg = 194,
1302 BPF_FUNC_map_lookup_percpu_elem = 195,
1303 BPF_FUNC_skc_to_mptcp_sock = 196,
1304 BPF_FUNC_dynptr_from_mem = 197,
1305 BPF_FUNC_ringbuf_reserve_dynptr = 198,
1306 BPF_FUNC_ringbuf_submit_dynptr = 199,
1307 BPF_FUNC_ringbuf_discard_dynptr = 200,
1308 BPF_FUNC_dynptr_read = 201,
1309 BPF_FUNC_dynptr_write = 202,
1310 BPF_FUNC_dynptr_data = 203,
1311 BPF_FUNC_tcp_raw_gen_syncookie_ipv4 = 204,
1312 BPF_FUNC_tcp_raw_gen_syncookie_ipv6 = 205,
1313 BPF_FUNC_tcp_raw_check_syncookie_ipv4 = 206,
1314 BPF_FUNC_tcp_raw_check_syncookie_ipv6 = 207,
1315 BPF_FUNC_ktime_get_tai_ns = 208,
1316 BPF_FUNC_user_ringbuf_drain = 209,
1317 BPF_FUNC_cgrp_storage_get = 210,
1318 BPF_FUNC_cgrp_storage_delete = 211,
1319 __BPF_FUNC_MAX_ID = 212,
1320}
1321pub const BPF_F_RECOMPUTE_CSUM: _bindgen_ty_6 = 1;
1322pub const BPF_F_INVALIDATE_HASH: _bindgen_ty_6 = 2;
1323pub type _bindgen_ty_6 = ::core::ffi::c_uint;
1324pub const BPF_F_HDR_FIELD_MASK: _bindgen_ty_7 = 15;
1325pub type _bindgen_ty_7 = ::core::ffi::c_uint;
1326pub const BPF_F_PSEUDO_HDR: _bindgen_ty_8 = 16;
1327pub const BPF_F_MARK_MANGLED_0: _bindgen_ty_8 = 32;
1328pub const BPF_F_MARK_ENFORCE: _bindgen_ty_8 = 64;
1329pub type _bindgen_ty_8 = ::core::ffi::c_uint;
1330pub const BPF_F_INGRESS: _bindgen_ty_9 = 1;
1331pub type _bindgen_ty_9 = ::core::ffi::c_uint;
1332pub const BPF_F_TUNINFO_IPV6: _bindgen_ty_10 = 1;
1333pub type _bindgen_ty_10 = ::core::ffi::c_uint;
1334pub const BPF_F_SKIP_FIELD_MASK: _bindgen_ty_11 = 255;
1335pub const BPF_F_USER_STACK: _bindgen_ty_11 = 256;
1336pub const BPF_F_FAST_STACK_CMP: _bindgen_ty_11 = 512;
1337pub const BPF_F_REUSE_STACKID: _bindgen_ty_11 = 1024;
1338pub const BPF_F_USER_BUILD_ID: _bindgen_ty_11 = 2048;
1339pub type _bindgen_ty_11 = ::core::ffi::c_uint;
1340pub const BPF_F_ZERO_CSUM_TX: _bindgen_ty_12 = 2;
1341pub const BPF_F_DONT_FRAGMENT: _bindgen_ty_12 = 4;
1342pub const BPF_F_SEQ_NUMBER: _bindgen_ty_12 = 8;
1343pub const BPF_F_NO_TUNNEL_KEY: _bindgen_ty_12 = 16;
1344pub type _bindgen_ty_12 = ::core::ffi::c_uint;
1345pub const BPF_F_TUNINFO_FLAGS: _bindgen_ty_13 = 16;
1346pub type _bindgen_ty_13 = ::core::ffi::c_uint;
1347pub const BPF_F_INDEX_MASK: _bindgen_ty_14 = 4294967295;
1348pub const BPF_F_CURRENT_CPU: _bindgen_ty_14 = 4294967295;
1349pub const BPF_F_CTXLEN_MASK: _bindgen_ty_14 = 4503595332403200;
1350pub type _bindgen_ty_14 = ::core::ffi::c_ulong;
1351pub const BPF_F_CURRENT_NETNS: _bindgen_ty_15 = -1;
1352pub type _bindgen_ty_15 = ::core::ffi::c_int;
1353pub const BPF_CSUM_LEVEL_QUERY: _bindgen_ty_16 = _bindgen_ty_16::BPF_CSUM_LEVEL_QUERY;
1354pub const BPF_CSUM_LEVEL_INC: _bindgen_ty_16 = _bindgen_ty_16::BPF_CSUM_LEVEL_INC;
1355pub const BPF_CSUM_LEVEL_DEC: _bindgen_ty_16 = _bindgen_ty_16::BPF_CSUM_LEVEL_DEC;
1356pub const BPF_CSUM_LEVEL_RESET: _bindgen_ty_16 = _bindgen_ty_16::BPF_CSUM_LEVEL_RESET;
1357#[repr(u32)]
1358#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1359pub enum _bindgen_ty_16 {
1360 BPF_CSUM_LEVEL_QUERY = 0,
1361 BPF_CSUM_LEVEL_INC = 1,
1362 BPF_CSUM_LEVEL_DEC = 2,
1363 BPF_CSUM_LEVEL_RESET = 3,
1364}
1365pub const BPF_F_ADJ_ROOM_FIXED_GSO: _bindgen_ty_17 = 1;
1366pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV4: _bindgen_ty_17 = 2;
1367pub const BPF_F_ADJ_ROOM_ENCAP_L3_IPV6: _bindgen_ty_17 = 4;
1368pub const BPF_F_ADJ_ROOM_ENCAP_L4_GRE: _bindgen_ty_17 = 8;
1369pub const BPF_F_ADJ_ROOM_ENCAP_L4_UDP: _bindgen_ty_17 = 16;
1370pub const BPF_F_ADJ_ROOM_NO_CSUM_RESET: _bindgen_ty_17 = 32;
1371pub const BPF_F_ADJ_ROOM_ENCAP_L2_ETH: _bindgen_ty_17 = 64;
1372pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV4: _bindgen_ty_17 = 128;
1373pub const BPF_F_ADJ_ROOM_DECAP_L3_IPV6: _bindgen_ty_17 = 256;
1374pub type _bindgen_ty_17 = ::core::ffi::c_uint;
1375pub const BPF_ADJ_ROOM_ENCAP_L2_MASK: _bindgen_ty_18 = _bindgen_ty_18::BPF_ADJ_ROOM_ENCAP_L2_MASK;
1376pub const BPF_ADJ_ROOM_ENCAP_L2_SHIFT: _bindgen_ty_18 = _bindgen_ty_18::BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
1377#[repr(u32)]
1378#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1379pub enum _bindgen_ty_18 {
1380 BPF_ADJ_ROOM_ENCAP_L2_MASK = 255,
1381 BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 56,
1382}
1383pub const BPF_F_SYSCTL_BASE_NAME: _bindgen_ty_19 = 1;
1384pub type _bindgen_ty_19 = ::core::ffi::c_uint;
1385pub const BPF_LOCAL_STORAGE_GET_F_CREATE: _bindgen_ty_20 =
1386 _bindgen_ty_20::BPF_LOCAL_STORAGE_GET_F_CREATE;
1387pub const BPF_SK_STORAGE_GET_F_CREATE: _bindgen_ty_20 =
1388 _bindgen_ty_20::BPF_LOCAL_STORAGE_GET_F_CREATE;
1389#[repr(u32)]
1390#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1391pub enum _bindgen_ty_20 {
1392 BPF_LOCAL_STORAGE_GET_F_CREATE = 1,
1393}
1394pub const BPF_F_GET_BRANCH_RECORDS_SIZE: _bindgen_ty_21 = 1;
1395pub type _bindgen_ty_21 = ::core::ffi::c_uint;
1396pub const BPF_RB_NO_WAKEUP: _bindgen_ty_22 = _bindgen_ty_22::BPF_RB_NO_WAKEUP;
1397pub const BPF_RB_FORCE_WAKEUP: _bindgen_ty_22 = _bindgen_ty_22::BPF_RB_FORCE_WAKEUP;
1398#[repr(u32)]
1399#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1400pub enum _bindgen_ty_22 {
1401 BPF_RB_NO_WAKEUP = 1,
1402 BPF_RB_FORCE_WAKEUP = 2,
1403}
1404pub const BPF_RB_AVAIL_DATA: _bindgen_ty_23 = _bindgen_ty_23::BPF_RB_AVAIL_DATA;
1405pub const BPF_RB_RING_SIZE: _bindgen_ty_23 = _bindgen_ty_23::BPF_RB_RING_SIZE;
1406pub const BPF_RB_CONS_POS: _bindgen_ty_23 = _bindgen_ty_23::BPF_RB_CONS_POS;
1407pub const BPF_RB_PROD_POS: _bindgen_ty_23 = _bindgen_ty_23::BPF_RB_PROD_POS;
1408#[repr(u32)]
1409#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1410pub enum _bindgen_ty_23 {
1411 BPF_RB_AVAIL_DATA = 0,
1412 BPF_RB_RING_SIZE = 1,
1413 BPF_RB_CONS_POS = 2,
1414 BPF_RB_PROD_POS = 3,
1415}
1416pub const BPF_RINGBUF_BUSY_BIT: _bindgen_ty_24 = 2147483648;
1417pub const BPF_RINGBUF_DISCARD_BIT: _bindgen_ty_24 = 1073741824;
1418pub const BPF_RINGBUF_HDR_SZ: _bindgen_ty_24 = 8;
1419pub type _bindgen_ty_24 = ::core::ffi::c_uint;
1420pub const BPF_SK_LOOKUP_F_REPLACE: _bindgen_ty_25 = _bindgen_ty_25::BPF_SK_LOOKUP_F_REPLACE;
1421pub const BPF_SK_LOOKUP_F_NO_REUSEPORT: _bindgen_ty_25 =
1422 _bindgen_ty_25::BPF_SK_LOOKUP_F_NO_REUSEPORT;
1423#[repr(u32)]
1424#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1425pub enum _bindgen_ty_25 {
1426 BPF_SK_LOOKUP_F_REPLACE = 1,
1427 BPF_SK_LOOKUP_F_NO_REUSEPORT = 2,
1428}
1429pub const BPF_F_BPRM_SECUREEXEC: _bindgen_ty_26 = 1;
1430pub type _bindgen_ty_26 = ::core::ffi::c_uint;
1431pub const BPF_F_BROADCAST: _bindgen_ty_27 = 8;
1432pub const BPF_F_EXCLUDE_INGRESS: _bindgen_ty_27 = 16;
1433pub type _bindgen_ty_27 = ::core::ffi::c_uint;
1434pub const BPF_SKB_TSTAMP_UNSPEC: _bindgen_ty_28 = _bindgen_ty_28::BPF_SKB_TSTAMP_UNSPEC;
1435pub const BPF_SKB_TSTAMP_DELIVERY_MONO: _bindgen_ty_28 =
1436 _bindgen_ty_28::BPF_SKB_TSTAMP_DELIVERY_MONO;
1437#[repr(u32)]
1438#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1439pub enum _bindgen_ty_28 {
1440 BPF_SKB_TSTAMP_UNSPEC = 0,
1441 BPF_SKB_TSTAMP_DELIVERY_MONO = 1,
1442}
1443#[repr(C)]
1444#[derive(Copy, Clone)]
1445pub struct bpf_devmap_val {
1446 pub ifindex: __u32,
1447 pub bpf_prog: bpf_devmap_val__bindgen_ty_1,
1448}
1449#[repr(C)]
1450#[derive(Copy, Clone)]
1451pub union bpf_devmap_val__bindgen_ty_1 {
1452 pub fd: ::core::ffi::c_int,
1453 pub id: __u32,
1454}
1455#[repr(C)]
1456#[derive(Copy, Clone)]
1457pub struct bpf_cpumap_val {
1458 pub qsize: __u32,
1459 pub bpf_prog: bpf_cpumap_val__bindgen_ty_1,
1460}
1461#[repr(C)]
1462#[derive(Copy, Clone)]
1463pub union bpf_cpumap_val__bindgen_ty_1 {
1464 pub fd: ::core::ffi::c_int,
1465 pub id: __u32,
1466}
1467#[repr(C)]
1468#[derive(Debug, Copy, Clone)]
1469pub struct bpf_prog_info {
1470 pub type_: __u32,
1471 pub id: __u32,
1472 pub tag: [__u8; 8usize],
1473 pub jited_prog_len: __u32,
1474 pub xlated_prog_len: __u32,
1475 pub jited_prog_insns: __u64,
1476 pub xlated_prog_insns: __u64,
1477 pub load_time: __u64,
1478 pub created_by_uid: __u32,
1479 pub nr_map_ids: __u32,
1480 pub map_ids: __u64,
1481 pub name: [::core::ffi::c_char; 16usize],
1482 pub ifindex: __u32,
1483 pub _bitfield_align_1: [u8; 0],
1484 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
1485 pub netns_dev: __u64,
1486 pub netns_ino: __u64,
1487 pub nr_jited_ksyms: __u32,
1488 pub nr_jited_func_lens: __u32,
1489 pub jited_ksyms: __u64,
1490 pub jited_func_lens: __u64,
1491 pub btf_id: __u32,
1492 pub func_info_rec_size: __u32,
1493 pub func_info: __u64,
1494 pub nr_func_info: __u32,
1495 pub nr_line_info: __u32,
1496 pub line_info: __u64,
1497 pub jited_line_info: __u64,
1498 pub nr_jited_line_info: __u32,
1499 pub line_info_rec_size: __u32,
1500 pub jited_line_info_rec_size: __u32,
1501 pub nr_prog_tags: __u32,
1502 pub prog_tags: __u64,
1503 pub run_time_ns: __u64,
1504 pub run_cnt: __u64,
1505 pub recursion_misses: __u64,
1506 pub verified_insns: __u32,
1507 pub attach_btf_obj_id: __u32,
1508 pub attach_btf_id: __u32,
1509}
1510impl bpf_prog_info {
1511 #[inline]
1512 pub fn gpl_compatible(&self) -> __u32 {
1513 unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
1514 }
1515 #[inline]
1516 pub fn set_gpl_compatible(&mut self, val: __u32) {
1517 unsafe {
1518 let val: u32 = ::core::mem::transmute(val);
1519 self._bitfield_1.set(0usize, 1u8, val as u64)
1520 }
1521 }
1522 #[inline]
1523 pub unsafe fn gpl_compatible_raw(this: *const Self) -> __u32 {
1524 unsafe {
1525 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 4usize]>>::raw_get(
1526 ::core::ptr::addr_of!((*this)._bitfield_1),
1527 0usize,
1528 1u8,
1529 ) as u32)
1530 }
1531 }
1532 #[inline]
1533 pub unsafe fn set_gpl_compatible_raw(this: *mut Self, val: __u32) {
1534 unsafe {
1535 let val: u32 = ::core::mem::transmute(val);
1536 <__BindgenBitfieldUnit<[u8; 4usize]>>::raw_set(
1537 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
1538 0usize,
1539 1u8,
1540 val as u64,
1541 )
1542 }
1543 }
1544 #[inline]
1545 pub fn new_bitfield_1(gpl_compatible: __u32) -> __BindgenBitfieldUnit<[u8; 4usize]> {
1546 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
1547 __bindgen_bitfield_unit.set(0usize, 1u8, {
1548 let gpl_compatible: u32 = unsafe { ::core::mem::transmute(gpl_compatible) };
1549 gpl_compatible as u64
1550 });
1551 __bindgen_bitfield_unit
1552 }
1553}
1554#[repr(C)]
1555#[derive(Debug, Copy, Clone)]
1556pub struct bpf_map_info {
1557 pub type_: __u32,
1558 pub id: __u32,
1559 pub key_size: __u32,
1560 pub value_size: __u32,
1561 pub max_entries: __u32,
1562 pub map_flags: __u32,
1563 pub name: [::core::ffi::c_char; 16usize],
1564 pub ifindex: __u32,
1565 pub btf_vmlinux_value_type_id: __u32,
1566 pub netns_dev: __u64,
1567 pub netns_ino: __u64,
1568 pub btf_id: __u32,
1569 pub btf_key_type_id: __u32,
1570 pub btf_value_type_id: __u32,
1571 pub btf_vmlinux_id: __u32,
1572 pub map_extra: __u64,
1573}
1574#[repr(C)]
1575#[derive(Debug, Copy, Clone)]
1576pub struct bpf_btf_info {
1577 pub btf: __u64,
1578 pub btf_size: __u32,
1579 pub id: __u32,
1580 pub name: __u64,
1581 pub name_len: __u32,
1582 pub kernel_btf: __u32,
1583}
1584#[repr(C)]
1585#[derive(Copy, Clone)]
1586pub struct bpf_link_info {
1587 pub type_: __u32,
1588 pub id: __u32,
1589 pub prog_id: __u32,
1590 pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1,
1591}
1592#[repr(C)]
1593#[derive(Copy, Clone)]
1594pub union bpf_link_info__bindgen_ty_1 {
1595 pub raw_tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_1,
1596 pub tracing: bpf_link_info__bindgen_ty_1__bindgen_ty_2,
1597 pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_3,
1598 pub iter: bpf_link_info__bindgen_ty_1__bindgen_ty_4,
1599 pub netns: bpf_link_info__bindgen_ty_1__bindgen_ty_5,
1600 pub xdp: bpf_link_info__bindgen_ty_1__bindgen_ty_6,
1601 pub struct_ops: bpf_link_info__bindgen_ty_1__bindgen_ty_7,
1602 pub netfilter: bpf_link_info__bindgen_ty_1__bindgen_ty_8,
1603 pub kprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_9,
1604 pub uprobe_multi: bpf_link_info__bindgen_ty_1__bindgen_ty_10,
1605 pub perf_event: bpf_link_info__bindgen_ty_1__bindgen_ty_11,
1606 pub tcx: bpf_link_info__bindgen_ty_1__bindgen_ty_12,
1607 pub netkit: bpf_link_info__bindgen_ty_1__bindgen_ty_13,
1608}
1609#[repr(C)]
1610#[derive(Debug, Copy, Clone)]
1611pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_1 {
1612 pub tp_name: __u64,
1613 pub tp_name_len: __u32,
1614}
1615#[repr(C)]
1616#[derive(Debug, Copy, Clone)]
1617pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_2 {
1618 pub attach_type: __u32,
1619 pub target_obj_id: __u32,
1620 pub target_btf_id: __u32,
1621}
1622#[repr(C)]
1623#[derive(Debug, Copy, Clone)]
1624pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_3 {
1625 pub cgroup_id: __u64,
1626 pub attach_type: __u32,
1627}
1628#[repr(C)]
1629#[derive(Copy, Clone)]
1630pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4 {
1631 pub target_name: __u64,
1632 pub target_name_len: __u32,
1633 pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1,
1634 pub __bindgen_anon_2: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2,
1635}
1636#[repr(C)]
1637#[derive(Copy, Clone)]
1638pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1 {
1639 pub map: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1,
1640}
1641#[repr(C)]
1642#[derive(Debug, Copy, Clone)]
1643pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_1__bindgen_ty_1 {
1644 pub map_id: __u32,
1645}
1646#[repr(C)]
1647#[derive(Copy, Clone)]
1648pub union bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2 {
1649 pub cgroup: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1,
1650 pub task: bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2,
1651}
1652#[repr(C)]
1653#[derive(Debug, Copy, Clone)]
1654pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_1 {
1655 pub cgroup_id: __u64,
1656 pub order: __u32,
1657}
1658#[repr(C)]
1659#[derive(Debug, Copy, Clone)]
1660pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_4__bindgen_ty_2__bindgen_ty_2 {
1661 pub tid: __u32,
1662 pub pid: __u32,
1663}
1664#[repr(C)]
1665#[derive(Debug, Copy, Clone)]
1666pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_5 {
1667 pub netns_ino: __u32,
1668 pub attach_type: __u32,
1669}
1670#[repr(C)]
1671#[derive(Debug, Copy, Clone)]
1672pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_6 {
1673 pub ifindex: __u32,
1674}
1675#[repr(C)]
1676#[derive(Debug, Copy, Clone)]
1677pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_7 {
1678 pub map_id: __u32,
1679}
1680#[repr(C)]
1681#[derive(Debug, Copy, Clone)]
1682pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_8 {
1683 pub pf: __u32,
1684 pub hooknum: __u32,
1685 pub priority: __s32,
1686 pub flags: __u32,
1687}
1688#[repr(C)]
1689#[derive(Debug, Copy, Clone)]
1690pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_9 {
1691 pub addrs: __u64,
1692 pub count: __u32,
1693 pub flags: __u32,
1694 pub missed: __u64,
1695 pub cookies: __u64,
1696}
1697#[repr(C)]
1698#[derive(Debug, Copy, Clone)]
1699pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_10 {
1700 pub path: __u64,
1701 pub offsets: __u64,
1702 pub ref_ctr_offsets: __u64,
1703 pub cookies: __u64,
1704 pub path_size: __u32,
1705 pub count: __u32,
1706 pub flags: __u32,
1707 pub pid: __u32,
1708}
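/// Details reported for a perf-event-backed link (`bpf_link_info.perf_event`).
/// The inner `type_` field selects the active variant of the union (uprobe,
/// kprobe, tracepoint or plain perf event); on recent kernels this is a
/// `bpf_perf_event_type` value, which is not part of this excerpt.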
1709#[repr(C)]
1710#[derive(Copy, Clone)]
1711pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11 {
1712 pub type_: __u32,
1713 pub _bitfield_align_1: [u8; 0],
1714 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
1715 pub __bindgen_anon_1: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1,
1716}
1717#[repr(C)]
1718#[derive(Copy, Clone)]
1719pub union bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1 {
1720 pub uprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1,
1721 pub kprobe: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2,
1722 pub tracepoint: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3,
1723 pub event: bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4,
1724}
1725#[repr(C)]
1726#[derive(Debug, Copy, Clone)]
1727pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_1 {
1728 pub file_name: __u64,
1729 pub name_len: __u32,
1730 pub offset: __u32,
1731 pub cookie: __u64,
1732}
1733#[repr(C)]
1734#[derive(Debug, Copy, Clone)]
1735pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_2 {
1736 pub func_name: __u64,
1737 pub name_len: __u32,
1738 pub offset: __u32,
1739 pub addr: __u64,
1740 pub missed: __u64,
1741 pub cookie: __u64,
1742}
1743#[repr(C)]
1744#[derive(Debug, Copy, Clone)]
1745pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 {
1746 pub tp_name: __u64,
1747 pub name_len: __u32,
1748 pub _bitfield_align_1: [u8; 0],
1749 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
1750 pub cookie: __u64,
1751}
1752impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_3 {
1753 #[inline]
1754 pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> {
1755 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
1756 __bindgen_bitfield_unit
1757 }
1758}
1759#[repr(C)]
1760#[derive(Debug, Copy, Clone)]
1761pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 {
1762 pub config: __u64,
1763 pub type_: __u32,
1764 pub _bitfield_align_1: [u8; 0],
1765 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>,
1766 pub cookie: __u64,
1767}
1768impl bpf_link_info__bindgen_ty_1__bindgen_ty_11__bindgen_ty_1__bindgen_ty_4 {
1769 #[inline]
1770 pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> {
1771 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
1772 __bindgen_bitfield_unit
1773 }
1774}
1775impl bpf_link_info__bindgen_ty_1__bindgen_ty_11 {
1776 #[inline]
1777 pub fn new_bitfield_1() -> __BindgenBitfieldUnit<[u8; 4usize]> {
1778 let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default();
1779 __bindgen_bitfield_unit
1780 }
1781}
1782#[repr(C)]
1783#[derive(Debug, Copy, Clone)]
1784pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_12 {
1785 pub ifindex: __u32,
1786 pub attach_type: __u32,
1787}
1788#[repr(C)]
1789#[derive(Debug, Copy, Clone)]
1790pub struct bpf_link_info__bindgen_ty_1__bindgen_ty_13 {
1791 pub ifindex: __u32,
1792 pub attach_type: __u32,
1793}
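// `bpf_sock_ops` callback flags: single-bit masks meant to be OR-ed together
// (typically set from a BPF program via the `bpf_sock_ops_cb_flags_set()`
// helper). `BPF_SOCK_OPS_ALL_CB_FLAGS` is the union of all defined bits.
// Because bindgen exposes them as a `#[repr(u32)]` enum, combine them by
// casting, e.g. `BPF_SOCK_OPS_RTO_CB_FLAG as u32 | BPF_SOCK_OPS_RTT_CB_FLAG as u32`.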
1794pub const BPF_SOCK_OPS_RTO_CB_FLAG: _bindgen_ty_29 = _bindgen_ty_29::BPF_SOCK_OPS_RTO_CB_FLAG;
1795pub const BPF_SOCK_OPS_RETRANS_CB_FLAG: _bindgen_ty_29 =
1796 _bindgen_ty_29::BPF_SOCK_OPS_RETRANS_CB_FLAG;
1797pub const BPF_SOCK_OPS_STATE_CB_FLAG: _bindgen_ty_29 = _bindgen_ty_29::BPF_SOCK_OPS_STATE_CB_FLAG;
1798pub const BPF_SOCK_OPS_RTT_CB_FLAG: _bindgen_ty_29 = _bindgen_ty_29::BPF_SOCK_OPS_RTT_CB_FLAG;
1799pub const BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG: _bindgen_ty_29 =
1800 _bindgen_ty_29::BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG;
1801pub const BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG: _bindgen_ty_29 =
1802 _bindgen_ty_29::BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG;
1803pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG: _bindgen_ty_29 =
1804 _bindgen_ty_29::BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG;
1805pub const BPF_SOCK_OPS_ALL_CB_FLAGS: _bindgen_ty_29 = _bindgen_ty_29::BPF_SOCK_OPS_ALL_CB_FLAGS;
1806#[repr(u32)]
1807#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1808pub enum _bindgen_ty_29 {
1809 BPF_SOCK_OPS_RTO_CB_FLAG = 1,
1810 BPF_SOCK_OPS_RETRANS_CB_FLAG = 2,
1811 BPF_SOCK_OPS_STATE_CB_FLAG = 4,
1812 BPF_SOCK_OPS_RTT_CB_FLAG = 8,
1813 BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 16,
1814 BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 32,
1815 BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 64,
1816 BPF_SOCK_OPS_ALL_CB_FLAGS = 127,
1817}
1818pub const BPF_SOCK_OPS_VOID: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_VOID;
1819pub const BPF_SOCK_OPS_TIMEOUT_INIT: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_TIMEOUT_INIT;
1820pub const BPF_SOCK_OPS_RWND_INIT: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_RWND_INIT;
1821pub const BPF_SOCK_OPS_TCP_CONNECT_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_TCP_CONNECT_CB;
1822pub const BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: _bindgen_ty_30 =
1823 _bindgen_ty_30::BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
1824pub const BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: _bindgen_ty_30 =
1825 _bindgen_ty_30::BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB;
1826pub const BPF_SOCK_OPS_NEEDS_ECN: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_NEEDS_ECN;
1827pub const BPF_SOCK_OPS_BASE_RTT: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_BASE_RTT;
1828pub const BPF_SOCK_OPS_RTO_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_RTO_CB;
1829pub const BPF_SOCK_OPS_RETRANS_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_RETRANS_CB;
1830pub const BPF_SOCK_OPS_STATE_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_STATE_CB;
1831pub const BPF_SOCK_OPS_TCP_LISTEN_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_TCP_LISTEN_CB;
1832pub const BPF_SOCK_OPS_RTT_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_RTT_CB;
1833pub const BPF_SOCK_OPS_PARSE_HDR_OPT_CB: _bindgen_ty_30 =
1834 _bindgen_ty_30::BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
1835pub const BPF_SOCK_OPS_HDR_OPT_LEN_CB: _bindgen_ty_30 = _bindgen_ty_30::BPF_SOCK_OPS_HDR_OPT_LEN_CB;
1836pub const BPF_SOCK_OPS_WRITE_HDR_OPT_CB: _bindgen_ty_30 =
1837 _bindgen_ty_30::BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
1838#[repr(u32)]
1839#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1840pub enum _bindgen_ty_30 {
1841 BPF_SOCK_OPS_VOID = 0,
1842 BPF_SOCK_OPS_TIMEOUT_INIT = 1,
1843 BPF_SOCK_OPS_RWND_INIT = 2,
1844 BPF_SOCK_OPS_TCP_CONNECT_CB = 3,
1845 BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 4,
1846 BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 5,
1847 BPF_SOCK_OPS_NEEDS_ECN = 6,
1848 BPF_SOCK_OPS_BASE_RTT = 7,
1849 BPF_SOCK_OPS_RTO_CB = 8,
1850 BPF_SOCK_OPS_RETRANS_CB = 9,
1851 BPF_SOCK_OPS_STATE_CB = 10,
1852 BPF_SOCK_OPS_TCP_LISTEN_CB = 11,
1853 BPF_SOCK_OPS_RTT_CB = 12,
1854 BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 13,
1855 BPF_SOCK_OPS_HDR_OPT_LEN_CB = 14,
1856 BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 15,
1857}
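// BPF-visible mirrors of the kernel TCP socket states, as reported to sock_ops
// programs (for example in `BPF_SOCK_OPS_STATE_CB` callbacks).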
1858pub const BPF_TCP_ESTABLISHED: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_ESTABLISHED;
1859pub const BPF_TCP_SYN_SENT: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_SYN_SENT;
1860pub const BPF_TCP_SYN_RECV: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_SYN_RECV;
1861pub const BPF_TCP_FIN_WAIT1: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_FIN_WAIT1;
1862pub const BPF_TCP_FIN_WAIT2: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_FIN_WAIT2;
1863pub const BPF_TCP_TIME_WAIT: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_TIME_WAIT;
1864pub const BPF_TCP_CLOSE: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_CLOSE;
1865pub const BPF_TCP_CLOSE_WAIT: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_CLOSE_WAIT;
1866pub const BPF_TCP_LAST_ACK: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_LAST_ACK;
1867pub const BPF_TCP_LISTEN: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_LISTEN;
1868pub const BPF_TCP_CLOSING: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_CLOSING;
1869pub const BPF_TCP_NEW_SYN_RECV: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_NEW_SYN_RECV;
1870pub const BPF_TCP_BOUND_INACTIVE: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_BOUND_INACTIVE;
1871pub const BPF_TCP_MAX_STATES: _bindgen_ty_31 = _bindgen_ty_31::BPF_TCP_MAX_STATES;
1872#[repr(u32)]
1873#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1874pub enum _bindgen_ty_31 {
1875 BPF_TCP_ESTABLISHED = 1,
1876 BPF_TCP_SYN_SENT = 2,
1877 BPF_TCP_SYN_RECV = 3,
1878 BPF_TCP_FIN_WAIT1 = 4,
1879 BPF_TCP_FIN_WAIT2 = 5,
1880 BPF_TCP_TIME_WAIT = 6,
1881 BPF_TCP_CLOSE = 7,
1882 BPF_TCP_CLOSE_WAIT = 8,
1883 BPF_TCP_LAST_ACK = 9,
1884 BPF_TCP_LISTEN = 10,
1885 BPF_TCP_CLOSING = 11,
1886 BPF_TCP_NEW_SYN_RECV = 12,
1887 BPF_TCP_BOUND_INACTIVE = 13,
1888 BPF_TCP_MAX_STATES = 14,
1889}
1890pub const BPF_LOAD_HDR_OPT_TCP_SYN: _bindgen_ty_33 = _bindgen_ty_33::BPF_LOAD_HDR_OPT_TCP_SYN;
1891#[repr(u32)]
1892#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1893pub enum _bindgen_ty_33 {
1894 BPF_LOAD_HDR_OPT_TCP_SYN = 1,
1895}
1896pub const BPF_WRITE_HDR_TCP_CURRENT_MSS: _bindgen_ty_34 =
1897 _bindgen_ty_34::BPF_WRITE_HDR_TCP_CURRENT_MSS;
1898pub const BPF_WRITE_HDR_TCP_SYNACK_COOKIE: _bindgen_ty_34 =
1899 _bindgen_ty_34::BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
1900#[repr(u32)]
1901#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1902pub enum _bindgen_ty_34 {
1903 BPF_WRITE_HDR_TCP_CURRENT_MSS = 1,
1904 BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2,
1905}
1906pub const BPF_DEVCG_ACC_MKNOD: _bindgen_ty_35 = _bindgen_ty_35::BPF_DEVCG_ACC_MKNOD;
1907pub const BPF_DEVCG_ACC_READ: _bindgen_ty_35 = _bindgen_ty_35::BPF_DEVCG_ACC_READ;
1908pub const BPF_DEVCG_ACC_WRITE: _bindgen_ty_35 = _bindgen_ty_35::BPF_DEVCG_ACC_WRITE;
1909#[repr(u32)]
1910#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1911pub enum _bindgen_ty_35 {
1912 BPF_DEVCG_ACC_MKNOD = 1,
1913 BPF_DEVCG_ACC_READ = 2,
1914 BPF_DEVCG_ACC_WRITE = 4,
1915}
1916pub const BPF_DEVCG_DEV_BLOCK: _bindgen_ty_36 = _bindgen_ty_36::BPF_DEVCG_DEV_BLOCK;
1917pub const BPF_DEVCG_DEV_CHAR: _bindgen_ty_36 = _bindgen_ty_36::BPF_DEVCG_DEV_CHAR;
1918#[repr(u32)]
1919#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1920pub enum _bindgen_ty_36 {
1921 BPF_DEVCG_DEV_BLOCK = 1,
1922 BPF_DEVCG_DEV_CHAR = 2,
1923}
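// Flags accepted by the `bpf_fib_lookup()` helper; the `BPF_FIB_LKUP_RET_*`
// values further below are its return codes.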
1924pub const BPF_FIB_LOOKUP_DIRECT: _bindgen_ty_37 = _bindgen_ty_37::BPF_FIB_LOOKUP_DIRECT;
1925pub const BPF_FIB_LOOKUP_OUTPUT: _bindgen_ty_37 = _bindgen_ty_37::BPF_FIB_LOOKUP_OUTPUT;
1926pub const BPF_FIB_LOOKUP_SKIP_NEIGH: _bindgen_ty_37 = _bindgen_ty_37::BPF_FIB_LOOKUP_SKIP_NEIGH;
1927pub const BPF_FIB_LOOKUP_TBID: _bindgen_ty_37 = _bindgen_ty_37::BPF_FIB_LOOKUP_TBID;
1928pub const BPF_FIB_LOOKUP_SRC: _bindgen_ty_37 = _bindgen_ty_37::BPF_FIB_LOOKUP_SRC;
1929#[repr(u32)]
1930#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1931pub enum _bindgen_ty_37 {
1932 BPF_FIB_LOOKUP_DIRECT = 1,
1933 BPF_FIB_LOOKUP_OUTPUT = 2,
1934 BPF_FIB_LOOKUP_SKIP_NEIGH = 4,
1935 BPF_FIB_LOOKUP_TBID = 8,
1936 BPF_FIB_LOOKUP_SRC = 16,
1937}
1938pub const BPF_FIB_LKUP_RET_SUCCESS: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_SUCCESS;
1939pub const BPF_FIB_LKUP_RET_BLACKHOLE: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_BLACKHOLE;
1940pub const BPF_FIB_LKUP_RET_UNREACHABLE: _bindgen_ty_38 =
1941 _bindgen_ty_38::BPF_FIB_LKUP_RET_UNREACHABLE;
1942pub const BPF_FIB_LKUP_RET_PROHIBIT: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_PROHIBIT;
1943pub const BPF_FIB_LKUP_RET_NOT_FWDED: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_NOT_FWDED;
1944pub const BPF_FIB_LKUP_RET_FWD_DISABLED: _bindgen_ty_38 =
1945 _bindgen_ty_38::BPF_FIB_LKUP_RET_FWD_DISABLED;
1946pub const BPF_FIB_LKUP_RET_UNSUPP_LWT: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_UNSUPP_LWT;
1947pub const BPF_FIB_LKUP_RET_NO_NEIGH: _bindgen_ty_38 = _bindgen_ty_38::BPF_FIB_LKUP_RET_NO_NEIGH;
1948pub const BPF_FIB_LKUP_RET_FRAG_NEEDED: _bindgen_ty_38 =
1949 _bindgen_ty_38::BPF_FIB_LKUP_RET_FRAG_NEEDED;
1950pub const BPF_FIB_LKUP_RET_NO_SRC_ADDR: _bindgen_ty_38 =
1951 _bindgen_ty_38::BPF_FIB_LKUP_RET_NO_SRC_ADDR;
1952#[repr(u32)]
1953#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1954pub enum _bindgen_ty_38 {
1955 BPF_FIB_LKUP_RET_SUCCESS = 0,
1956 BPF_FIB_LKUP_RET_BLACKHOLE = 1,
1957 BPF_FIB_LKUP_RET_UNREACHABLE = 2,
1958 BPF_FIB_LKUP_RET_PROHIBIT = 3,
1959 BPF_FIB_LKUP_RET_NOT_FWDED = 4,
1960 BPF_FIB_LKUP_RET_FWD_DISABLED = 5,
1961 BPF_FIB_LKUP_RET_UNSUPP_LWT = 6,
1962 BPF_FIB_LKUP_RET_NO_NEIGH = 7,
1963 BPF_FIB_LKUP_RET_FRAG_NEEDED = 8,
1964 BPF_FIB_LKUP_RET_NO_SRC_ADDR = 9,
1965}
1966#[repr(u32)]
1967#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1968pub enum bpf_task_fd_type {
1969 BPF_FD_TYPE_RAW_TRACEPOINT = 0,
1970 BPF_FD_TYPE_TRACEPOINT = 1,
1971 BPF_FD_TYPE_KPROBE = 2,
1972 BPF_FD_TYPE_KRETPROBE = 3,
1973 BPF_FD_TYPE_UPROBE = 4,
1974 BPF_FD_TYPE_URETPROBE = 5,
1975}
1976pub const BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG: _bindgen_ty_39 =
1977 _bindgen_ty_39::BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG;
1978pub const BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL: _bindgen_ty_39 =
1979 _bindgen_ty_39::BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL;
1980pub const BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP: _bindgen_ty_39 =
1981 _bindgen_ty_39::BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1982#[repr(u32)]
1983#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
1984pub enum _bindgen_ty_39 {
1985 BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 1,
1986 BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 2,
1987 BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 4,
1988}
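/// Function-info record supplied at program load time: `insn_off` is expressed
/// in instruction units (not bytes) and `type_id` refers to a BTF `FUNC` type.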
1989#[repr(C)]
1990#[derive(Debug, Copy, Clone)]
1991pub struct bpf_func_info {
1992 pub insn_off: __u32,
1993 pub type_id: __u32,
1994}
1995#[repr(C)]
1996#[derive(Debug, Copy, Clone)]
1997pub struct bpf_line_info {
1998 pub insn_off: __u32,
1999 pub file_name_off: __u32,
2000 pub line_off: __u32,
2001 pub line_col: __u32,
2002}
2003pub const BPF_F_TIMER_ABS: _bindgen_ty_41 = 1;
2004pub const BPF_F_TIMER_CPU_PIN: _bindgen_ty_41 = 2;
2005pub type _bindgen_ty_41 = ::core::ffi::c_uint;
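/// Header at the start of a BTF blob (`struct btf_header` in `linux/btf.h`).
/// `magic` is expected to be `0xeB9F`; `type_off`/`type_len` and
/// `str_off`/`str_len` locate the type and string sections, with offsets taken
/// relative to the end of this header (`hdr_len`).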
2006#[repr(C)]
2007#[derive(Debug, Copy, Clone)]
2008pub struct btf_header {
2009 pub magic: __u16,
2010 pub version: __u8,
2011 pub flags: __u8,
2012 pub hdr_len: __u32,
2013 pub type_off: __u32,
2014 pub type_len: __u32,
2015 pub str_off: __u32,
2016 pub str_len: __u32,
2017}
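/// One BTF type descriptor (`struct btf_type`). The `info` word packs `vlen`
/// (bits 0-15), the kind (bits 24-28) and the `kind_flag` bit (bit 31); the
/// union holds `size` for sized kinds (INT, STRUCT, UNION, ENUM, ...) and a
/// referenced type id (`type_`) for the others (PTR, TYPEDEF, CONST, FUNC, ...).
// A minimal decoding sketch for some `t: btf_type`, using the masks documented
// in Documentation/bpf/btf.rst:
//
//     let kind = (t.info >> 24) & 0x1f;
//     let vlen = t.info & 0xffff;
//     let kind_flag = (t.info >> 31) & 1;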
2018#[repr(C)]
2019#[derive(Copy, Clone)]
2020pub struct btf_type {
2021 pub name_off: __u32,
2022 pub info: __u32,
2023 pub __bindgen_anon_1: btf_type__bindgen_ty_1,
2024}
2025#[repr(C)]
2026#[derive(Copy, Clone)]
2027pub union btf_type__bindgen_ty_1 {
2028 pub size: __u32,
2029 pub type_: __u32,
2030}
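// BTF_KIND_* values stored in the kind bits of `btf_type::info` (see the
// decoding sketch above). `BTF_KIND_MAX` is the last defined kind and
// `NR_BTF_KINDS` is one past it.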
2031pub const BTF_KIND_UNKN: _bindgen_ty_42 = 0;
2032pub const BTF_KIND_INT: _bindgen_ty_42 = 1;
2033pub const BTF_KIND_PTR: _bindgen_ty_42 = 2;
2034pub const BTF_KIND_ARRAY: _bindgen_ty_42 = 3;
2035pub const BTF_KIND_STRUCT: _bindgen_ty_42 = 4;
2036pub const BTF_KIND_UNION: _bindgen_ty_42 = 5;
2037pub const BTF_KIND_ENUM: _bindgen_ty_42 = 6;
2038pub const BTF_KIND_FWD: _bindgen_ty_42 = 7;
2039pub const BTF_KIND_TYPEDEF: _bindgen_ty_42 = 8;
2040pub const BTF_KIND_VOLATILE: _bindgen_ty_42 = 9;
2041pub const BTF_KIND_CONST: _bindgen_ty_42 = 10;
2042pub const BTF_KIND_RESTRICT: _bindgen_ty_42 = 11;
2043pub const BTF_KIND_FUNC: _bindgen_ty_42 = 12;
2044pub const BTF_KIND_FUNC_PROTO: _bindgen_ty_42 = 13;
2045pub const BTF_KIND_VAR: _bindgen_ty_42 = 14;
2046pub const BTF_KIND_DATASEC: _bindgen_ty_42 = 15;
2047pub const BTF_KIND_FLOAT: _bindgen_ty_42 = 16;
2048pub const BTF_KIND_DECL_TAG: _bindgen_ty_42 = 17;
2049pub const BTF_KIND_TYPE_TAG: _bindgen_ty_42 = 18;
2050pub const BTF_KIND_ENUM64: _bindgen_ty_42 = 19;
2051pub const NR_BTF_KINDS: _bindgen_ty_42 = 20;
2052pub const BTF_KIND_MAX: _bindgen_ty_42 = 19;
2053pub type _bindgen_ty_42 = ::core::ffi::c_uint;
2054#[repr(C)]
2055#[derive(Debug, Copy, Clone)]
2056pub struct btf_enum {
2057 pub name_off: __u32,
2058 pub val: __s32,
2059}
2060#[repr(C)]
2061#[derive(Debug, Copy, Clone)]
2062pub struct btf_array {
2063 pub type_: __u32,
2064 pub index_type: __u32,
2065 pub nelems: __u32,
2066}
2067#[repr(C)]
2068#[derive(Debug, Copy, Clone)]
2069pub struct btf_member {
2070 pub name_off: __u32,
2071 pub type_: __u32,
2072 pub offset: __u32,
2073}
2074#[repr(C)]
2075#[derive(Debug, Copy, Clone)]
2076pub struct btf_param {
2077 pub name_off: __u32,
2078 pub type_: __u32,
2079}
2080pub const BTF_VAR_STATIC: _bindgen_ty_43 = 0;
2081pub const BTF_VAR_GLOBAL_ALLOCATED: _bindgen_ty_43 = 1;
2082pub const BTF_VAR_GLOBAL_EXTERN: _bindgen_ty_43 = 2;
2083pub type _bindgen_ty_43 = ::core::ffi::c_uint;
2084#[repr(u32)]
2085#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2086pub enum btf_func_linkage {
2087 BTF_FUNC_STATIC = 0,
2088 BTF_FUNC_GLOBAL = 1,
2089 BTF_FUNC_EXTERN = 2,
2090}
2091#[repr(C)]
2092#[derive(Debug, Copy, Clone)]
2093pub struct btf_var {
2094 pub linkage: __u32,
2095}
2096#[repr(C)]
2097#[derive(Debug, Copy, Clone)]
2098pub struct btf_var_secinfo {
2099 pub type_: __u32,
2100 pub offset: __u32,
2101 pub size: __u32,
2102}
2103#[repr(C)]
2104#[derive(Debug, Copy, Clone)]
2105pub struct btf_decl_tag {
2106 pub component_idx: __s32,
2107}
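// Hardware-breakpoint parameters for perf events with `type_` set to
// `PERF_TYPE_BREAKPOINT`: `HW_BREAKPOINT_LEN_*` give the watched length in
// bytes (placed in `bp_len`), and `HW_BREAKPOINT_{R,W,X}` are access-type bits
// for `bp_type` (`HW_BREAKPOINT_RW` is `R | W`).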
2108pub const HW_BREAKPOINT_LEN_1: _bindgen_ty_44 = 1;
2109pub const HW_BREAKPOINT_LEN_2: _bindgen_ty_44 = 2;
2110pub const HW_BREAKPOINT_LEN_3: _bindgen_ty_44 = 3;
2111pub const HW_BREAKPOINT_LEN_4: _bindgen_ty_44 = 4;
2112pub const HW_BREAKPOINT_LEN_5: _bindgen_ty_44 = 5;
2113pub const HW_BREAKPOINT_LEN_6: _bindgen_ty_44 = 6;
2114pub const HW_BREAKPOINT_LEN_7: _bindgen_ty_44 = 7;
2115pub const HW_BREAKPOINT_LEN_8: _bindgen_ty_44 = 8;
2116pub type _bindgen_ty_44 = ::core::ffi::c_uint;
2117pub const HW_BREAKPOINT_EMPTY: _bindgen_ty_45 = 0;
2118pub const HW_BREAKPOINT_R: _bindgen_ty_45 = 1;
2119pub const HW_BREAKPOINT_W: _bindgen_ty_45 = 2;
2120pub const HW_BREAKPOINT_RW: _bindgen_ty_45 = 3;
2121pub const HW_BREAKPOINT_X: _bindgen_ty_45 = 4;
2122pub const HW_BREAKPOINT_INVALID: _bindgen_ty_45 = 7;
2123pub type _bindgen_ty_45 = ::core::ffi::c_uint;
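// bindgen lifts duplicate enum discriminants into associated constants: here
// `NLMSGERR_ATTR_MAX` aliases `NLMSGERR_ATTR_COOKIE`, which is why this `impl`
// block precedes the enum it extends.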
2124impl nlmsgerr_attrs {
2125 pub const NLMSGERR_ATTR_MAX: nlmsgerr_attrs = nlmsgerr_attrs::NLMSGERR_ATTR_COOKIE;
2126}
2127#[repr(u32)]
2128#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2129pub enum nlmsgerr_attrs {
2130 NLMSGERR_ATTR_UNUSED = 0,
2131 NLMSGERR_ATTR_MSG = 1,
2132 NLMSGERR_ATTR_OFFS = 2,
2133 NLMSGERR_ATTR_COOKIE = 3,
2134 __NLMSGERR_ATTR_MAX = 4,
2135}
2136pub const IFLA_XDP_UNSPEC: _bindgen_ty_94 = 0;
2137pub const IFLA_XDP_FD: _bindgen_ty_94 = 1;
2138pub const IFLA_XDP_ATTACHED: _bindgen_ty_94 = 2;
2139pub const IFLA_XDP_FLAGS: _bindgen_ty_94 = 3;
2140pub const IFLA_XDP_PROG_ID: _bindgen_ty_94 = 4;
2141pub const IFLA_XDP_DRV_PROG_ID: _bindgen_ty_94 = 5;
2142pub const IFLA_XDP_SKB_PROG_ID: _bindgen_ty_94 = 6;
2143pub const IFLA_XDP_HW_PROG_ID: _bindgen_ty_94 = 7;
2144pub const IFLA_XDP_EXPECTED_FD: _bindgen_ty_94 = 8;
2145pub const __IFLA_XDP_MAX: _bindgen_ty_94 = 9;
2146pub type _bindgen_ty_94 = ::core::ffi::c_uint;
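// As with `nlmsgerr_attrs` above, `NF_INET_INGRESS` shares its value with
// `NF_INET_NUMHOOKS`, so the alias is emitted as an associated constant.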
2147impl nf_inet_hooks {
2148 pub const NF_INET_INGRESS: nf_inet_hooks = nf_inet_hooks::NF_INET_NUMHOOKS;
2149}
2150#[repr(u32)]
2151#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2152pub enum nf_inet_hooks {
2153 NF_INET_PRE_ROUTING = 0,
2154 NF_INET_LOCAL_IN = 1,
2155 NF_INET_FORWARD = 2,
2156 NF_INET_LOCAL_OUT = 3,
2157 NF_INET_POST_ROUTING = 4,
2158 NF_INET_NUMHOOKS = 5,
2159}
2160pub const NFPROTO_UNSPEC: _bindgen_ty_101 = 0;
2161pub const NFPROTO_INET: _bindgen_ty_101 = 1;
2162pub const NFPROTO_IPV4: _bindgen_ty_101 = 2;
2163pub const NFPROTO_ARP: _bindgen_ty_101 = 3;
2164pub const NFPROTO_NETDEV: _bindgen_ty_101 = 5;
2165pub const NFPROTO_BRIDGE: _bindgen_ty_101 = 7;
2166pub const NFPROTO_IPV6: _bindgen_ty_101 = 10;
2167pub const NFPROTO_DECNET: _bindgen_ty_101 = 12;
2168pub const NFPROTO_NUMPROTO: _bindgen_ty_101 = 13;
2169pub type _bindgen_ty_101 = ::core::ffi::c_uint;
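/// Major event classes for `perf_event_attr::type_` (see `perf_event_open(2)`).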
2170#[repr(u32)]
2171#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2172pub enum perf_type_id {
2173 PERF_TYPE_HARDWARE = 0,
2174 PERF_TYPE_SOFTWARE = 1,
2175 PERF_TYPE_TRACEPOINT = 2,
2176 PERF_TYPE_HW_CACHE = 3,
2177 PERF_TYPE_RAW = 4,
2178 PERF_TYPE_BREAKPOINT = 5,
2179 PERF_TYPE_MAX = 6,
2180}
2181#[repr(u32)]
2182#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2183pub enum perf_hw_id {
2184 PERF_COUNT_HW_CPU_CYCLES = 0,
2185 PERF_COUNT_HW_INSTRUCTIONS = 1,
2186 PERF_COUNT_HW_CACHE_REFERENCES = 2,
2187 PERF_COUNT_HW_CACHE_MISSES = 3,
2188 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
2189 PERF_COUNT_HW_BRANCH_MISSES = 5,
2190 PERF_COUNT_HW_BUS_CYCLES = 6,
2191 PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
2192 PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
2193 PERF_COUNT_HW_REF_CPU_CYCLES = 9,
2194 PERF_COUNT_HW_MAX = 10,
2195}
2196#[repr(u32)]
2197#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2198pub enum perf_hw_cache_id {
2199 PERF_COUNT_HW_CACHE_L1D = 0,
2200 PERF_COUNT_HW_CACHE_L1I = 1,
2201 PERF_COUNT_HW_CACHE_LL = 2,
2202 PERF_COUNT_HW_CACHE_DTLB = 3,
2203 PERF_COUNT_HW_CACHE_ITLB = 4,
2204 PERF_COUNT_HW_CACHE_BPU = 5,
2205 PERF_COUNT_HW_CACHE_NODE = 6,
2206 PERF_COUNT_HW_CACHE_MAX = 7,
2207}
2208#[repr(u32)]
2209#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2210pub enum perf_hw_cache_op_id {
2211 PERF_COUNT_HW_CACHE_OP_READ = 0,
2212 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
2213 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
2214 PERF_COUNT_HW_CACHE_OP_MAX = 3,
2215}
2216#[repr(u32)]
2217#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2218pub enum perf_hw_cache_op_result_id {
2219 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
2220 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
2221 PERF_COUNT_HW_CACHE_RESULT_MAX = 2,
2222}
2223#[repr(u32)]
2224#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2225pub enum perf_sw_ids {
2226 PERF_COUNT_SW_CPU_CLOCK = 0,
2227 PERF_COUNT_SW_TASK_CLOCK = 1,
2228 PERF_COUNT_SW_PAGE_FAULTS = 2,
2229 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
2230 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
2231 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
2232 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
2233 PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
2234 PERF_COUNT_SW_EMULATION_FAULTS = 8,
2235 PERF_COUNT_SW_DUMMY = 9,
2236 PERF_COUNT_SW_BPF_OUTPUT = 10,
2237 PERF_COUNT_SW_CGROUP_SWITCHES = 11,
2238 PERF_COUNT_SW_MAX = 12,
2239}
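/// Although generated as an enum, the `PERF_SAMPLE_*` values are single-bit
/// flags OR-ed into `perf_event_attr::sample_type`, e.g.
/// `PERF_SAMPLE_IP as u64 | PERF_SAMPLE_TID as u64`.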
2240#[repr(u32)]
2241#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
2242pub enum perf_event_sample_format {
2243 PERF_SAMPLE_IP = 1,
2244 PERF_SAMPLE_TID = 2,
2245 PERF_SAMPLE_TIME = 4,
2246 PERF_SAMPLE_ADDR = 8,
2247 PERF_SAMPLE_READ = 16,
2248 PERF_SAMPLE_CALLCHAIN = 32,
2249 PERF_SAMPLE_ID = 64,
2250 PERF_SAMPLE_CPU = 128,
2251 PERF_SAMPLE_PERIOD = 256,
2252 PERF_SAMPLE_STREAM_ID = 512,
2253 PERF_SAMPLE_RAW = 1024,
2254 PERF_SAMPLE_BRANCH_STACK = 2048,
2255 PERF_SAMPLE_REGS_USER = 4096,
2256 PERF_SAMPLE_STACK_USER = 8192,
2257 PERF_SAMPLE_WEIGHT = 16384,
2258 PERF_SAMPLE_DATA_SRC = 32768,
2259 PERF_SAMPLE_IDENTIFIER = 65536,
2260 PERF_SAMPLE_TRANSACTION = 131072,
2261 PERF_SAMPLE_REGS_INTR = 262144,
2262 PERF_SAMPLE_PHYS_ADDR = 524288,
2263 PERF_SAMPLE_AUX = 1048576,
2264 PERF_SAMPLE_CGROUP = 2097152,
2265 PERF_SAMPLE_DATA_PAGE_SIZE = 4194304,
2266 PERF_SAMPLE_CODE_PAGE_SIZE = 8388608,
2267 PERF_SAMPLE_WEIGHT_STRUCT = 16777216,
2268 PERF_SAMPLE_MAX = 33554432,
2269}
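/// `struct perf_event_attr` from `linux/perf_event.h`: the configuration block
/// passed to `perf_event_open(2)`. The boolean flags (`disabled`, `inherit`,
/// `exclude_kernel`, ...) are packed into `_bitfield_1` and are read and
/// written through the generated accessors in the `impl perf_event_attr`
/// block further below.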
2270#[repr(C)]
2271#[derive(Copy, Clone)]
2272pub struct perf_event_attr {
2273 pub type_: __u32,
2274 pub size: __u32,
2275 pub config: __u64,
2276 pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1,
2277 pub sample_type: __u64,
2278 pub read_format: __u64,
2279 pub _bitfield_align_1: [u32; 0],
2280 pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
2281 pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2,
2282 pub bp_type: __u32,
2283 pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3,
2284 pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4,
2285 pub branch_sample_type: __u64,
2286 pub sample_regs_user: __u64,
2287 pub sample_stack_user: __u32,
2288 pub clockid: __s32,
2289 pub sample_regs_intr: __u64,
2290 pub aux_watermark: __u32,
2291 pub sample_max_stack: __u16,
2292 pub __reserved_2: __u16,
2293 pub aux_sample_size: __u32,
2294 pub __reserved_3: __u32,
2295 pub sig_data: __u64,
2296 pub config3: __u64,
2297}
2298#[repr(C)]
2299#[derive(Copy, Clone)]
2300pub union perf_event_attr__bindgen_ty_1 {
2301 pub sample_period: __u64,
2302 pub sample_freq: __u64,
2303}
2304#[repr(C)]
2305#[derive(Copy, Clone)]
2306pub union perf_event_attr__bindgen_ty_2 {
2307 pub wakeup_events: __u32,
2308 pub wakeup_watermark: __u32,
2309}
2310#[repr(C)]
2311#[derive(Copy, Clone)]
2312pub union perf_event_attr__bindgen_ty_3 {
2313 pub bp_addr: __u64,
2314 pub kprobe_func: __u64,
2315 pub uprobe_path: __u64,
2316 pub config1: __u64,
2317}
2318#[repr(C)]
2319#[derive(Copy, Clone)]
2320pub union perf_event_attr__bindgen_ty_4 {
2321 pub bp_len: __u64,
2322 pub kprobe_addr: __u64,
2323 pub probe_offset: __u64,
2324 pub config2: __u64,
2325}
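// A minimal usage sketch for the bitfield accessors below (assumptions: an
// all-zero `perf_event_attr` is a valid starting point for this plain-data
// struct, and the struct layout matches the running kernel's ABI):
//
//     let mut attr: perf_event_attr = unsafe { core::mem::zeroed() };
//     attr.type_ = perf_type_id::PERF_TYPE_SOFTWARE as u32;
//     attr.config = perf_sw_ids::PERF_COUNT_SW_CPU_CLOCK as u64;
//     attr.size = core::mem::size_of::<perf_event_attr>() as u32;
//     attr.set_disabled(1);
//     attr.set_exclude_kernel(1);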
2326impl perf_event_attr {
2327 #[inline]
2328 pub fn disabled(&self) -> __u64 {
2329 unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
2330 }
2331 #[inline]
2332 pub fn set_disabled(&mut self, val: __u64) {
2333 unsafe {
2334 let val: u64 = ::core::mem::transmute(val);
2335 self._bitfield_1.set(0usize, 1u8, val as u64)
2336 }
2337 }
2338 #[inline]
2339 pub unsafe fn disabled_raw(this: *const Self) -> __u64 {
2340 unsafe {
2341 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2342 ::core::ptr::addr_of!((*this)._bitfield_1),
2343 0usize,
2344 1u8,
2345 ) as u64)
2346 }
2347 }
2348 #[inline]
2349 pub unsafe fn set_disabled_raw(this: *mut Self, val: __u64) {
2350 unsafe {
2351 let val: u64 = ::core::mem::transmute(val);
2352 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2353 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2354 0usize,
2355 1u8,
2356 val as u64,
2357 )
2358 }
2359 }
2360 #[inline]
2361 pub fn inherit(&self) -> __u64 {
2362 unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
2363 }
2364 #[inline]
2365 pub fn set_inherit(&mut self, val: __u64) {
2366 unsafe {
2367 let val: u64 = ::core::mem::transmute(val);
2368 self._bitfield_1.set(1usize, 1u8, val as u64)
2369 }
2370 }
2371 #[inline]
2372 pub unsafe fn inherit_raw(this: *const Self) -> __u64 {
2373 unsafe {
2374 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2375 ::core::ptr::addr_of!((*this)._bitfield_1),
2376 1usize,
2377 1u8,
2378 ) as u64)
2379 }
2380 }
2381 #[inline]
2382 pub unsafe fn set_inherit_raw(this: *mut Self, val: __u64) {
2383 unsafe {
2384 let val: u64 = ::core::mem::transmute(val);
2385 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2386 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2387 1usize,
2388 1u8,
2389 val as u64,
2390 )
2391 }
2392 }
2393 #[inline]
2394 pub fn pinned(&self) -> __u64 {
2395 unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
2396 }
2397 #[inline]
2398 pub fn set_pinned(&mut self, val: __u64) {
2399 unsafe {
2400 let val: u64 = ::core::mem::transmute(val);
2401 self._bitfield_1.set(2usize, 1u8, val as u64)
2402 }
2403 }
2404 #[inline]
2405 pub unsafe fn pinned_raw(this: *const Self) -> __u64 {
2406 unsafe {
2407 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2408 ::core::ptr::addr_of!((*this)._bitfield_1),
2409 2usize,
2410 1u8,
2411 ) as u64)
2412 }
2413 }
2414 #[inline]
2415 pub unsafe fn set_pinned_raw(this: *mut Self, val: __u64) {
2416 unsafe {
2417 let val: u64 = ::core::mem::transmute(val);
2418 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2419 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2420 2usize,
2421 1u8,
2422 val as u64,
2423 )
2424 }
2425 }
2426 #[inline]
2427 pub fn exclusive(&self) -> __u64 {
2428 unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
2429 }
2430 #[inline]
2431 pub fn set_exclusive(&mut self, val: __u64) {
2432 unsafe {
2433 let val: u64 = ::core::mem::transmute(val);
2434 self._bitfield_1.set(3usize, 1u8, val as u64)
2435 }
2436 }
2437 #[inline]
2438 pub unsafe fn exclusive_raw(this: *const Self) -> __u64 {
2439 unsafe {
2440 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2441 ::core::ptr::addr_of!((*this)._bitfield_1),
2442 3usize,
2443 1u8,
2444 ) as u64)
2445 }
2446 }
2447 #[inline]
2448 pub unsafe fn set_exclusive_raw(this: *mut Self, val: __u64) {
2449 unsafe {
2450 let val: u64 = ::core::mem::transmute(val);
2451 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2452 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2453 3usize,
2454 1u8,
2455 val as u64,
2456 )
2457 }
2458 }
2459 #[inline]
2460 pub fn exclude_user(&self) -> __u64 {
2461 unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
2462 }
2463 #[inline]
2464 pub fn set_exclude_user(&mut self, val: __u64) {
2465 unsafe {
2466 let val: u64 = ::core::mem::transmute(val);
2467 self._bitfield_1.set(4usize, 1u8, val as u64)
2468 }
2469 }
2470 #[inline]
2471 pub unsafe fn exclude_user_raw(this: *const Self) -> __u64 {
2472 unsafe {
2473 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2474 ::core::ptr::addr_of!((*this)._bitfield_1),
2475 4usize,
2476 1u8,
2477 ) as u64)
2478 }
2479 }
2480 #[inline]
2481 pub unsafe fn set_exclude_user_raw(this: *mut Self, val: __u64) {
2482 unsafe {
2483 let val: u64 = ::core::mem::transmute(val);
2484 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2485 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2486 4usize,
2487 1u8,
2488 val as u64,
2489 )
2490 }
2491 }
2492 #[inline]
2493 pub fn exclude_kernel(&self) -> __u64 {
2494 unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) }
2495 }
2496 #[inline]
2497 pub fn set_exclude_kernel(&mut self, val: __u64) {
2498 unsafe {
2499 let val: u64 = ::core::mem::transmute(val);
2500 self._bitfield_1.set(5usize, 1u8, val as u64)
2501 }
2502 }
2503 #[inline]
2504 pub unsafe fn exclude_kernel_raw(this: *const Self) -> __u64 {
2505 unsafe {
2506 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2507 ::core::ptr::addr_of!((*this)._bitfield_1),
2508 5usize,
2509 1u8,
2510 ) as u64)
2511 }
2512 }
2513 #[inline]
2514 pub unsafe fn set_exclude_kernel_raw(this: *mut Self, val: __u64) {
2515 unsafe {
2516 let val: u64 = ::core::mem::transmute(val);
2517 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2518 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2519 5usize,
2520 1u8,
2521 val as u64,
2522 )
2523 }
2524 }
2525 #[inline]
2526 pub fn exclude_hv(&self) -> __u64 {
2527 unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) }
2528 }
2529 #[inline]
2530 pub fn set_exclude_hv(&mut self, val: __u64) {
2531 unsafe {
2532 let val: u64 = ::core::mem::transmute(val);
2533 self._bitfield_1.set(6usize, 1u8, val as u64)
2534 }
2535 }
2536 #[inline]
2537 pub unsafe fn exclude_hv_raw(this: *const Self) -> __u64 {
2538 unsafe {
2539 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2540 ::core::ptr::addr_of!((*this)._bitfield_1),
2541 6usize,
2542 1u8,
2543 ) as u64)
2544 }
2545 }
2546 #[inline]
2547 pub unsafe fn set_exclude_hv_raw(this: *mut Self, val: __u64) {
2548 unsafe {
2549 let val: u64 = ::core::mem::transmute(val);
2550 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2551 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2552 6usize,
2553 1u8,
2554 val as u64,
2555 )
2556 }
2557 }
2558 #[inline]
2559 pub fn exclude_idle(&self) -> __u64 {
2560 unsafe { ::core::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) }
2561 }
2562 #[inline]
2563 pub fn set_exclude_idle(&mut self, val: __u64) {
2564 unsafe {
2565 let val: u64 = ::core::mem::transmute(val);
2566 self._bitfield_1.set(7usize, 1u8, val as u64)
2567 }
2568 }
2569 #[inline]
2570 pub unsafe fn exclude_idle_raw(this: *const Self) -> __u64 {
2571 unsafe {
2572 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2573 ::core::ptr::addr_of!((*this)._bitfield_1),
2574 7usize,
2575 1u8,
2576 ) as u64)
2577 }
2578 }
2579 #[inline]
2580 pub unsafe fn set_exclude_idle_raw(this: *mut Self, val: __u64) {
2581 unsafe {
2582 let val: u64 = ::core::mem::transmute(val);
2583 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2584 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2585 7usize,
2586 1u8,
2587 val as u64,
2588 )
2589 }
2590 }
2591 #[inline]
2592 pub fn mmap(&self) -> __u64 {
2593 unsafe { ::core::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) }
2594 }
2595 #[inline]
2596 pub fn set_mmap(&mut self, val: __u64) {
2597 unsafe {
2598 let val: u64 = ::core::mem::transmute(val);
2599 self._bitfield_1.set(8usize, 1u8, val as u64)
2600 }
2601 }
2602 #[inline]
2603 pub unsafe fn mmap_raw(this: *const Self) -> __u64 {
2604 unsafe {
2605 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2606 ::core::ptr::addr_of!((*this)._bitfield_1),
2607 8usize,
2608 1u8,
2609 ) as u64)
2610 }
2611 }
2612 #[inline]
2613 pub unsafe fn set_mmap_raw(this: *mut Self, val: __u64) {
2614 unsafe {
2615 let val: u64 = ::core::mem::transmute(val);
2616 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2617 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2618 8usize,
2619 1u8,
2620 val as u64,
2621 )
2622 }
2623 }
2624 #[inline]
2625 pub fn comm(&self) -> __u64 {
2626 unsafe { ::core::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) }
2627 }
2628 #[inline]
2629 pub fn set_comm(&mut self, val: __u64) {
2630 unsafe {
2631 let val: u64 = ::core::mem::transmute(val);
2632 self._bitfield_1.set(9usize, 1u8, val as u64)
2633 }
2634 }
2635 #[inline]
2636 pub unsafe fn comm_raw(this: *const Self) -> __u64 {
2637 unsafe {
2638 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2639 ::core::ptr::addr_of!((*this)._bitfield_1),
2640 9usize,
2641 1u8,
2642 ) as u64)
2643 }
2644 }
2645 #[inline]
2646 pub unsafe fn set_comm_raw(this: *mut Self, val: __u64) {
2647 unsafe {
2648 let val: u64 = ::core::mem::transmute(val);
2649 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2650 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2651 9usize,
2652 1u8,
2653 val as u64,
2654 )
2655 }
2656 }
2657 #[inline]
2658 pub fn freq(&self) -> __u64 {
2659 unsafe { ::core::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) }
2660 }
2661 #[inline]
2662 pub fn set_freq(&mut self, val: __u64) {
2663 unsafe {
2664 let val: u64 = ::core::mem::transmute(val);
2665 self._bitfield_1.set(10usize, 1u8, val as u64)
2666 }
2667 }
2668 #[inline]
2669 pub unsafe fn freq_raw(this: *const Self) -> __u64 {
2670 unsafe {
2671 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2672 ::core::ptr::addr_of!((*this)._bitfield_1),
2673 10usize,
2674 1u8,
2675 ) as u64)
2676 }
2677 }
2678 #[inline]
2679 pub unsafe fn set_freq_raw(this: *mut Self, val: __u64) {
2680 unsafe {
2681 let val: u64 = ::core::mem::transmute(val);
2682 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2683 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2684 10usize,
2685 1u8,
2686 val as u64,
2687 )
2688 }
2689 }
2690 #[inline]
2691 pub fn inherit_stat(&self) -> __u64 {
2692 unsafe { ::core::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) }
2693 }
2694 #[inline]
2695 pub fn set_inherit_stat(&mut self, val: __u64) {
2696 unsafe {
2697 let val: u64 = ::core::mem::transmute(val);
2698 self._bitfield_1.set(11usize, 1u8, val as u64)
2699 }
2700 }
2701 #[inline]
2702 pub unsafe fn inherit_stat_raw(this: *const Self) -> __u64 {
2703 unsafe {
2704 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2705 ::core::ptr::addr_of!((*this)._bitfield_1),
2706 11usize,
2707 1u8,
2708 ) as u64)
2709 }
2710 }
2711 #[inline]
2712 pub unsafe fn set_inherit_stat_raw(this: *mut Self, val: __u64) {
2713 unsafe {
2714 let val: u64 = ::core::mem::transmute(val);
2715 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2716 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2717 11usize,
2718 1u8,
2719 val as u64,
2720 )
2721 }
2722 }
2723 #[inline]
2724 pub fn enable_on_exec(&self) -> __u64 {
2725 unsafe { ::core::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) }
2726 }
2727 #[inline]
2728 pub fn set_enable_on_exec(&mut self, val: __u64) {
2729 unsafe {
2730 let val: u64 = ::core::mem::transmute(val);
2731 self._bitfield_1.set(12usize, 1u8, val as u64)
2732 }
2733 }
2734 #[inline]
2735 pub unsafe fn enable_on_exec_raw(this: *const Self) -> __u64 {
2736 unsafe {
2737 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2738 ::core::ptr::addr_of!((*this)._bitfield_1),
2739 12usize,
2740 1u8,
2741 ) as u64)
2742 }
2743 }
2744 #[inline]
2745 pub unsafe fn set_enable_on_exec_raw(this: *mut Self, val: __u64) {
2746 unsafe {
2747 let val: u64 = ::core::mem::transmute(val);
2748 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2749 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2750 12usize,
2751 1u8,
2752 val as u64,
2753 )
2754 }
2755 }
2756 #[inline]
2757 pub fn task(&self) -> __u64 {
2758 unsafe { ::core::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) }
2759 }
2760 #[inline]
2761 pub fn set_task(&mut self, val: __u64) {
2762 unsafe {
2763 let val: u64 = ::core::mem::transmute(val);
2764 self._bitfield_1.set(13usize, 1u8, val as u64)
2765 }
2766 }
2767 #[inline]
2768 pub unsafe fn task_raw(this: *const Self) -> __u64 {
2769 unsafe {
2770 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2771 ::core::ptr::addr_of!((*this)._bitfield_1),
2772 13usize,
2773 1u8,
2774 ) as u64)
2775 }
2776 }
2777 #[inline]
2778 pub unsafe fn set_task_raw(this: *mut Self, val: __u64) {
2779 unsafe {
2780 let val: u64 = ::core::mem::transmute(val);
2781 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2782 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2783 13usize,
2784 1u8,
2785 val as u64,
2786 )
2787 }
2788 }
2789 #[inline]
2790 pub fn watermark(&self) -> __u64 {
2791 unsafe { ::core::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) }
2792 }
2793 #[inline]
2794 pub fn set_watermark(&mut self, val: __u64) {
2795 unsafe {
2796 let val: u64 = ::core::mem::transmute(val);
2797 self._bitfield_1.set(14usize, 1u8, val as u64)
2798 }
2799 }
2800 #[inline]
2801 pub unsafe fn watermark_raw(this: *const Self) -> __u64 {
2802 unsafe {
2803 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2804 ::core::ptr::addr_of!((*this)._bitfield_1),
2805 14usize,
2806 1u8,
2807 ) as u64)
2808 }
2809 }
2810 #[inline]
2811 pub unsafe fn set_watermark_raw(this: *mut Self, val: __u64) {
2812 unsafe {
2813 let val: u64 = ::core::mem::transmute(val);
2814 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2815 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2816 14usize,
2817 1u8,
2818 val as u64,
2819 )
2820 }
2821 }
2822 #[inline]
2823 pub fn precise_ip(&self) -> __u64 {
2824 unsafe { ::core::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) }
2825 }
2826 #[inline]
2827 pub fn set_precise_ip(&mut self, val: __u64) {
2828 unsafe {
2829 let val: u64 = ::core::mem::transmute(val);
2830 self._bitfield_1.set(15usize, 2u8, val as u64)
2831 }
2832 }
2833 #[inline]
2834 pub unsafe fn precise_ip_raw(this: *const Self) -> __u64 {
2835 unsafe {
2836 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2837 ::core::ptr::addr_of!((*this)._bitfield_1),
2838 15usize,
2839 2u8,
2840 ) as u64)
2841 }
2842 }
2843 #[inline]
2844 pub unsafe fn set_precise_ip_raw(this: *mut Self, val: __u64) {
2845 unsafe {
2846 let val: u64 = ::core::mem::transmute(val);
2847 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2848 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2849 15usize,
2850 2u8,
2851 val as u64,
2852 )
2853 }
2854 }
2855 #[inline]
2856 pub fn mmap_data(&self) -> __u64 {
2857 unsafe { ::core::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) }
2858 }
2859 #[inline]
2860 pub fn set_mmap_data(&mut self, val: __u64) {
2861 unsafe {
2862 let val: u64 = ::core::mem::transmute(val);
2863 self._bitfield_1.set(17usize, 1u8, val as u64)
2864 }
2865 }
2866 #[inline]
2867 pub unsafe fn mmap_data_raw(this: *const Self) -> __u64 {
2868 unsafe {
2869 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2870 ::core::ptr::addr_of!((*this)._bitfield_1),
2871 17usize,
2872 1u8,
2873 ) as u64)
2874 }
2875 }
2876 #[inline]
2877 pub unsafe fn set_mmap_data_raw(this: *mut Self, val: __u64) {
2878 unsafe {
2879 let val: u64 = ::core::mem::transmute(val);
2880 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2881 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2882 17usize,
2883 1u8,
2884 val as u64,
2885 )
2886 }
2887 }
2888 #[inline]
2889 pub fn sample_id_all(&self) -> __u64 {
2890 unsafe { ::core::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) }
2891 }
2892 #[inline]
2893 pub fn set_sample_id_all(&mut self, val: __u64) {
2894 unsafe {
2895 let val: u64 = ::core::mem::transmute(val);
2896 self._bitfield_1.set(18usize, 1u8, val as u64)
2897 }
2898 }
2899 #[inline]
2900 pub unsafe fn sample_id_all_raw(this: *const Self) -> __u64 {
2901 unsafe {
2902 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2903 ::core::ptr::addr_of!((*this)._bitfield_1),
2904 18usize,
2905 1u8,
2906 ) as u64)
2907 }
2908 }
2909 #[inline]
2910 pub unsafe fn set_sample_id_all_raw(this: *mut Self, val: __u64) {
2911 unsafe {
2912 let val: u64 = ::core::mem::transmute(val);
2913 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2914 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2915 18usize,
2916 1u8,
2917 val as u64,
2918 )
2919 }
2920 }
2921 #[inline]
2922 pub fn exclude_host(&self) -> __u64 {
2923 unsafe { ::core::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) }
2924 }
2925 #[inline]
2926 pub fn set_exclude_host(&mut self, val: __u64) {
2927 unsafe {
2928 let val: u64 = ::core::mem::transmute(val);
2929 self._bitfield_1.set(19usize, 1u8, val as u64)
2930 }
2931 }
2932 #[inline]
2933 pub unsafe fn exclude_host_raw(this: *const Self) -> __u64 {
2934 unsafe {
2935 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2936 ::core::ptr::addr_of!((*this)._bitfield_1),
2937 19usize,
2938 1u8,
2939 ) as u64)
2940 }
2941 }
2942 #[inline]
2943 pub unsafe fn set_exclude_host_raw(this: *mut Self, val: __u64) {
2944 unsafe {
2945 let val: u64 = ::core::mem::transmute(val);
2946 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2947 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2948 19usize,
2949 1u8,
2950 val as u64,
2951 )
2952 }
2953 }
2954 #[inline]
2955 pub fn exclude_guest(&self) -> __u64 {
2956 unsafe { ::core::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) }
2957 }
2958 #[inline]
2959 pub fn set_exclude_guest(&mut self, val: __u64) {
2960 unsafe {
2961 let val: u64 = ::core::mem::transmute(val);
2962 self._bitfield_1.set(20usize, 1u8, val as u64)
2963 }
2964 }
2965 #[inline]
2966 pub unsafe fn exclude_guest_raw(this: *const Self) -> __u64 {
2967 unsafe {
2968 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
2969 ::core::ptr::addr_of!((*this)._bitfield_1),
2970 20usize,
2971 1u8,
2972 ) as u64)
2973 }
2974 }
2975 #[inline]
2976 pub unsafe fn set_exclude_guest_raw(this: *mut Self, val: __u64) {
2977 unsafe {
2978 let val: u64 = ::core::mem::transmute(val);
2979 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
2980 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
2981 20usize,
2982 1u8,
2983 val as u64,
2984 )
2985 }
2986 }
2987 #[inline]
2988 pub fn exclude_callchain_kernel(&self) -> __u64 {
2989 unsafe { ::core::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) }
2990 }
2991 #[inline]
2992 pub fn set_exclude_callchain_kernel(&mut self, val: __u64) {
2993 unsafe {
2994 let val: u64 = ::core::mem::transmute(val);
2995 self._bitfield_1.set(21usize, 1u8, val as u64)
2996 }
2997 }
2998 #[inline]
2999 pub unsafe fn exclude_callchain_kernel_raw(this: *const Self) -> __u64 {
3000 unsafe {
3001 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3002 ::core::ptr::addr_of!((*this)._bitfield_1),
3003 21usize,
3004 1u8,
3005 ) as u64)
3006 }
3007 }
3008 #[inline]
3009 pub unsafe fn set_exclude_callchain_kernel_raw(this: *mut Self, val: __u64) {
3010 unsafe {
3011 let val: u64 = ::core::mem::transmute(val);
3012 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3013 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3014 21usize,
3015 1u8,
3016 val as u64,
3017 )
3018 }
3019 }
3020 #[inline]
3021 pub fn exclude_callchain_user(&self) -> __u64 {
3022 unsafe { ::core::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) }
3023 }
3024 #[inline]
3025 pub fn set_exclude_callchain_user(&mut self, val: __u64) {
3026 unsafe {
3027 let val: u64 = ::core::mem::transmute(val);
3028 self._bitfield_1.set(22usize, 1u8, val as u64)
3029 }
3030 }
3031 #[inline]
3032 pub unsafe fn exclude_callchain_user_raw(this: *const Self) -> __u64 {
3033 unsafe {
3034 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3035 ::core::ptr::addr_of!((*this)._bitfield_1),
3036 22usize,
3037 1u8,
3038 ) as u64)
3039 }
3040 }
3041 #[inline]
3042 pub unsafe fn set_exclude_callchain_user_raw(this: *mut Self, val: __u64) {
3043 unsafe {
3044 let val: u64 = ::core::mem::transmute(val);
3045 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3046 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3047 22usize,
3048 1u8,
3049 val as u64,
3050 )
3051 }
3052 }
3053 #[inline]
3054 pub fn mmap2(&self) -> __u64 {
3055 unsafe { ::core::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) }
3056 }
3057 #[inline]
3058 pub fn set_mmap2(&mut self, val: __u64) {
3059 unsafe {
3060 let val: u64 = ::core::mem::transmute(val);
3061 self._bitfield_1.set(23usize, 1u8, val as u64)
3062 }
3063 }
3064 #[inline]
3065 pub unsafe fn mmap2_raw(this: *const Self) -> __u64 {
3066 unsafe {
3067 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3068 ::core::ptr::addr_of!((*this)._bitfield_1),
3069 23usize,
3070 1u8,
3071 ) as u64)
3072 }
3073 }
3074 #[inline]
3075 pub unsafe fn set_mmap2_raw(this: *mut Self, val: __u64) {
3076 unsafe {
3077 let val: u64 = ::core::mem::transmute(val);
3078 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3079 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3080 23usize,
3081 1u8,
3082 val as u64,
3083 )
3084 }
3085 }
3086 #[inline]
3087 pub fn comm_exec(&self) -> __u64 {
3088 unsafe { ::core::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) }
3089 }
3090 #[inline]
3091 pub fn set_comm_exec(&mut self, val: __u64) {
3092 unsafe {
3093 let val: u64 = ::core::mem::transmute(val);
3094 self._bitfield_1.set(24usize, 1u8, val as u64)
3095 }
3096 }
3097 #[inline]
3098 pub unsafe fn comm_exec_raw(this: *const Self) -> __u64 {
3099 unsafe {
3100 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3101 ::core::ptr::addr_of!((*this)._bitfield_1),
3102 24usize,
3103 1u8,
3104 ) as u64)
3105 }
3106 }
3107 #[inline]
3108 pub unsafe fn set_comm_exec_raw(this: *mut Self, val: __u64) {
3109 unsafe {
3110 let val: u64 = ::core::mem::transmute(val);
3111 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3112 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3113 24usize,
3114 1u8,
3115 val as u64,
3116 )
3117 }
3118 }
3119 #[inline]
3120 pub fn use_clockid(&self) -> __u64 {
3121 unsafe { ::core::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) }
3122 }
3123 #[inline]
3124 pub fn set_use_clockid(&mut self, val: __u64) {
3125 unsafe {
3126 let val: u64 = ::core::mem::transmute(val);
3127 self._bitfield_1.set(25usize, 1u8, val as u64)
3128 }
3129 }
3130 #[inline]
3131 pub unsafe fn use_clockid_raw(this: *const Self) -> __u64 {
3132 unsafe {
3133 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3134 ::core::ptr::addr_of!((*this)._bitfield_1),
3135 25usize,
3136 1u8,
3137 ) as u64)
3138 }
3139 }
3140 #[inline]
3141 pub unsafe fn set_use_clockid_raw(this: *mut Self, val: __u64) {
3142 unsafe {
3143 let val: u64 = ::core::mem::transmute(val);
3144 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3145 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3146 25usize,
3147 1u8,
3148 val as u64,
3149 )
3150 }
3151 }
3152 #[inline]
3153 pub fn context_switch(&self) -> __u64 {
3154 unsafe { ::core::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) }
3155 }
3156 #[inline]
3157 pub fn set_context_switch(&mut self, val: __u64) {
3158 unsafe {
3159 let val: u64 = ::core::mem::transmute(val);
3160 self._bitfield_1.set(26usize, 1u8, val as u64)
3161 }
3162 }
3163 #[inline]
3164 pub unsafe fn context_switch_raw(this: *const Self) -> __u64 {
3165 unsafe {
3166 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3167 ::core::ptr::addr_of!((*this)._bitfield_1),
3168 26usize,
3169 1u8,
3170 ) as u64)
3171 }
3172 }
3173 #[inline]
3174 pub unsafe fn set_context_switch_raw(this: *mut Self, val: __u64) {
3175 unsafe {
3176 let val: u64 = ::core::mem::transmute(val);
3177 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3178 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3179 26usize,
3180 1u8,
3181 val as u64,
3182 )
3183 }
3184 }
3185 #[inline]
3186 pub fn write_backward(&self) -> __u64 {
3187 unsafe { ::core::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) }
3188 }
3189 #[inline]
3190 pub fn set_write_backward(&mut self, val: __u64) {
3191 unsafe {
3192 let val: u64 = ::core::mem::transmute(val);
3193 self._bitfield_1.set(27usize, 1u8, val as u64)
3194 }
3195 }
3196 #[inline]
3197 pub unsafe fn write_backward_raw(this: *const Self) -> __u64 {
3198 unsafe {
3199 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3200 ::core::ptr::addr_of!((*this)._bitfield_1),
3201 27usize,
3202 1u8,
3203 ) as u64)
3204 }
3205 }
3206 #[inline]
3207 pub unsafe fn set_write_backward_raw(this: *mut Self, val: __u64) {
3208 unsafe {
3209 let val: u64 = ::core::mem::transmute(val);
3210 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3211 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3212 27usize,
3213 1u8,
3214 val as u64,
3215 )
3216 }
3217 }
3218 #[inline]
3219 pub fn namespaces(&self) -> __u64 {
3220 unsafe { ::core::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) }
3221 }
3222 #[inline]
3223 pub fn set_namespaces(&mut self, val: __u64) {
3224 unsafe {
3225 let val: u64 = ::core::mem::transmute(val);
3226 self._bitfield_1.set(28usize, 1u8, val as u64)
3227 }
3228 }
3229 #[inline]
3230 pub unsafe fn namespaces_raw(this: *const Self) -> __u64 {
3231 unsafe {
3232 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3233 ::core::ptr::addr_of!((*this)._bitfield_1),
3234 28usize,
3235 1u8,
3236 ) as u64)
3237 }
3238 }
3239 #[inline]
3240 pub unsafe fn set_namespaces_raw(this: *mut Self, val: __u64) {
3241 unsafe {
3242 let val: u64 = ::core::mem::transmute(val);
3243 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3244 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3245 28usize,
3246 1u8,
3247 val as u64,
3248 )
3249 }
3250 }
3251 #[inline]
3252 pub fn ksymbol(&self) -> __u64 {
3253 unsafe { ::core::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) }
3254 }
3255 #[inline]
3256 pub fn set_ksymbol(&mut self, val: __u64) {
3257 unsafe {
3258 let val: u64 = ::core::mem::transmute(val);
3259 self._bitfield_1.set(29usize, 1u8, val as u64)
3260 }
3261 }
3262 #[inline]
3263 pub unsafe fn ksymbol_raw(this: *const Self) -> __u64 {
3264 unsafe {
3265 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3266 ::core::ptr::addr_of!((*this)._bitfield_1),
3267 29usize,
3268 1u8,
3269 ) as u64)
3270 }
3271 }
3272 #[inline]
3273 pub unsafe fn set_ksymbol_raw(this: *mut Self, val: __u64) {
3274 unsafe {
3275 let val: u64 = ::core::mem::transmute(val);
3276 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3277 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3278 29usize,
3279 1u8,
3280 val as u64,
3281 )
3282 }
3283 }
3284 #[inline]
3285 pub fn bpf_event(&self) -> __u64 {
3286 unsafe { ::core::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) }
3287 }
3288 #[inline]
3289 pub fn set_bpf_event(&mut self, val: __u64) {
3290 unsafe {
3291 let val: u64 = ::core::mem::transmute(val);
3292 self._bitfield_1.set(30usize, 1u8, val as u64)
3293 }
3294 }
3295 #[inline]
3296 pub unsafe fn bpf_event_raw(this: *const Self) -> __u64 {
3297 unsafe {
3298 ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
3299 ::core::ptr::addr_of!((*this)._bitfield_1),
3300 30usize,
3301 1u8,
3302 ) as u64)
3303 }
3304 }
3305 #[inline]
3306 pub unsafe fn set_bpf_event_raw(this: *mut Self, val: __u64) {
3307 unsafe {
3308 let val: u64 = ::core::mem::transmute(val);
3309 <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
3310 ::core::ptr::addr_of_mut!((*this)._bitfield_1),
3311 30usize,
3312 1u8,
3313 val as u64,
3314 )
3315 }
3316 }
3317 #[inline]
3318 pub fn aux_output(&self) -> __u64 {
3319 unsafe { ::core::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) }
3320 }
3321 #[inline]
3322 pub fn set_aux_output(&mut self, val: __u64) {
3323 unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(31usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn aux_output_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                31usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_aux_output_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                31usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cgroup(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cgroup(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(32usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cgroup_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                32usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cgroup_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                32usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn text_poke(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_text_poke(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(33usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn text_poke_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                33usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_text_poke_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                33usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn build_id(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_build_id(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(34usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn build_id_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                34usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_build_id_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                34usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn inherit_thread(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_inherit_thread(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(35usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn inherit_thread_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                35usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_inherit_thread_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                35usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn remove_on_exec(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_remove_on_exec(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(36usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn remove_on_exec_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                36usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_remove_on_exec_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                36usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn sigtrap(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_sigtrap(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(37usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn sigtrap_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                37usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_sigtrap_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                37usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn __reserved_1(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(38usize, 26u8) as u64) }
    }
    #[inline]
    pub fn set___reserved_1(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(38usize, 26u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn __reserved_1_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                38usize,
                26u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set___reserved_1_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                38usize,
                26u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn new_bitfield_1(
        disabled: __u64,
        inherit: __u64,
        pinned: __u64,
        exclusive: __u64,
        exclude_user: __u64,
        exclude_kernel: __u64,
        exclude_hv: __u64,
        exclude_idle: __u64,
        mmap: __u64,
        comm: __u64,
        freq: __u64,
        inherit_stat: __u64,
        enable_on_exec: __u64,
        task: __u64,
        watermark: __u64,
        precise_ip: __u64,
        mmap_data: __u64,
        sample_id_all: __u64,
        exclude_host: __u64,
        exclude_guest: __u64,
        exclude_callchain_kernel: __u64,
        exclude_callchain_user: __u64,
        mmap2: __u64,
        comm_exec: __u64,
        use_clockid: __u64,
        context_switch: __u64,
        write_backward: __u64,
        namespaces: __u64,
        ksymbol: __u64,
        bpf_event: __u64,
        aux_output: __u64,
        cgroup: __u64,
        text_poke: __u64,
        build_id: __u64,
        inherit_thread: __u64,
        remove_on_exec: __u64,
        sigtrap: __u64,
        __reserved_1: __u64,
    ) -> __BindgenBitfieldUnit<[u8; 8usize]> {
        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
        __bindgen_bitfield_unit.set(0usize, 1u8, {
            let disabled: u64 = unsafe { ::core::mem::transmute(disabled) };
            disabled as u64
        });
        __bindgen_bitfield_unit.set(1usize, 1u8, {
            let inherit: u64 = unsafe { ::core::mem::transmute(inherit) };
            inherit as u64
        });
        __bindgen_bitfield_unit.set(2usize, 1u8, {
            let pinned: u64 = unsafe { ::core::mem::transmute(pinned) };
            pinned as u64
        });
        __bindgen_bitfield_unit.set(3usize, 1u8, {
            let exclusive: u64 = unsafe { ::core::mem::transmute(exclusive) };
            exclusive as u64
        });
        __bindgen_bitfield_unit.set(4usize, 1u8, {
            let exclude_user: u64 = unsafe { ::core::mem::transmute(exclude_user) };
            exclude_user as u64
        });
        __bindgen_bitfield_unit.set(5usize, 1u8, {
            let exclude_kernel: u64 = unsafe { ::core::mem::transmute(exclude_kernel) };
            exclude_kernel as u64
        });
        __bindgen_bitfield_unit.set(6usize, 1u8, {
            let exclude_hv: u64 = unsafe { ::core::mem::transmute(exclude_hv) };
            exclude_hv as u64
        });
        __bindgen_bitfield_unit.set(7usize, 1u8, {
            let exclude_idle: u64 = unsafe { ::core::mem::transmute(exclude_idle) };
            exclude_idle as u64
        });
        __bindgen_bitfield_unit.set(8usize, 1u8, {
            let mmap: u64 = unsafe { ::core::mem::transmute(mmap) };
            mmap as u64
        });
        __bindgen_bitfield_unit.set(9usize, 1u8, {
            let comm: u64 = unsafe { ::core::mem::transmute(comm) };
            comm as u64
        });
        __bindgen_bitfield_unit.set(10usize, 1u8, {
            let freq: u64 = unsafe { ::core::mem::transmute(freq) };
            freq as u64
        });
        __bindgen_bitfield_unit.set(11usize, 1u8, {
            let inherit_stat: u64 = unsafe { ::core::mem::transmute(inherit_stat) };
            inherit_stat as u64
        });
        __bindgen_bitfield_unit.set(12usize, 1u8, {
            let enable_on_exec: u64 = unsafe { ::core::mem::transmute(enable_on_exec) };
            enable_on_exec as u64
        });
        __bindgen_bitfield_unit.set(13usize, 1u8, {
            let task: u64 = unsafe { ::core::mem::transmute(task) };
            task as u64
        });
        __bindgen_bitfield_unit.set(14usize, 1u8, {
            let watermark: u64 = unsafe { ::core::mem::transmute(watermark) };
            watermark as u64
        });
        __bindgen_bitfield_unit.set(15usize, 2u8, {
            let precise_ip: u64 = unsafe { ::core::mem::transmute(precise_ip) };
            precise_ip as u64
        });
        __bindgen_bitfield_unit.set(17usize, 1u8, {
            let mmap_data: u64 = unsafe { ::core::mem::transmute(mmap_data) };
            mmap_data as u64
        });
        __bindgen_bitfield_unit.set(18usize, 1u8, {
            let sample_id_all: u64 = unsafe { ::core::mem::transmute(sample_id_all) };
            sample_id_all as u64
        });
        __bindgen_bitfield_unit.set(19usize, 1u8, {
            let exclude_host: u64 = unsafe { ::core::mem::transmute(exclude_host) };
            exclude_host as u64
        });
        __bindgen_bitfield_unit.set(20usize, 1u8, {
            let exclude_guest: u64 = unsafe { ::core::mem::transmute(exclude_guest) };
            exclude_guest as u64
        });
        __bindgen_bitfield_unit.set(21usize, 1u8, {
            let exclude_callchain_kernel: u64 =
                unsafe { ::core::mem::transmute(exclude_callchain_kernel) };
            exclude_callchain_kernel as u64
        });
        __bindgen_bitfield_unit.set(22usize, 1u8, {
            let exclude_callchain_user: u64 =
                unsafe { ::core::mem::transmute(exclude_callchain_user) };
            exclude_callchain_user as u64
        });
        __bindgen_bitfield_unit.set(23usize, 1u8, {
            let mmap2: u64 = unsafe { ::core::mem::transmute(mmap2) };
            mmap2 as u64
        });
        __bindgen_bitfield_unit.set(24usize, 1u8, {
            let comm_exec: u64 = unsafe { ::core::mem::transmute(comm_exec) };
            comm_exec as u64
        });
        __bindgen_bitfield_unit.set(25usize, 1u8, {
            let use_clockid: u64 = unsafe { ::core::mem::transmute(use_clockid) };
            use_clockid as u64
        });
        __bindgen_bitfield_unit.set(26usize, 1u8, {
            let context_switch: u64 = unsafe { ::core::mem::transmute(context_switch) };
            context_switch as u64
        });
        __bindgen_bitfield_unit.set(27usize, 1u8, {
            let write_backward: u64 = unsafe { ::core::mem::transmute(write_backward) };
            write_backward as u64
        });
        __bindgen_bitfield_unit.set(28usize, 1u8, {
            let namespaces: u64 = unsafe { ::core::mem::transmute(namespaces) };
            namespaces as u64
        });
        __bindgen_bitfield_unit.set(29usize, 1u8, {
            let ksymbol: u64 = unsafe { ::core::mem::transmute(ksymbol) };
            ksymbol as u64
        });
        __bindgen_bitfield_unit.set(30usize, 1u8, {
            let bpf_event: u64 = unsafe { ::core::mem::transmute(bpf_event) };
            bpf_event as u64
        });
        __bindgen_bitfield_unit.set(31usize, 1u8, {
            let aux_output: u64 = unsafe { ::core::mem::transmute(aux_output) };
            aux_output as u64
        });
        __bindgen_bitfield_unit.set(32usize, 1u8, {
            let cgroup: u64 = unsafe { ::core::mem::transmute(cgroup) };
            cgroup as u64
        });
        __bindgen_bitfield_unit.set(33usize, 1u8, {
            let text_poke: u64 = unsafe { ::core::mem::transmute(text_poke) };
            text_poke as u64
        });
        __bindgen_bitfield_unit.set(34usize, 1u8, {
            let build_id: u64 = unsafe { ::core::mem::transmute(build_id) };
            build_id as u64
        });
        __bindgen_bitfield_unit.set(35usize, 1u8, {
            let inherit_thread: u64 = unsafe { ::core::mem::transmute(inherit_thread) };
            inherit_thread as u64
        });
        __bindgen_bitfield_unit.set(36usize, 1u8, {
            let remove_on_exec: u64 = unsafe { ::core::mem::transmute(remove_on_exec) };
            remove_on_exec as u64
        });
        __bindgen_bitfield_unit.set(37usize, 1u8, {
            let sigtrap: u64 = unsafe { ::core::mem::transmute(sigtrap) };
            sigtrap as u64
        });
        __bindgen_bitfield_unit.set(38usize, 26u8, {
            let __reserved_1: u64 = unsafe { ::core::mem::transmute(__reserved_1) };
            __reserved_1 as u64
        });
        __bindgen_bitfield_unit
    }
}
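// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of how the flag word assembled by `new_bitfield_1` above
// can also be built directly: the bits are addressed by (offset, width) pairs
// on a `__BindgenBitfieldUnit<[u8; 8]>`, so setting `disabled` (offset 0) and
// `sigtrap` (offset 37) by hand is equivalent to passing 1 for those
// parameters. The function name is purely illustrative.
#[allow(dead_code)]
fn example_build_attr_flag_word() -> __BindgenBitfieldUnit<[u8; 8usize]> {
    let mut flags: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
    flags.set(0usize, 1u8, 1); // disabled = 1
    flags.set(37usize, 1u8, 1); // sigtrap = 1
    flags
}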
/// Metadata page mapped at the head of a perf events mmap region
/// (`struct perf_event_mmap_page` from linux/perf_event.h); the fields after
/// `__reserved` index the data and AUX ring buffers that follow this page.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct perf_event_mmap_page {
    pub version: __u32,
    pub compat_version: __u32,
    pub lock: __u32,
    pub index: __u32,
    pub offset: __s64,
    pub time_enabled: __u64,
    pub time_running: __u64,
    pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1,
    pub pmc_width: __u16,
    pub time_shift: __u16,
    pub time_mult: __u32,
    pub time_offset: __u64,
    pub time_zero: __u64,
    pub size: __u32,
    pub __reserved_1: __u32,
    pub time_cycles: __u64,
    pub time_mask: __u64,
    pub __reserved: [__u8; 928usize],
    pub data_head: __u64,
    pub data_tail: __u64,
    pub data_offset: __u64,
    pub data_size: __u64,
    pub aux_head: __u64,
    pub aux_tail: __u64,
    pub aux_offset: __u64,
    pub aux_size: __u64,
}
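// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of the ring-buffer bookkeeping: the kernel advances
// `data_head` as it writes records and user space advances `data_tail` as it
// consumes them, so the difference is the number of unread bytes. A real
// reader must pair this with the appropriate memory barriers / atomic
// ordering on `data_head` and `data_tail`; plain field reads are shown here
// only to illustrate the arithmetic.
#[allow(dead_code)]
fn example_ring_bytes_available(page: &perf_event_mmap_page) -> __u64 {
    page.data_head.wrapping_sub(page.data_tail)
}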
/// Capability word of `perf_event_mmap_page`: read either as one `__u64`
/// (`capabilities`) or through the individual `cap_*` bitfield accessors.
#[repr(C)]
#[derive(Copy, Clone)]
pub union perf_event_mmap_page__bindgen_ty_1 {
    pub capabilities: __u64,
    pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
    pub _bitfield_align_1: [u64; 0],
    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>,
}
impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
    #[inline]
    pub fn cap_bit0(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_bit0(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(0usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_bit0_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                0usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_bit0_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                0usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_bit0_is_deprecated(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(1usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_bit0_is_deprecated_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                1usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_bit0_is_deprecated_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                1usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_user_rdpmc(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_user_rdpmc(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(2usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_user_rdpmc_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                2usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_user_rdpmc_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                2usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_user_time(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_user_time(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(3usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_user_time_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                3usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_user_time_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                3usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_user_time_zero(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_user_time_zero(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(4usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_user_time_zero_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                4usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_user_time_zero_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                4usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_user_time_short(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) }
    }
    #[inline]
    pub fn set_cap_user_time_short(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(5usize, 1u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_user_time_short_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                5usize,
                1u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_user_time_short_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                5usize,
                1u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn cap_____res(&self) -> __u64 {
        unsafe { ::core::mem::transmute(self._bitfield_1.get(6usize, 58u8) as u64) }
    }
    #[inline]
    pub fn set_cap_____res(&mut self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            self._bitfield_1.set(6usize, 58u8, val as u64)
        }
    }
    #[inline]
    pub unsafe fn cap_____res_raw(this: *const Self) -> __u64 {
        unsafe {
            ::core::mem::transmute(<__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get(
                ::core::ptr::addr_of!((*this)._bitfield_1),
                6usize,
                58u8,
            ) as u64)
        }
    }
    #[inline]
    pub unsafe fn set_cap_____res_raw(this: *mut Self, val: __u64) {
        unsafe {
            let val: u64 = ::core::mem::transmute(val);
            <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_set(
                ::core::ptr::addr_of_mut!((*this)._bitfield_1),
                6usize,
                58u8,
                val as u64,
            )
        }
    }
    #[inline]
    pub fn new_bitfield_1(
        cap_bit0: __u64,
        cap_bit0_is_deprecated: __u64,
        cap_user_rdpmc: __u64,
        cap_user_time: __u64,
        cap_user_time_zero: __u64,
        cap_user_time_short: __u64,
        cap_____res: __u64,
    ) -> __BindgenBitfieldUnit<[u8; 8usize]> {
        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
        __bindgen_bitfield_unit.set(0usize, 1u8, {
            let cap_bit0: u64 = unsafe { ::core::mem::transmute(cap_bit0) };
            cap_bit0 as u64
        });
        __bindgen_bitfield_unit.set(1usize, 1u8, {
            let cap_bit0_is_deprecated: u64 =
                unsafe { ::core::mem::transmute(cap_bit0_is_deprecated) };
            cap_bit0_is_deprecated as u64
        });
        __bindgen_bitfield_unit.set(2usize, 1u8, {
            let cap_user_rdpmc: u64 = unsafe { ::core::mem::transmute(cap_user_rdpmc) };
            cap_user_rdpmc as u64
        });
        __bindgen_bitfield_unit.set(3usize, 1u8, {
            let cap_user_time: u64 = unsafe { ::core::mem::transmute(cap_user_time) };
            cap_user_time as u64
        });
        __bindgen_bitfield_unit.set(4usize, 1u8, {
            let cap_user_time_zero: u64 = unsafe { ::core::mem::transmute(cap_user_time_zero) };
            cap_user_time_zero as u64
        });
        __bindgen_bitfield_unit.set(5usize, 1u8, {
            let cap_user_time_short: u64 = unsafe { ::core::mem::transmute(cap_user_time_short) };
            cap_user_time_short as u64
        });
        __bindgen_bitfield_unit.set(6usize, 58u8, {
            let cap_____res: u64 = unsafe { ::core::mem::transmute(cap_____res) };
            cap_____res as u64
        });
        __bindgen_bitfield_unit
    }
}
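// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of reading one capability bit. The bits sit behind an
// anonymous union, so the access goes through an `unsafe` union field read;
// here it checks whether user-space rdpmc reads of the counter are allowed.
#[allow(dead_code)]
fn example_user_rdpmc_allowed(page: &perf_event_mmap_page) -> bool {
    unsafe { page.__bindgen_anon_1.__bindgen_anon_1.cap_user_rdpmc() != 0 }
}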
/// Common header that starts every record in the perf ring buffer
/// (`struct perf_event_header` from linux/perf_event.h).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct perf_event_header {
    pub type_: __u32,
    pub misc: __u16,
    pub size: __u16,
}
/// Record types reported in `perf_event_header::type_`.
#[repr(u32)]
#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
pub enum perf_event_type {
    PERF_RECORD_MMAP = 1,
    PERF_RECORD_LOST = 2,
    PERF_RECORD_COMM = 3,
    PERF_RECORD_EXIT = 4,
    PERF_RECORD_THROTTLE = 5,
    PERF_RECORD_UNTHROTTLE = 6,
    PERF_RECORD_FORK = 7,
    PERF_RECORD_READ = 8,
    PERF_RECORD_SAMPLE = 9,
    PERF_RECORD_MMAP2 = 10,
    PERF_RECORD_AUX = 11,
    PERF_RECORD_ITRACE_START = 12,
    PERF_RECORD_LOST_SAMPLES = 13,
    PERF_RECORD_SWITCH = 14,
    PERF_RECORD_SWITCH_CPU_WIDE = 15,
    PERF_RECORD_NAMESPACES = 16,
    PERF_RECORD_KSYMBOL = 17,
    PERF_RECORD_BPF_EVENT = 18,
    PERF_RECORD_CGROUP = 19,
    PERF_RECORD_TEXT_POKE = 20,
    PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
    PERF_RECORD_MAX = 22,
}
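// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of dispatching on a record header: `type_` is a raw
// __u32, so it is compared against the enum discriminants via `as u32`.
#[allow(dead_code)]
fn example_is_sample_record(header: &perf_event_header) -> bool {
    header.type_ as u32 == perf_event_type::PERF_RECORD_SAMPLE as u32
}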
// Netlink attribute types for the cls_bpf classifier (TCA_BPF_* from
// linux/pkt_cls.h).
pub const TCA_BPF_UNSPEC: _bindgen_ty_156 = 0;
pub const TCA_BPF_ACT: _bindgen_ty_156 = 1;
pub const TCA_BPF_POLICE: _bindgen_ty_156 = 2;
pub const TCA_BPF_CLASSID: _bindgen_ty_156 = 3;
pub const TCA_BPF_OPS_LEN: _bindgen_ty_156 = 4;
pub const TCA_BPF_OPS: _bindgen_ty_156 = 5;
pub const TCA_BPF_FD: _bindgen_ty_156 = 6;
pub const TCA_BPF_NAME: _bindgen_ty_156 = 7;
pub const TCA_BPF_FLAGS: _bindgen_ty_156 = 8;
pub const TCA_BPF_FLAGS_GEN: _bindgen_ty_156 = 9;
pub const TCA_BPF_TAG: _bindgen_ty_156 = 10;
pub const TCA_BPF_ID: _bindgen_ty_156 = 11;
pub const __TCA_BPF_MAX: _bindgen_ty_156 = 12;
pub type _bindgen_ty_156 = ::core::ffi::c_uint;
/// rtnetlink link-message header (`struct ifinfomsg` from linux/rtnetlink.h),
/// used with RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK requests.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct ifinfomsg {
    pub ifi_family: ::core::ffi::c_uchar,
    pub __ifi_pad: ::core::ffi::c_uchar,
    pub ifi_type: ::core::ffi::c_ushort,
    pub ifi_index: ::core::ffi::c_int,
    pub ifi_flags: ::core::ffi::c_uint,
    pub ifi_change: ::core::ffi::c_uint,
}
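// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of an `ifinfomsg` addressing a single interface by index,
// as placed at the start of an RTM_GETLINK-style netlink payload. Fields
// other than `ifi_index` are left zeroed (AF_UNSPEC, no flags).
#[allow(dead_code)]
fn example_ifinfomsg_for_ifindex(ifindex: ::core::ffi::c_int) -> ifinfomsg {
    ifinfomsg {
        ifi_family: 0, // AF_UNSPEC
        __ifi_pad: 0,
        ifi_type: 0,
        ifi_index: ifindex,
        ifi_flags: 0,
        ifi_change: 0,
    }
}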
/// rtnetlink traffic-control message header (`struct tcmsg` from
/// linux/rtnetlink.h), used with the RTM_*QDISC/RTM_*TCLASS/RTM_*TFILTER
/// message families.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct tcmsg {
    pub tcm_family: ::core::ffi::c_uchar,
    pub tcm__pad1: ::core::ffi::c_uchar,
    pub tcm__pad2: ::core::ffi::c_ushort,
    pub tcm_ifindex: ::core::ffi::c_int,
    pub tcm_handle: __u32,
    pub tcm_parent: __u32,
    pub tcm_info: __u32,
}
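// --- Illustrative example (not part of the bindgen output) -----------------
// A minimal sketch of a `tcmsg` header for a netlink traffic-control request
// targeting one interface. Handle and parent are left at 0 here; real callers
// fill in the TC handle encoding they need and append TCA_* attributes
// (e.g. TCA_KIND, and TCA_OPTIONS with TCA_BPF_* nested inside) after this
// header.
#[allow(dead_code)]
fn example_tcmsg_for_ifindex(ifindex: ::core::ffi::c_int) -> tcmsg {
    tcmsg {
        tcm_family: 0, // AF_UNSPEC
        tcm__pad1: 0,
        tcm__pad2: 0,
        tcm_ifindex: ifindex,
        tcm_handle: 0,
        tcm_parent: 0,
        tcm_info: 0,
    }
}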
// Top-level traffic-control netlink attributes (TCA_* from linux/rtnetlink.h).
pub const TCA_UNSPEC: _bindgen_ty_176 = 0;
pub const TCA_KIND: _bindgen_ty_176 = 1;
pub const TCA_OPTIONS: _bindgen_ty_176 = 2;
pub const TCA_STATS: _bindgen_ty_176 = 3;
pub const TCA_XSTATS: _bindgen_ty_176 = 4;
pub const TCA_RATE: _bindgen_ty_176 = 5;
pub const TCA_FCNT: _bindgen_ty_176 = 6;
pub const TCA_STATS2: _bindgen_ty_176 = 7;
pub const TCA_STAB: _bindgen_ty_176 = 8;
pub const TCA_PAD: _bindgen_ty_176 = 9;
pub const TCA_DUMP_INVISIBLE: _bindgen_ty_176 = 10;
pub const TCA_CHAIN: _bindgen_ty_176 = 11;
pub const TCA_HW_OFFLOAD: _bindgen_ty_176 = 12;
pub const TCA_INGRESS_BLOCK: _bindgen_ty_176 = 13;
pub const TCA_EGRESS_BLOCK: _bindgen_ty_176 = 14;
pub const TCA_DUMP_FLAGS: _bindgen_ty_176 = 15;
pub const TCA_EXT_WARN_MSG: _bindgen_ty_176 = 16;
pub const __TCA_MAX: _bindgen_ty_176 = 17;
pub type _bindgen_ty_176 = ::core::ffi::c_uint;