// btrfs_uapi/tree_search.rs

//! # Generic B-tree search: walking any internal btrfs tree via `BTRFS_IOC_TREE_SEARCH`
//!
//! The kernel's tree search ioctl lets userspace read any internal btrfs tree
//! (chunk, root, quota, …) by specifying a key range. Items are returned in
//! batches; [`tree_search`] advances the cursor automatically and calls a
//! closure once per item until the range is exhausted.
//!
//! # Byte order
//!
//! [`SearchHeader`] fields (objectid, offset, type) are in host byte order:
//! the kernel fills them in through the ioctl layer. The `data` slice passed
//! to the callback contains the raw on-disk item payload, which is
//! **little-endian**; callers must use `u64::from_le_bytes` and friends when
//! interpreting it.
//!
//! # Ioctl version
//!
//! This module provides two variants:
//!
//! - [`tree_search`] uses `BTRFS_IOC_TREE_SEARCH` (v1) with its fixed
//!   3992-byte result buffer. Sufficient for all item types used by this crate.
//! - [`tree_search_v2`] uses `BTRFS_IOC_TREE_SEARCH_V2` with a caller-chosen
//!   buffer size. Useful when items may be larger than what v1 can return in a
//!   single batch.

26use crate::raw::{
27 btrfs_ioc_tree_search, btrfs_ioc_tree_search_v2, btrfs_ioctl_search_args,
28 btrfs_ioctl_search_args_v2, btrfs_ioctl_search_header,
29 btrfs_ioctl_search_key,
30};
31use std::{
32 mem,
33 os::{fd::AsRawFd, unix::io::BorrowedFd},
34};
35
/// A compound B-tree key: `(objectid, item_type, offset)`.
///
/// Every item in a btrfs tree is ordered by this single 136-bit value,
/// conceptually `(objectid << 72) | (item_type << 64) | offset`. The three
/// fields form one ordered tuple — they are not independent ranges.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct Key {
    /// Object this key belongs to (e.g. inode number, tree ID, device ID).
    pub objectid: u64,
    /// Item type: a `BTRFS_*_KEY` constant from `crate::raw`.
    pub item_type: u32,
    /// Type-specific offset (e.g. byte offset for extents, parent ID for
    /// backrefs).
    pub offset: u64,
}

impl Key {
    /// The smallest possible key: every component zero.
    pub const MIN: Self = Self {
        objectid: 0,
        item_type: 0,
        offset: 0,
    };

    /// The largest possible key: every component saturated.
    pub const MAX: Self = Self {
        objectid: u64::MAX,
        item_type: u32::MAX,
        offset: u64::MAX,
    };
}
68
69/// Filter specifying which items to return from a tree search.
70///
71/// Filtering works in three stages:
72///
73/// 1. Select a tree by `tree_id` (e.g. root tree, chunk tree, quota tree).
74/// 2. Return only items whose compound key `(objectid, item_type, offset)`
75/// falls within the inclusive range `[start, end]`. Because the key is a
76/// compound tuple, the three components of `start` and `end` are not
77/// independent filters — they form a single ordered bound on the B-tree.
78/// This means items with unexpected types can appear if their compound key
79/// is between `start` and `end`; callbacks should filter on
80/// `hdr.item_type` when they need a single type.
81/// 3. Trim results to only include items stored in metadata blocks whose
82/// transaction ID falls within `[min_transid, max_transid]`.
83///
84/// Build a filter for common cases with [`SearchFilter::for_type`] or
85/// [`SearchFilter::for_objectid_range`].
86///
87/// Tree IDs and item type codes are the `BTRFS_*_OBJECTID` and `BTRFS_*_KEY`
88/// constants from `crate::raw`, cast to `u64` and `u32` respectively at the
89/// call site.
90#[derive(Debug, Clone)]
91pub struct SearchFilter {
92 /// Tree to search: a `BTRFS_*_TREE_OBJECTID` constant from `crate::raw`.
93 pub tree_id: u64,
94 /// Lower bound of the key range (inclusive).
95 pub start: Key,
96 /// Upper bound of the key range (inclusive).
97 pub end: Key,
98 /// Lower bound on the metadata block transaction ID (inclusive).
99 pub min_transid: u64,
100 /// Upper bound on the metadata block transaction ID (inclusive).
101 pub max_transid: u64,
102}
103
104impl SearchFilter {
105 /// Return all items of `item_type` in `tree_id`, across every objectid
106 /// and offset.
107 #[must_use]
108 pub fn for_type(tree_id: u64, item_type: u32) -> Self {
109 Self {
110 tree_id,
111 start: Key {
112 objectid: 0,
113 item_type,
114 offset: 0,
115 },
116 end: Key {
117 objectid: u64::MAX,
118 item_type,
119 offset: u64::MAX,
120 },
121 min_transid: 0,
122 max_transid: u64::MAX,
123 }
124 }
125
126 /// Return all items of `item_type` in `tree_id` whose objectid falls in
127 /// `[min_objectid, max_objectid]`.
128 #[must_use]
129 pub fn for_objectid_range(
130 tree_id: u64,
131 item_type: u32,
132 min_objectid: u64,
133 max_objectid: u64,
134 ) -> Self {
135 Self {
136 tree_id,
137 start: Key {
138 objectid: min_objectid,
139 item_type,
140 offset: 0,
141 },
142 end: Key {
143 objectid: max_objectid,
144 item_type,
145 offset: u64::MAX,
146 },
147 min_transid: 0,
148 max_transid: u64::MAX,
149 }
150 }
151}
152
/// Metadata returned for each item found by [`tree_search`].
///
/// Header fields are in host byte order — the kernel fills them in through
/// the ioctl layer. The `data` slice handed to the callback alongside this
/// header is the raw on-disk item payload and is **little-endian**.
#[derive(Debug, Clone, Copy)]
pub struct SearchHeader {
    /// Transaction ID of the metadata block that contains this item.
    pub transid: u64,
    /// Object ID portion of the item's btrfs key.
    pub objectid: u64,
    /// Offset portion of the item's btrfs key.
    pub offset: u64,
    /// Item type (the `type` field of the btrfs key).
    pub item_type: u32,
    /// Length in bytes of the item's data payload.
    pub len: u32,
}
171
/// Number of items to request per ioctl call. The kernel may return fewer
/// when the result buffer fills up; the cursor-advance logic re-issues the
/// ioctl from just past the last returned item.
const ITEMS_PER_BATCH: u32 = 4096;

/// Size of `btrfs_ioctl_search_header` as a compile-time constant, used to
/// step through the packed (header, data) pairs in the result buffer.
const SEARCH_HEADER_SIZE: usize = mem::size_of::<btrfs_ioctl_search_header>();
177
/// Walk every item in the tree that falls within `filter`, calling `f` once
/// for each one.
///
/// `f` receives:
/// * a reference to the item's [`SearchHeader`] (objectid, offset, type, …)
/// * a byte slice of the item's raw on-disk data payload (little-endian)
///
/// The search stops early and the error is returned if:
/// * `f` returns `Err(_)`
/// * the underlying `BTRFS_IOC_TREE_SEARCH` ioctl fails
///
/// Returns `Ok(())` when the entire requested range has been scanned.
///
/// **Important:** the kernel treats `(min_objectid, min_type, min_offset)` and
/// `(max_objectid, max_type, max_offset)` as compound tuple keys, not three
/// independent range filters. This means items whose type falls outside
/// `[min_type, max_type]` CAN be returned when their compound key is between
/// the min and max bounds (e.g. a type-144 item with a lower objectid than a
/// type-132 item with a higher objectid). Callbacks that need a single item
/// type must filter on `hdr.item_type` themselves.
///
/// # Errors
///
/// Returns `Err` if the ioctl fails or the callback returns an error.
///
/// # Privileges
///
/// Most trees require `CAP_SYS_ADMIN`. The subvolume tree of the inode
/// belonging to `fd` can be searched without elevated privileges by setting
/// `filter.tree_id = 0`.
#[allow(clippy::needless_pass_by_value)] // SearchFilter is small but not Copy by design
pub fn tree_search(
    fd: BorrowedFd,
    filter: SearchFilter,
    mut f: impl FnMut(&SearchHeader, &[u8]) -> nix::Result<()>,
) -> nix::Result<()> {
    // SAFETY: btrfs_ioctl_search_args is a plain-old-data ioctl struct for
    // which the all-zero bit pattern is a valid value.
    let mut args: btrfs_ioctl_search_args = unsafe { mem::zeroed() };

    fill_search_key(&mut args.key, &filter);

    loop {
        // nr_items is an in/out field: the kernel overwrites it with the
        // number of items actually returned, so it must be reset before
        // every call.
        args.key.nr_items = ITEMS_PER_BATCH;

        unsafe { btrfs_ioc_tree_search(fd.as_raw_fd(), &raw mut args) }?;

        let nr = args.key.nr_items;
        if nr == 0 {
            break;
        }

        // Walk the result buffer. We use raw pointer reads to avoid borrow
        // checker conflicts: args.buf (read-only after the ioctl) and
        // args.key (mutated below for cursor advancement) are separate fields,
        // but a Rust reference to one would prevent mutation of the other.
        //
        // SAFETY:
        // * The ioctl has returned successfully, so args.buf contains nr valid
        //   (header, data) pairs totalling at most args.buf.len() bytes.
        // * buf_base is derived from args.buf which lives for the entire loop
        //   body; it is not invalidated until args is dropped.
        // * All raw reads are bounds-checked before dereferencing.
        // * The `data` slices passed to `f` do not outlive this loop
        //   iteration — `f` takes `&[u8]`, not `&'static [u8]`.
        let buf_base: *const u8 = args.buf.as_ptr().cast();
        let buf_cap = args.buf.len();

        let mut off = 0usize;
        // Key of the most recently processed item; used after the batch to
        // advance the cursor past it.
        let mut last = SearchHeader {
            transid: 0,
            objectid: 0,
            offset: 0,
            item_type: 0,
            len: 0,
        };

        for _ in 0..nr {
            // Reject a malformed kernel response rather than reading past
            // the fixed-size result buffer.
            if off + SEARCH_HEADER_SIZE > buf_cap {
                return Err(nix::errno::Errno::EOVERFLOW);
            }
            // read_unaligned: (header, data) pairs are packed back-to-back,
            // so headers after the first need not be naturally aligned.
            let raw_hdr: btrfs_ioctl_search_header = unsafe {
                buf_base
                    .add(off)
                    .cast::<btrfs_ioctl_search_header>()
                    .read_unaligned()
            };
            let hdr = SearchHeader {
                transid: raw_hdr.transid,
                objectid: raw_hdr.objectid,
                offset: raw_hdr.offset,
                item_type: raw_hdr.type_,
                len: raw_hdr.len,
            };
            off += SEARCH_HEADER_SIZE;

            let data_len = hdr.len as usize;
            if off + data_len > buf_cap {
                return Err(nix::errno::Errno::EOVERFLOW);
            }
            let data: &[u8] = unsafe {
                std::slice::from_raw_parts(buf_base.add(off), data_len)
            };
            off += data_len;

            f(&hdr, data)?;
            last = hdr;
        }

        // Exhausted the 136-bit key space: done.
        if !advance_cursor(&mut args.key, &last) {
            break;
        }
    }

    Ok(())
}
292
/// Default result-buffer size for [`tree_search_v2`]: 64 KiB.
const DEFAULT_V2_BUF_SIZE: usize = 64 * 1024;
295
296/// Like [`tree_search`] but uses `BTRFS_IOC_TREE_SEARCH_V2` with a larger
297/// result buffer.
298///
299/// `buf_size` controls the buffer size in bytes (default 64 KiB if `None`).
300/// The v2 ioctl is otherwise identical to v1 but can return more data per
301/// batch, reducing the number of round-trips for large result sets.
302///
303/// If `buf_size` is too small for even a single item, the kernel returns
304/// `EOVERFLOW` and sets `buf_size` to the required minimum. This function
305/// automatically retries with the larger buffer.
306///
307/// # Errors
308///
309/// Returns `Err` if the ioctl fails or the callback returns an error.
310#[allow(clippy::needless_pass_by_value)] // SearchFilter is small but not Copy by design
311#[allow(clippy::too_many_lines)]
312pub fn tree_search_v2(
313 fd: BorrowedFd,
314 filter: SearchFilter,
315 buf_size: Option<usize>,
316 mut f: impl FnMut(&SearchHeader, &[u8]) -> nix::Result<()>,
317) -> nix::Result<()> {
318 let base_size = mem::size_of::<btrfs_ioctl_search_args_v2>();
319 let mut capacity = buf_size.unwrap_or(DEFAULT_V2_BUF_SIZE);
320
321 // Allocate as Vec<u64> for 8-byte alignment.
322 let alloc_bytes = base_size + capacity;
323 let num_u64s = alloc_bytes.div_ceil(mem::size_of::<u64>());
324 let mut buf = vec![0u64; num_u64s];
325
326 // SAFETY: buf is correctly sized and aligned for btrfs_ioctl_search_args_v2.
327 let args_ptr = buf.as_mut_ptr().cast::<btrfs_ioctl_search_args_v2>();
328 unsafe {
329 fill_search_key(&mut (*args_ptr).key, &filter);
330 }
331
332 loop {
333 unsafe {
334 (*args_ptr).key.nr_items = ITEMS_PER_BATCH;
335 (*args_ptr).buf_size = capacity as u64;
336 }
337
338 match unsafe {
339 btrfs_ioc_tree_search_v2(fd.as_raw_fd(), &raw mut *args_ptr)
340 } {
341 Ok(_) => {}
342 Err(nix::errno::Errno::EOVERFLOW) => {
343 // Kernel tells us the needed size via buf_size.
344 #[allow(clippy::cast_possible_truncation)]
345 // buf_size fits in usize
346 let needed = unsafe { (*args_ptr).buf_size } as usize;
347 if needed <= capacity {
348 return Err(nix::errno::Errno::EOVERFLOW);
349 }
350 capacity = needed;
351 let alloc_bytes = base_size + capacity;
352 let num_u64s = alloc_bytes.div_ceil(mem::size_of::<u64>());
353 buf.resize(num_u64s, 0);
354 // args_ptr must be refreshed after reallocation.
355 let args_ptr_new =
356 buf.as_mut_ptr().cast::<btrfs_ioctl_search_args_v2>();
357 unsafe {
358 (*args_ptr_new).key.nr_items = ITEMS_PER_BATCH;
359 (*args_ptr_new).buf_size = capacity as u64;
360 btrfs_ioc_tree_search_v2(
361 fd.as_raw_fd(),
362 &raw mut *args_ptr_new,
363 )?;
364 }
365 // Fall through to process results with the new pointer.
366 // Update our local for the rest of the loop.
367 let _ = args_ptr_new;
368 }
369 Err(e) => return Err(e),
370 }
371
372 // Re-derive pointer after potential reallocation.
373 let args_ptr = buf.as_mut_ptr().cast::<btrfs_ioctl_search_args_v2>();
374
375 let nr = unsafe { (*args_ptr).key.nr_items };
376 if nr == 0 {
377 break;
378 }
379
380 // The result data starts right after the base struct (at the
381 // flexible array member `buf[]`).
382 let data_base: *const u8 =
383 unsafe { (args_ptr as *const u8).add(base_size) };
384
385 let mut off = 0usize;
386 let mut last = SearchHeader {
387 transid: 0,
388 objectid: 0,
389 offset: 0,
390 item_type: 0,
391 len: 0,
392 };
393
394 for _ in 0..nr {
395 if off + SEARCH_HEADER_SIZE > capacity {
396 return Err(nix::errno::Errno::EOVERFLOW);
397 }
398 let raw_hdr: btrfs_ioctl_search_header = unsafe {
399 data_base
400 .add(off)
401 .cast::<btrfs_ioctl_search_header>()
402 .read_unaligned()
403 };
404 let hdr = SearchHeader {
405 transid: raw_hdr.transid,
406 objectid: raw_hdr.objectid,
407 offset: raw_hdr.offset,
408 item_type: raw_hdr.type_,
409 len: raw_hdr.len,
410 };
411 off += SEARCH_HEADER_SIZE;
412
413 let data_len = hdr.len as usize;
414 if off + data_len > capacity {
415 return Err(nix::errno::Errno::EOVERFLOW);
416 }
417 let data: &[u8] = unsafe {
418 std::slice::from_raw_parts(data_base.add(off), data_len)
419 };
420 off += data_len;
421
422 f(&hdr, data)?;
423 last = hdr;
424 }
425
426 if !advance_cursor(unsafe { &mut (*args_ptr).key }, &last) {
427 break;
428 }
429 }
430
431 Ok(())
432}
433
434fn fill_search_key(sk: &mut btrfs_ioctl_search_key, filter: &SearchFilter) {
435 sk.tree_id = filter.tree_id;
436 sk.min_objectid = filter.start.objectid;
437 sk.max_objectid = filter.end.objectid;
438 sk.min_type = filter.start.item_type;
439 sk.max_type = filter.end.item_type;
440 sk.min_offset = filter.start.offset;
441 sk.max_offset = filter.end.offset;
442 sk.min_transid = filter.min_transid;
443 sk.max_transid = filter.max_transid;
444}
445
446/// Advance the search cursor past `last` so the next batch begins from the
447/// item immediately after it in the 136-bit key space
448/// `(objectid << 72) | (type << 64) | offset`.
449///
450/// The kernel interprets `(min_objectid, min_type, min_offset)` as a compound
451/// tuple key, so all three fields must be updated together to point past the
452/// last returned item. Advancing only `min_offset` while leaving
453/// `min_objectid` at its original value would cause items from lower objectids
454/// that were already returned to satisfy the new minimum and be yielded again.
455///
456/// Returns `false` when the objectid also overflows, meaning the full key
457/// space has been exhausted.
458fn advance_cursor(
459 sk: &mut btrfs_ioctl_search_key,
460 last: &SearchHeader,
461) -> bool {
462 let (new_offset, offset_overflow) = last.offset.overflowing_add(1);
463 if !offset_overflow {
464 sk.min_objectid = last.objectid;
465 sk.min_type = last.item_type;
466 sk.min_offset = new_offset;
467 return true;
468 }
469
470 sk.min_offset = 0;
471 let (new_type, type_overflow) = last.item_type.overflowing_add(1);
472 if !type_overflow {
473 sk.min_objectid = last.objectid;
474 sk.min_type = new_type;
475 return true;
476 }
477
478 sk.min_type = 0;
479 let (new_oid, oid_overflow) = last.objectid.overflowing_add(1);
480 if oid_overflow {
481 return false;
482 }
483 sk.min_objectid = new_oid;
484 true
485}
486
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a `SearchHeader` with the given key components and zeroed
    /// transid/len, for exercising `advance_cursor`.
    fn header(objectid: u64, item_type: u32, offset: u64) -> SearchHeader {
        SearchHeader {
            transid: 0,
            objectid,
            offset,
            item_type,
            len: 0,
        }
    }

    /// An all-zero kernel search key for tests to mutate.
    fn zeroed_search_key() -> btrfs_ioctl_search_key {
        // SAFETY: btrfs_ioctl_search_key is plain old data; the all-zero
        // bit pattern is a valid value.
        unsafe { mem::zeroed() }
    }

    // --- SearchFilter constructors ---

    #[test]
    fn for_type_covers_all_objectids_and_offsets() {
        let f = SearchFilter::for_type(5, 132);
        assert_eq!(f.tree_id, 5);
        assert_eq!(f.start.objectid, 0);
        assert_eq!(f.end.objectid, u64::MAX);
        assert_eq!(f.start.item_type, 132);
        assert_eq!(f.end.item_type, 132);
        assert_eq!(f.start.offset, 0);
        assert_eq!(f.end.offset, u64::MAX);
        assert_eq!(f.min_transid, 0);
        assert_eq!(f.max_transid, u64::MAX);
    }

    #[test]
    fn for_objectid_range_restricts_objectids() {
        let f = SearchFilter::for_objectid_range(1, 84, 100, 200);
        assert_eq!(f.tree_id, 1);
        assert_eq!(f.start.objectid, 100);
        assert_eq!(f.end.objectid, 200);
        assert_eq!(f.start.item_type, 84);
        assert_eq!(f.end.item_type, 84);
        assert_eq!(f.start.offset, 0);
        assert_eq!(f.end.offset, u64::MAX);
    }

    // --- fill_search_key ---

    #[test]
    fn fill_search_key_copies_all_fields() {
        let filter = SearchFilter {
            tree_id: 1,
            start: Key {
                objectid: 10,
                item_type: 30,
                offset: 50,
            },
            end: Key {
                objectid: 20,
                item_type: 40,
                offset: 60,
            },
            min_transid: 70,
            max_transid: 80,
        };
        let mut sk = zeroed_search_key();
        fill_search_key(&mut sk, &filter);
        assert_eq!(sk.tree_id, 1);
        assert_eq!(sk.min_objectid, 10);
        assert_eq!(sk.max_objectid, 20);
        assert_eq!(sk.min_type, 30);
        assert_eq!(sk.max_type, 40);
        assert_eq!(sk.min_offset, 50);
        assert_eq!(sk.max_offset, 60);
        assert_eq!(sk.min_transid, 70);
        assert_eq!(sk.max_transid, 80);
    }

    // --- advance_cursor: normal case ---

    #[test]
    fn advance_increments_offset() {
        let mut sk = zeroed_search_key();
        let last = header(256, 132, 100);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 256);
        assert_eq!(sk.min_type, 132);
        assert_eq!(sk.min_offset, 101);
    }

    #[test]
    fn advance_tracks_objectid_from_last_item() {
        // This is the bug that was fixed: the cursor must track the last
        // item's objectid, not leave min_objectid at its original value.
        let mut sk = zeroed_search_key();
        sk.min_objectid = 100; // original search started at 100
        let last = header(300, 132, 50); // batch ended at objectid 300
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 300, "must track last item's objectid");
        assert_eq!(sk.min_type, 132);
        assert_eq!(sk.min_offset, 51);
    }

    #[test]
    fn advance_tracks_type_from_last_item() {
        let mut sk = zeroed_search_key();
        let last = header(256, 180, 42);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 256);
        assert_eq!(sk.min_type, 180);
        assert_eq!(sk.min_offset, 43);
    }

    // --- advance_cursor: offset overflow ---

    #[test]
    fn advance_offset_overflow_bumps_type() {
        let mut sk = zeroed_search_key();
        let last = header(256, 132, u64::MAX);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 256);
        assert_eq!(sk.min_type, 133);
        assert_eq!(sk.min_offset, 0);
    }

    // --- advance_cursor: type overflow ---

    #[test]
    fn advance_type_overflow_bumps_objectid() {
        let mut sk = zeroed_search_key();
        let last = header(256, u32::MAX, u64::MAX);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 257);
        assert_eq!(sk.min_type, 0);
        assert_eq!(sk.min_offset, 0);
    }

    // --- advance_cursor: full keyspace exhaustion ---

    #[test]
    fn advance_all_overflow_returns_false() {
        let mut sk = zeroed_search_key();
        let last = header(u64::MAX, u32::MAX, u64::MAX);
        assert!(!advance_cursor(&mut sk, &last));
    }

    // --- advance_cursor: edge cases ---

    #[test]
    fn advance_zero_key() {
        let mut sk = zeroed_search_key();
        let last = header(0, 0, 0);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, 0);
        assert_eq!(sk.min_type, 0);
        assert_eq!(sk.min_offset, 1);
    }

    #[test]
    fn advance_objectid_max_type_zero_offset_max() {
        // offset overflows, type bumps to 1
        let mut sk = zeroed_search_key();
        let last = header(u64::MAX, 0, u64::MAX);
        assert!(advance_cursor(&mut sk, &last));
        assert_eq!(sk.min_objectid, u64::MAX);
        assert_eq!(sk.min_type, 1);
        assert_eq!(sk.min_offset, 0);
    }

    #[test]
    fn advance_preserves_unrelated_search_key_fields() {
        // Only the min_* cursor fields may be touched; the max_* bounds and
        // transid window must survive cursor advancement unchanged.
        let mut sk = zeroed_search_key();
        sk.max_objectid = 999;
        sk.max_type = 888;
        sk.max_offset = 777;
        sk.max_transid = 666;
        let last = header(10, 20, 30);
        advance_cursor(&mut sk, &last);
        assert_eq!(sk.max_objectid, 999);
        assert_eq!(sk.max_type, 888);
        assert_eq!(sk.max_offset, 777);
        assert_eq!(sk.max_transid, 666);
    }
}
670}