absurder_sql/storage/export.rs
//! Export and Import functionality for SQLite databases
//!
//! This module provides conversion between IndexedDB block storage and standard SQLite .db files.
//!
//! # Features
//! - **Export**: Convert IndexedDB blocks to a downloadable .db file
//! - **Import**: Load a .db file into IndexedDB storage
//! - **Validation**: Verify SQLite file format integrity
//!
//! # Architecture
//! The system works with 4096-byte blocks stored in IndexedDB. Export reads all allocated blocks
//! and concatenates them into a standard SQLite file. Import splits a .db file into blocks and
//! writes them to IndexedDB with proper metadata tracking.
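//!
//! For example, the mapping between database size and block layout is plain
//! arithmetic (a sketch of the layout math, not a public API):
//!
//! ```text
//! total_blocks = ceil(page_size * page_count / 4096)
//! block i      = file bytes [i * 4096, (i + 1) * 4096)
//! ```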

use crate::storage::block_storage::BlockStorage;
use crate::types::DatabaseError;

const BLOCK_SIZE: usize = 4096;

/// Default maximum export size: 2GB
///
/// Rationale:
/// - IndexedDB limits: 10GB (Firefox) to ~60% of disk (Chrome/Safari)
/// - WASM/browser memory: ~2-4GB per tab
/// - Export requires loading the entire database into memory
/// - 2GB provides a safety margin while still allowing large databases
/// - Configurable via `DatabaseConfig.max_export_size_bytes`
const DEFAULT_MAX_EXPORT_SIZE: u64 = 2 * 1024 * 1024 * 1024; // 2GB

/// Default chunk size for streaming export: 10MB
///
/// For databases >100MB, export processes blocks in chunks of this size
/// to reduce memory pressure and let the event loop yield between chunks.
const DEFAULT_CHUNK_SIZE: u64 = 10 * 1024 * 1024; // 10MB

/// Progress callback type for export operations
///
/// Parameters: `(bytes_exported, total_bytes)`
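///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::ProgressCallback;
///
/// // Minimal sketch: a boxed closure reporting bytes exported so far.
/// let cb: ProgressCallback = Box::new(|exported, total| {
///     println!("export progress: {}/{} bytes", exported, total);
/// });
/// ```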
pub type ProgressCallback = Box<dyn Fn(u64, u64) + Send + Sync>;

/// Options for database export operations
///
/// Allows configuration of size limits, chunking behavior, and progress tracking.
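///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::ExportOptions;
///
/// // Minimal sketch: override only the size limit, keep the other defaults.
/// let opts = ExportOptions {
///     max_size_bytes: Some(1024 * 1024 * 1024), // 1GB
///     ..Default::default()
/// };
/// ```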
#[derive(Default)]
pub struct ExportOptions {
    /// Maximum allowed database size (bytes).
    /// `None` falls back to the default 2GB limit.
    pub max_size_bytes: Option<u64>,

    /// Chunk size for streaming large exports (bytes).
    /// Export processes this many bytes at a time, yielding to the event loop between chunks.
    /// Default: 10MB
    pub chunk_size_bytes: Option<u64>,

    /// Optional progress callback invoked after each chunk.
    /// Called with `(bytes_exported_so_far, total_bytes)`.
    pub progress_callback: Option<ProgressCallback>,
}

/// SQLite file format constants
const SQLITE_MAGIC: &[u8; 16] = b"SQLite format 3\0";
const SQLITE_HEADER_SIZE: usize = 100;
const PAGE_SIZE_OFFSET: usize = 16;
const PAGE_COUNT_OFFSET: usize = 28;

/// Minimum and maximum valid page sizes for SQLite
const MIN_PAGE_SIZE: usize = 512;
const MAX_PAGE_SIZE: usize = 65536;

/// Parse a SQLite database header to extract metadata
///
/// # Arguments
/// * `data` - First 100+ bytes of a SQLite database file
///
/// # Returns
/// * `Ok((page_size, page_count))` - Database page size and number of pages
/// * `Err(DatabaseError)` - If the header is invalid or corrupted
///
/// # SQLite Header Format
/// - Bytes 0-15: Magic string "SQLite format 3\0"
/// - Bytes 16-17: Page size (big-endian u16); special case: 1 = 65536
/// - Bytes 28-31: Page count (big-endian u32)
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::parse_sqlite_header;
///
/// let header_data: Vec<u8> = vec![/* ... header bytes ... */];
/// match parse_sqlite_header(&header_data) {
///     Ok((page_size, page_count)) => {
///         println!("Database: {} pages of {} bytes", page_count, page_size);
///     }
///     Err(e) => eprintln!("Invalid header: {}", e),
/// }
/// ```
pub fn parse_sqlite_header(data: &[u8]) -> Result<(usize, u32), DatabaseError> {
    // Validate minimum header size
    if data.len() < SQLITE_HEADER_SIZE {
        return Err(DatabaseError::new(
            "INVALID_HEADER",
            &format!(
                "Header too small: {} bytes (minimum {} required)",
                data.len(),
                SQLITE_HEADER_SIZE
            ),
        ));
    }

    // Validate magic string
    if &data[0..16] != SQLITE_MAGIC {
        let magic_str = String::from_utf8_lossy(&data[0..16]);
        return Err(DatabaseError::new(
            "INVALID_SQLITE_FILE",
            &format!(
                "Invalid SQLite magic string. Expected 'SQLite format 3', got: '{}'",
                magic_str
            ),
        ));
    }

    // Extract page size (big-endian u16 at bytes 16-17)
    let page_size_raw = u16::from_be_bytes([data[PAGE_SIZE_OFFSET], data[PAGE_SIZE_OFFSET + 1]]);

    // Handle special case: page_size == 1 means 65536
    let page_size = if page_size_raw == 1 {
        65536
    } else {
        page_size_raw as usize
    };

    // Validate page size is a power of 2 within the valid range
    if !(MIN_PAGE_SIZE..=MAX_PAGE_SIZE).contains(&page_size) || !page_size.is_power_of_two() {
        return Err(DatabaseError::new(
            "INVALID_PAGE_SIZE",
            &format!(
                "Invalid page size: {}. Must be power of 2 between {} and {}",
                page_size, MIN_PAGE_SIZE, MAX_PAGE_SIZE
            ),
        ));
    }

    // Extract page count (big-endian u32 at bytes 28-31)
    let page_count = u32::from_be_bytes([
        data[PAGE_COUNT_OFFSET],
        data[PAGE_COUNT_OFFSET + 1],
        data[PAGE_COUNT_OFFSET + 2],
        data[PAGE_COUNT_OFFSET + 3],
    ]);

    log::debug!(
        "Parsed SQLite header: page_size={}, page_count={}",
        page_size,
        page_count
    );

    Ok((page_size, page_count))
}

/// Validate export size against the configured limit
///
/// Checks whether the database size exceeds the maximum allowed export size.
/// This prevents out-of-memory errors when exporting very large databases.
///
/// # Arguments
/// * `size_bytes` - Size of the database in bytes
/// * `max_size_bytes` - Maximum allowed size (None for the default 2GB)
///
/// # Returns
/// * `Ok(())` - Size is within limits
/// * `Err(DatabaseError)` - Size exceeds the limit
///
/// # Default Limit
/// If `max_size_bytes` is None, defaults to 2GB (2,147,483,648 bytes).
/// This balances IndexedDB capacity (10GB+) against browser memory limits (~2-4GB per tab).
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::validate_export_size;
///
/// // Use the default 2GB limit
/// validate_export_size(100_000_000, None).unwrap();
///
/// // Use a custom 5GB limit
/// validate_export_size(3_000_000_000, Some(5 * 1024 * 1024 * 1024)).unwrap();
/// ```
pub fn validate_export_size(
    size_bytes: u64,
    max_size_bytes: Option<u64>,
) -> Result<(), DatabaseError> {
    let limit = max_size_bytes.unwrap_or(DEFAULT_MAX_EXPORT_SIZE);

    if size_bytes > limit {
        let size_mb = size_bytes as f64 / (1024.0 * 1024.0);
        let limit_mb = limit as f64 / (1024.0 * 1024.0);

        return Err(DatabaseError::new(
            "DATABASE_TOO_LARGE",
            &format!(
                "Database too large for export: {:.2} MB exceeds limit of {:.2} MB. \
                 Consider increasing max_export_size_bytes in DatabaseConfig or exporting in smaller chunks.",
                size_mb, limit_mb
            ),
        ));
    }

    Ok(())
}

/// Validate SQLite database file format
///
/// Performs comprehensive validation of a SQLite database file to ensure it can
/// be safely imported. Checks the file structure, magic string, page size validity,
/// and size consistency.
///
/// # Arguments
/// * `data` - Complete SQLite database file as bytes
///
/// # Returns
/// * `Ok(())` - File is valid and safe to import
/// * `Err(DatabaseError)` - File is invalid, with a detailed error message
///
/// # Validation Checks
/// - File size is at least 100 bytes (minimum header size)
/// - Magic string matches "SQLite format 3\0"
/// - Page size is valid (power of 2, between 512 and 65536)
/// - Page count is non-zero
/// - File size matches page_size × page_count
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::validate_sqlite_file;
///
/// let file_data = std::fs::read("database.db").unwrap();
/// match validate_sqlite_file(&file_data) {
///     Ok(()) => println!("Valid SQLite file"),
///     Err(e) => eprintln!("Invalid file: {}", e),
/// }
/// ```
pub fn validate_sqlite_file(data: &[u8]) -> Result<(), DatabaseError> {
    // Check minimum file size
    if data.len() < SQLITE_HEADER_SIZE {
        return Err(DatabaseError::new(
            "INVALID_SQLITE_FILE",
            &format!(
                "File too small: {} bytes (minimum {} required)",
                data.len(),
                SQLITE_HEADER_SIZE
            ),
        ));
    }

    // Validate magic string
    if &data[0..16] != SQLITE_MAGIC {
        let magic_str = String::from_utf8_lossy(&data[0..16]);
        return Err(DatabaseError::new(
            "INVALID_SQLITE_FILE",
            &format!(
                "Invalid SQLite magic string. Expected 'SQLite format 3', got: '{}'",
                magic_str.trim_end_matches('\0')
            ),
        ));
    }

    // Parse page size
    let page_size_raw = u16::from_be_bytes([data[PAGE_SIZE_OFFSET], data[PAGE_SIZE_OFFSET + 1]]);
    let page_size = if page_size_raw == 1 {
        65536
    } else {
        page_size_raw as usize
    };

    // Validate page size is within the valid range
    if !(MIN_PAGE_SIZE..=MAX_PAGE_SIZE).contains(&page_size) {
        return Err(DatabaseError::new(
            "INVALID_PAGE_SIZE",
            &format!(
                "Invalid page size: {}. Must be between {} and {}",
                page_size, MIN_PAGE_SIZE, MAX_PAGE_SIZE
            ),
        ));
    }

    // Validate page size is a power of 2
    if !page_size.is_power_of_two() {
        return Err(DatabaseError::new(
            "INVALID_PAGE_SIZE",
            &format!("Invalid page size: {}. Must be a power of 2", page_size),
        ));
    }

    // Parse page count
    let page_count = u32::from_be_bytes([
        data[PAGE_COUNT_OFFSET],
        data[PAGE_COUNT_OFFSET + 1],
        data[PAGE_COUNT_OFFSET + 2],
        data[PAGE_COUNT_OFFSET + 3],
    ]);

    // Validate page count is non-zero
    if page_count == 0 {
        return Err(DatabaseError::new(
            "INVALID_PAGE_COUNT",
            "Invalid page count: 0. Database must have at least one page",
        ));
    }

    // Validate file size matches header information
    let expected_size = (page_size as u64) * (page_count as u64);
    let actual_size = data.len() as u64;

    if actual_size != expected_size {
        return Err(DatabaseError::new(
            "SIZE_MISMATCH",
            &format!(
                "File size mismatch: expected {} bytes ({} pages × {} bytes), got {} bytes",
                expected_size, page_count, page_size, actual_size
            ),
        ));
    }

    log::debug!(
        "SQLite file validation passed: {} pages × {} bytes = {} bytes",
        page_count,
        page_size,
        expected_size
    );

    Ok(())
}

/// Export a database from BlockStorage to SQLite .db file format
///
/// Reads all allocated blocks from storage and concatenates them into a standard
/// SQLite database file that can be opened by any SQLite client.
///
/// # Arguments
/// * `storage` - BlockStorage instance containing the database blocks
/// * `max_size_bytes` - Maximum allowed export size (None for the default 2GB)
///
/// # Returns
/// * `Ok(Vec<u8>)` - Complete SQLite database file as bytes
/// * `Err(DatabaseError)` - If the export fails
///
/// # Process
/// 1. Read block 0 (header) to determine the database size
/// 2. Read all allocated blocks
/// 3. Concatenate the blocks and truncate to the exact database size
///
/// Note: export does not sync storage first; callers that need unpersisted
/// writes included should sync before exporting (see the note in the
/// implementation).
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::export_database_to_bytes;
/// use absurder_sql::storage::BlockStorage;
///
/// async fn export_example(mut storage: BlockStorage) -> Result<Vec<u8>, absurder_sql::types::DatabaseError> {
///     // Export with the default 2GB limit
///     let db_bytes = export_database_to_bytes(&mut storage, None).await?;
///     // Save db_bytes to a file, or hand them to the browser for download
///     Ok(db_bytes)
/// }
/// ```
#[cfg(target_arch = "wasm32")]
pub async fn export_database_to_bytes(
    storage: &BlockStorage,
    max_size_bytes: Option<u64>,
) -> Result<Vec<u8>, DatabaseError> {
    export_database_to_bytes_impl(storage, max_size_bytes).await
}

#[cfg(not(target_arch = "wasm32"))]
pub async fn export_database_to_bytes(
    storage: &mut BlockStorage,
    max_size_bytes: Option<u64>,
) -> Result<Vec<u8>, DatabaseError> {
    export_database_to_bytes_impl(storage, max_size_bytes).await
}

async fn export_database_to_bytes_impl(
    storage: &BlockStorage,
    max_size_bytes: Option<u64>,
) -> Result<Vec<u8>, DatabaseError> {
    log::info!("Starting database export");

    // NOTE: sync() is intentionally not called here. Export is a read-only
    // operation and must not trigger a sync; callers should ensure data is
    // synced before exporting if needed. Concurrent exports used to hang
    // because they all tried to sync simultaneously, causing RefCell borrow
    // conflicts in the global storage.
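    //
    // Callers that need durability can sync explicitly first, e.g.:
    //   storage.sync().await?;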

    // Read the first block to get the header
    log::debug!("Reading block 0 for header");
    let header_block = storage.read_block(0).await?;
    log::debug!(
        "Block 0 size: {} bytes, first 16 bytes: {:?}",
        header_block.len(),
        &header_block.get(0..16).unwrap_or(&[])
    );

    // Parse the header to determine the database size
    let (page_size, page_count) = parse_sqlite_header(&header_block)?;

    // Calculate the total database size
    let total_db_size = (page_size as u64) * (page_count as u64);

    // Validate the size doesn't exceed the maximum
    validate_export_size(total_db_size, max_size_bytes)?;

    // Warn if the database is large (>100MB)
    const MB_100: u64 = 100 * 1024 * 1024;
    if total_db_size > MB_100 {
        log::warn!(
            "Exporting large database: {} bytes ({:.2} MB). This may consume significant memory.",
            total_db_size,
            total_db_size as f64 / (1024.0 * 1024.0)
        );
    }

    log::info!(
        "Export: page_size={}, page_count={}, total_size={}",
        page_size,
        page_count,
        total_db_size
    );
    let total_blocks = total_db_size.div_ceil(BLOCK_SIZE as u64);
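    // e.g. 3 pages × 4096-byte pages = 12,288 bytes → ceil(12288 / 4096) = 3 blocks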

    // Build the list of block IDs to read
    let block_ids: Vec<u64> = (0..total_blocks).collect();

    log::debug!("Reading {} blocks for export", block_ids.len());

    // DEBUG: check which blocks actually exist in storage
    #[cfg(target_arch = "wasm32")]
    {
        use crate::storage::vfs_sync::with_global_storage;
        with_global_storage(|storage_map| {
            if let Some(db_storage) = storage_map.borrow().get(storage.get_db_name()) {
                web_sys::console::log_1(
                    &format!("[EXPORT] GLOBAL_STORAGE has {} blocks", db_storage.len()).into(),
                );
                web_sys::console::log_1(
                    &format!(
                        "[EXPORT] Block IDs in GLOBAL_STORAGE: {:?}",
                        db_storage.keys().collect::<Vec<_>>()
                    )
                    .into(),
                );
            }
        });
        web_sys::console::log_1(
            &format!(
                "[EXPORT] Requesting {} blocks: {:?}",
                block_ids.len(),
                block_ids
            )
            .into(),
        );
    }

    // Read all blocks at once
    let blocks = storage.read_blocks(&block_ids).await?;

    #[cfg(target_arch = "wasm32")]
    web_sys::console::log_1(&format!("[EXPORT] Actually read {} blocks", blocks.len()).into());

    // Concatenate all blocks
    let mut result = Vec::with_capacity(total_db_size as usize);
    for (i, block) in blocks.iter().enumerate() {
        result.extend_from_slice(block);
        #[cfg(target_arch = "wasm32")]
        if i < 5 {
            web_sys::console::log_1(
                &format!(
                    "[EXPORT] Block {} has {} bytes, first 16: {:02x?}",
                    i,
                    block.len(),
                    &block[..16.min(block.len())]
                )
                .into(),
            );
        }
        #[cfg(not(target_arch = "wasm32"))]
        let _ = i; // Suppress unused-variable warning on native
    }

    // Truncate to the exact database size
    result.truncate(total_db_size as usize);

    log::info!("Export complete: {} bytes", result.len());

    #[cfg(target_arch = "wasm32")]
    {
        web_sys::console::log_1(&format!("[EXPORT] Final result: {} bytes", result.len()).into());
        if result.len() >= 100 {
            web_sys::console::log_1(
                &format!("[EXPORT] Header bytes 28-39: {:02x?}", &result[28..40]).into(),
            );
            web_sys::console::log_1(
                &format!("[EXPORT] Header bytes 40-60: {:02x?}", &result[40..60]).into(),
            );
            let largest_root_page =
                u32::from_be_bytes([result[52], result[53], result[54], result[55]]);
            web_sys::console::log_1(
                &format!(
                    "[EXPORT] Largest root b-tree page (bytes 52-55): {}",
                    largest_root_page
                )
                .into(),
            );
        }
    }

    Ok(result)
}

/// Export a database with advanced options (streaming, progress callbacks)
///
/// For large databases (>100MB), this function processes blocks in chunks,
/// yields to the event loop between chunks, and reports progress.
///
/// # Arguments
/// * `storage` - Block storage containing the database
/// * `options` - Export configuration (size limits, chunk size, progress callback)
///
/// # Returns
/// The complete database as bytes
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::{export_database_with_options, ExportOptions};
/// use absurder_sql::storage::BlockStorage;
///
/// async fn export_with_progress(mut storage: BlockStorage) -> Result<Vec<u8>, absurder_sql::types::DatabaseError> {
///     let options = ExportOptions {
///         max_size_bytes: Some(1024 * 1024 * 1024), // 1GB limit
///         chunk_size_bytes: Some(10 * 1024 * 1024), // 10MB chunks
///         progress_callback: Some(Box::new(|exported, total| {
///             println!("Progress: {}/{} bytes ({:.1}%)",
///                 exported, total, (exported as f64 / total as f64) * 100.0);
///         })),
///     };
///     export_database_with_options(&mut storage, options).await
/// }
/// ```
#[cfg(target_arch = "wasm32")]
pub async fn export_database_with_options(
    storage: &BlockStorage,
    options: ExportOptions,
) -> Result<Vec<u8>, DatabaseError> {
    export_database_with_options_impl(storage, options).await
}

#[cfg(not(target_arch = "wasm32"))]
pub async fn export_database_with_options(
    storage: &mut BlockStorage,
    options: ExportOptions,
) -> Result<Vec<u8>, DatabaseError> {
    export_database_with_options_impl(storage, options).await
}

#[allow(invalid_reference_casting)]
async fn export_database_with_options_impl(
    storage: &BlockStorage,
    options: ExportOptions,
) -> Result<Vec<u8>, DatabaseError> {
    log::info!("Starting streaming database export");

    // Force a sync to ensure all data is persisted
    #[cfg(target_arch = "wasm32")]
    storage.sync().await?;
    #[cfg(not(target_arch = "wasm32"))]
    {
        // SAFETY: the public native entry point takes &mut BlockStorage, so we
        // hold exclusive access here even though the shared signature erases it
        let storage_mut = unsafe { &mut *(storage as *const _ as *mut BlockStorage) };
        storage_mut.sync().await?;
    }

    // Read the first block to get the header
    log::debug!("Reading block 0 for header");
    let header_block = storage.read_block(0).await?;

    // Parse the header to determine the database size
    let (page_size, page_count) = parse_sqlite_header(&header_block)?;
    let total_db_size = (page_size as u64) * (page_count as u64);

    // Validate the size doesn't exceed the maximum
    validate_export_size(total_db_size, options.max_size_bytes)?;

    // Warn if the database is large (>100MB)
    const MB_100: u64 = 100 * 1024 * 1024;
    if total_db_size > MB_100 {
        log::warn!(
            "Exporting large database: {} bytes ({:.2} MB). Using streaming export with chunks.",
            total_db_size,
            total_db_size as f64 / (1024.0 * 1024.0)
        );
    }

    log::info!(
        "Export: page_size={}, page_count={}, total_size={}",
        page_size,
        page_count,
        total_db_size
    );

    let total_blocks = total_db_size.div_ceil(BLOCK_SIZE as u64);
    let chunk_size = options.chunk_size_bytes.unwrap_or(DEFAULT_CHUNK_SIZE);
    let blocks_per_chunk = (chunk_size / BLOCK_SIZE as u64).max(1);
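    // e.g. with the 10MB default: 10_485_760 / 4096 = 2560 blocks per chunk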

    // Preallocate the result vector
    let mut result = Vec::with_capacity(total_db_size as usize);

    // Process blocks in chunks
    for chunk_start in (0..total_blocks).step_by(blocks_per_chunk as usize) {
        let chunk_end = (chunk_start + blocks_per_chunk).min(total_blocks);
        let block_ids: Vec<u64> = (chunk_start..chunk_end).collect();

        log::debug!(
            "Reading blocks {}-{} ({} blocks)",
            chunk_start,
            chunk_end - 1,
            block_ids.len()
        );

        // Read this chunk of blocks
        let blocks = storage.read_blocks(&block_ids).await?;

        // Concatenate the blocks in this chunk
        for block in blocks {
            result.extend_from_slice(&block);
        }

        let bytes_exported = result.len() as u64;

        // Invoke the progress callback, if provided
        if let Some(ref callback) = options.progress_callback {
            callback(bytes_exported.min(total_db_size), total_db_size);
        }

        // Yield to the event loop between chunks to avoid blocking
        #[cfg(target_arch = "wasm32")]
        {
            // In WASM, yield to the browser event loop
            wasm_bindgen_futures::JsFuture::from(js_sys::Promise::resolve(
                &wasm_bindgen::JsValue::NULL,
            ))
            .await
            .ok();
        }
        #[cfg(not(target_arch = "wasm32"))]
        {
            // On native, yield to the tokio runtime
            tokio::task::yield_now().await;
        }
    }

    // Truncate to the exact database size
    result.truncate(total_db_size as usize);

    // Final progress callback
    if let Some(ref callback) = options.progress_callback {
        callback(total_db_size, total_db_size);
    }

    log::info!("Streaming export complete: {} bytes", result.len());

    Ok(result)
}

/// Streaming export with basic parameters (convenience wrapper)
///
/// A simplified interface for streaming export with a progress callback.
/// For full control, use `export_database_with_options`.
///
/// # Arguments
/// * `storage` - Block storage containing the database
/// * `max_size_bytes` - Maximum allowed size (None for the default 2GB)
/// * `chunk_size_bytes` - Chunk size for streaming (None for the default 10MB)
/// * `progress_callback` - Optional progress callback
///
/// # Example
/// ```rust,no_run
/// use absurder_sql::storage::export::export_database_to_bytes_streaming;
/// use absurder_sql::storage::BlockStorage;
///
/// async fn export_example(mut storage: BlockStorage) -> Result<Vec<u8>, absurder_sql::types::DatabaseError> {
///     let progress = Box::new(|exported: u64, total: u64| {
///         println!("Exported {}/{} bytes", exported, total);
///     });
///
///     export_database_to_bytes_streaming(
///         &mut storage,
///         None,
///         Some(10 * 1024 * 1024), // 10MB chunks
///         Some(progress)
///     ).await
/// }
/// ```
#[cfg(target_arch = "wasm32")]
pub async fn export_database_to_bytes_streaming(
    storage: &BlockStorage,
    max_size_bytes: Option<u64>,
    chunk_size_bytes: Option<u64>,
    progress_callback: Option<ProgressCallback>,
) -> Result<Vec<u8>, DatabaseError> {
    let options = ExportOptions {
        max_size_bytes,
        chunk_size_bytes,
        progress_callback,
    };
    export_database_with_options(storage, options).await
}

#[cfg(not(target_arch = "wasm32"))]
pub async fn export_database_to_bytes_streaming(
    storage: &mut BlockStorage,
    max_size_bytes: Option<u64>,
    chunk_size_bytes: Option<u64>,
    progress_callback: Option<ProgressCallback>,
) -> Result<Vec<u8>, DatabaseError> {
    let options = ExportOptions {
        max_size_bytes,
        chunk_size_bytes,
        progress_callback,
    };
    export_database_with_options(storage, options).await
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_sqlite_magic_constant() {
        assert_eq!(SQLITE_MAGIC.len(), 16);
        assert_eq!(&SQLITE_MAGIC[0..14], b"SQLite format ");
    }

    #[test]
    fn test_header_size_constant() {
        assert_eq!(SQLITE_HEADER_SIZE, 100);
    }

    #[test]
    fn test_page_size_offset() {
        assert_eq!(PAGE_SIZE_OFFSET, 16);
    }

    #[test]
    fn test_page_count_offset() {
        assert_eq!(PAGE_COUNT_OFFSET, 28);
    }
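
    // Behavioral checks for the pure helpers above: a minimal sketch using
    // only this module's own constants and functions.
    #[test]
    fn test_parse_sqlite_header_roundtrip() {
        // Build a minimal 100-byte header: magic, 4096-byte pages, 3 pages
        let mut header = vec![0u8; SQLITE_HEADER_SIZE];
        header[0..16].copy_from_slice(SQLITE_MAGIC);
        header[PAGE_SIZE_OFFSET..PAGE_SIZE_OFFSET + 2].copy_from_slice(&4096u16.to_be_bytes());
        header[PAGE_COUNT_OFFSET..PAGE_COUNT_OFFSET + 4].copy_from_slice(&3u32.to_be_bytes());

        let (page_size, page_count) = parse_sqlite_header(&header).expect("valid header");
        assert_eq!(page_size, 4096);
        assert_eq!(page_count, 3);
    }

    #[test]
    fn test_parse_header_page_size_one_means_65536() {
        // SQLite encodes a 65536-byte page size as a raw value of 1
        let mut header = vec![0u8; SQLITE_HEADER_SIZE];
        header[0..16].copy_from_slice(SQLITE_MAGIC);
        header[PAGE_SIZE_OFFSET..PAGE_SIZE_OFFSET + 2].copy_from_slice(&1u16.to_be_bytes());
        header[PAGE_COUNT_OFFSET..PAGE_COUNT_OFFSET + 4].copy_from_slice(&1u32.to_be_bytes());

        let (page_size, _) = parse_sqlite_header(&header).expect("valid header");
        assert_eq!(page_size, 65536);
    }

    #[test]
    fn test_validate_export_size_limits() {
        // At the limit passes, one byte over fails, custom limits are honored
        assert!(validate_export_size(DEFAULT_MAX_EXPORT_SIZE, None).is_ok());
        assert!(validate_export_size(DEFAULT_MAX_EXPORT_SIZE + 1, None).is_err());
        assert!(validate_export_size(100, Some(50)).is_err());
    }
}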
749}