1use crate::error::{Error, Result};
11use crate::ondisk::*;
12use crate::volume::Volume;
13
14pub struct Writer {
16 pub vol: Volume,
18 resblocksize: u32,
20 rescluster: u32,
21 index_per_block: u32,
22 anodes_per_block: u32,
23 firstreserved: u32,
24 numreserved: u32,
25 bitmapstart: u32,
26 datestamp: u32,
27 res_bitmap: Vec<u32>,
29 data_bm: Vec<(u32, Vec<u32>)>, pending_writes: Vec<(u32, Vec<u8>)>,
32}
33
34impl Writer {
    /// Wraps `vol` in a `Writer`: derives layout constants from the
    /// rootblock and loads the reserved and data allocation bitmaps
    /// into memory.
    pub fn open(vol: Volume) -> Result<Self> {
        let rb = &vol.rootblock;
        let rbs = rb.reserved_blksize as u32;
        // Device blocks per reserved block.
        let rescluster = rbs / vol.block_size();
        let firstreserved = rb.firstreserved;
        // Reserved area extent, counted in clusters.
        let numreserved = (rb.lastreserved - firstreserved + 1) / rescluster;
        // Payload words per bitmap/index block: 12-byte header = 3 longs.
        let index_per_block = (rbs / 4).saturating_sub(3);
        let anodes_per_block =
            rbs.saturating_sub(ANODE_BLOCK_HEADER_SIZE as u32) / ANODE_SIZE as u32;
        // Data bitmap covers everything past the reserved area.
        let bitmapstart = rb.lastreserved + 1;
        let datestamp = rb.datestamp;

        let mut w = Self {
            resblocksize: rbs,
            rescluster,
            index_per_block,
            anodes_per_block,
            firstreserved,
            numreserved,
            bitmapstart,
            datestamp,
            res_bitmap: Vec::new(),
            data_bm: Vec::new(),
            pending_writes: Vec::new(),
            vol,
        };
        w.load_reserved_bitmap()?;
        w.load_data_bitmap()?;
        Ok(w)
    }
66
    /// Consumes the writer and hands back the underlying volume.
    pub fn into_volume(self) -> Volume {
        self.vol
    }
71
72 fn next_datestamp(&mut self) -> u32 {
73 self.datestamp += 1;
74 self.datestamp
75 }
76
77 pub fn write_file(&mut self, path: &str, data: &[u8]) -> Result<()> {
81 let (parent_anode, filename) = self.split_path(path)?;
82 self.write_file_in(parent_anode, &filename, data)
83 }
84
85 pub fn create_dir(&mut self, path: &str) -> Result<()> {
87 let (parent_anode, dirname) = self.split_path(path)?;
88 self.create_dir_in(parent_anode, &dirname)
89 }
90
91 pub fn delete(&mut self, path: &str) -> Result<()> {
93 let (parent_anode, name) = self.split_path(path)?;
94 self.delete_in(parent_anode, &name)
95 }
96
97 pub fn write_file_in(&mut self, parent_anode: u32, name: &str, data: &[u8]) -> Result<()> {
101 self.write_file_in_no_commit(parent_anode, name, data)?;
102 self.update_rootblock()
103 }
104
105 fn write_file_in_no_commit(
107 &mut self,
108 parent_anode: u32,
109 name: &str,
110 data: &[u8],
111 ) -> Result<()> {
112 let bs = self.vol.block_size() as usize;
113 let num_blocks = data.len().div_ceil(bs).max(1);
114
115 let blocks = self.alloc_data_blocks(num_blocks as u32)?;
116 for (i, &blk) in blocks.iter().enumerate() {
117 let start = i * bs;
118 let end = (start + bs).min(data.len());
119 let mut sector = vec![0u8; bs];
120 if start < data.len() {
121 sector[..end - start].copy_from_slice(&data[start..end]);
122 }
123 self.vol.dev.write_block(blk as u64, §or)?;
124 }
125 self.vol.dev.flush()?; let anodenr = self.create_anode_chain(&blocks)?;
128 self.add_dir_entry(parent_anode, name, ST_FILE, anodenr, data.len() as u64, 0)
129 }
130
    /// Creates an empty directory named `name` under `parent_anode`: one
    /// reserved block for the dir block, one anode for it, a new directory
    /// entry in the parent, then a rootblock commit.
    pub fn create_dir_in(&mut self, parent_anode: u32, name: &str) -> Result<()> {
        let dir_blk = self.alloc_reserved_block()?;
        // Single-block cluster, no continuation anode.
        let anodenr = self.alloc_anode(1, dir_blk, 0)?;

        let mut dir_data = vec![0u8; self.resblocksize as usize];
        put_u16(&mut dir_data, 0x00, DBLKID); // block type id
        put_u32(&mut dir_data, 0x04, self.next_datestamp()); // datestamp
        put_u32(&mut dir_data, 0x0C, anodenr); // this directory's anode
        put_u32(&mut dir_data, 0x10, parent_anode); // parent's anode
        self.write_reserved(dir_blk, &dir_data)?;

        self.add_dir_entry(parent_anode, name, ST_USERDIR, anodenr, 0, 0)?;
        self.update_rootblock()
    }
146
147 pub fn create_softlink(&mut self, path: &str, target: &str) -> Result<()> {
149 let (parent_anode, name) = self.split_path(path)?;
150 self.create_softlink_in(parent_anode, &name, target)
151 }
152
153 pub fn create_softlink_in(
155 &mut self,
156 parent_anode: u32,
157 name: &str,
158 target: &str,
159 ) -> Result<()> {
160 let data = target.as_bytes();
161 let bs = self.vol.block_size() as usize;
162 let num_blocks = data.len().div_ceil(bs).max(1);
163 let blocks = self.alloc_data_blocks(num_blocks as u32)?;
164 for (i, &blk) in blocks.iter().enumerate() {
165 let start = i * bs;
166 let end = (start + bs).min(data.len());
167 let mut sector = vec![0u8; bs];
168 if start < data.len() {
169 sector[..end - start].copy_from_slice(&data[start..end]);
170 }
171 self.vol.dev.write_block(blk as u64, §or)?;
172 }
173 self.vol.dev.flush()?; let anodenr = self.create_anode_chain(&blocks)?;
175 self.add_dir_entry(
176 parent_anode,
177 name,
178 ST_SOFTLINK,
179 anodenr,
180 data.len() as u64,
181 0,
182 )?;
183 self.update_rootblock()
184 }
185
186 pub fn create_hardlink(&mut self, path: &str, target_anode: u32) -> Result<()> {
188 let (parent_anode, name) = self.split_path(path)?;
189 self.add_dir_entry(parent_anode, &name, ST_LINKFILE, target_anode, 0, 0)?;
190 self.update_rootblock()
191 }
192
    /// Restores deldir slot `deldir_idx` to `dest_path`: reads the deleted
    /// file's data through its old anode chain, rewrites it as a fresh file,
    /// clears the old chain and the deldir slot, then commits.
    pub fn undelete(&mut self, deldir_idx: usize, dest_path: &str) -> Result<()> {
        let rext = self
            .vol
            .rootblock_ext
            .as_ref()
            .ok_or_else(|| Error::NotFound("no rootblock extension".into()))?;
        // Only non-zero slots refer to real deldir blocks.
        let deldirblocks: Vec<u32> = rext
            .deldirblocks
            .iter()
            .copied()
            .filter(|&b| b != 0)
            .collect();
        let rbs = self.vol.rootblock.reserved_blksize;
        let entries_per_block = deldir_entries_per_block(rbs);
        if entries_per_block == 0 {
            return Err(Error::Corrupt("invalid reserved block size".into()));
        }

        // `deldir_idx` is a linear index across all deldir blocks.
        let block_idx = deldir_idx / entries_per_block;
        let slot_idx = deldir_idx % entries_per_block;
        if block_idx >= deldirblocks.len() {
            return Err(Error::NotFound(format!(
                "deldir index {} out of range",
                deldir_idx
            )));
        }
        let blk = deldirblocks[block_idx];
        let data = self.read_reserved_raw(blk)?;
        let off = DELDIR_HEADER_SIZE + slot_idx * DELDIR_ENTRY_SIZE;
        let entry = DelDirEntry::parse(&data[off..off + DELDIR_ENTRY_SIZE])
            .ok_or_else(|| Error::NotFound("empty deldir slot".into()))?;

        // Refuse to clobber an existing destination.
        if self.vol.lookup(dest_path)?.is_some() {
            return Err(Error::AlreadyExists(dest_path.to_string()));
        }

        let old_anode = entry.anode;

        // Read the data while the old chain is still intact.
        let file_data = self.vol.read_file_data(old_anode, entry.file_size())?;

        let (parent_anode, filename) = self.split_path(dest_path)?;
        self.write_file_in_no_commit(parent_anode, &filename, &file_data)?;

        // Best-effort cleanup of the old chain; errors are ignored.
        let _ = self.clear_anode_chain(old_anode);

        // Zero the slot so the entry cannot be restored twice.
        let mut block_data = self.read_reserved_raw(blk)?;
        for b in &mut block_data[off..off + DELDIR_ENTRY_SIZE] {
            *b = 0;
        }
        self.write_reserved(blk, &block_data)?;

        self.update_rootblock()
    }
254
255 pub fn force_remove_entry(&mut self, parent_anode: u32, name: &str) -> Result<()> {
258 self.remove_dir_entry(parent_anode, name)?;
259 self.update_rootblock()
260 }
261
    /// Overwrites the rootblock's free-data-block counter with
    /// `correct_free` and commits.
    pub fn repair_blocksfree(&mut self, correct_free: u32) -> Result<()> {
        self.vol.rootblock.blocksfree = correct_free;
        self.update_rootblock()
    }
267
    /// Overwrites the rootblock's free-reserved-block counter with
    /// `correct_free` and commits.
    pub fn repair_reserved_free(&mut self, correct_free: u32) -> Result<()> {
        self.vol.rootblock.reserved_free = correct_free;
        self.update_rootblock()
    }
273
274 pub fn overwrite_file_in(
277 &mut self,
278 parent_anode: u32,
279 name: &str,
280 file_anode: u32,
281 data: &[u8],
282 ) -> Result<()> {
283 let bs = self.vol.block_size() as usize;
284 let new_blocks_needed = data.len().div_ceil(bs).max(1) as u32;
285
286 let old_chain =
288 self.vol
289 .anodes
290 .get_chain(file_anode, self.vol.dev.as_ref(), &mut self.vol.cache)?;
291 let old_total: u32 = old_chain.iter().map(|a| a.clustersize).sum();
292
293 let mut written = 0usize;
295 let mut blocks_used = 0u32;
296 let mut sector = vec![0u8; bs];
297 for an in &old_chain {
298 for i in 0..an.clustersize {
299 if blocks_used >= new_blocks_needed {
300 break;
301 }
302 sector.fill(0);
303 let start = written;
304 let end = (start + bs).min(data.len());
305 if start < data.len() {
306 sector[..end - start].copy_from_slice(&data[start..end]);
307 }
308 self.vol
309 .dev
310 .write_block(an.blocknr as u64 + i as u64, §or)?;
311 written += bs;
312 blocks_used += 1;
313 }
314 if blocks_used >= new_blocks_needed {
315 break;
316 }
317 }
318 self.vol.dev.flush()?;
319
320 if new_blocks_needed <= old_total {
321 self.truncate_anode_chain(file_anode, new_blocks_needed)?;
323 } else {
324 let extra = new_blocks_needed - old_total;
326 let new_blocks = self.alloc_data_blocks(extra)?;
327 for &blk in &new_blocks {
328 sector.fill(0);
329 let start = written;
330 let end = (start + bs).min(data.len());
331 if start < data.len() {
332 sector[..end - start].copy_from_slice(&data[start..end]);
333 }
334 self.vol.dev.write_block(blk as u64, §or)?;
335 written += bs;
336 }
337 self.vol.dev.flush()?;
338 let new_chain_head = self.create_anode_chain(&new_blocks)?;
340 self.append_to_anode_chain(file_anode, new_chain_head)?;
341 }
342
343 self.update_dir_entry_size(parent_anode, name, data.len() as u64)?;
345 self.update_rootblock()
346 }
347
    /// Shrinks the anode chain rooted at `head` to exactly `keep_blocks`
    /// data blocks: frees the surplus blocks, rewrites the boundary anode
    /// with a shortened cluster and an EOF marker, and clears every anode
    /// past the cut.
    fn truncate_anode_chain(&mut self, head: u32, keep_blocks: u32) -> Result<()> {
        let chain = self
            .vol
            .anodes
            .get_chain(head, self.vol.dev.as_ref(), &mut self.vol.cache)?;
        let mut remaining = keep_blocks;

        for (idx, an) in chain.iter().enumerate() {
            if remaining == 0 {
                // Cut falls exactly before this anode: drop it and the rest.
                self.free_and_clear_anodes(&chain[idx..])?;
                return Ok(());
            } else if remaining < an.clustersize {
                // Cut falls inside this cluster: free its tail blocks ...
                for i in remaining..an.clustersize {
                    self.free_data_block(an.blocknr + i)?;
                }
                // ... then shorten the cluster and terminate the chain here.
                self.write_anode_fields(an.nr, remaining, an.blocknr, ANODE_EOF)?;
                self.free_and_clear_anodes(&chain[idx + 1..])?;
                return Ok(());
            } else {
                remaining -= an.clustersize;
                if remaining == 0 {
                    // This cluster becomes the new tail: keep its size,
                    // end the chain, drop everything after it.
                    self.write_anode_fields(an.nr, an.clustersize, an.blocknr, ANODE_EOF)?;
                    self.free_and_clear_anodes(&chain[idx + 1..])?;
                    return Ok(());
                }
            }
        }
        Ok(())
    }
381
382 fn free_and_clear_anodes(&mut self, anodes: &[crate::ondisk::Anode]) -> Result<()> {
384 for an in anodes {
385 for i in 0..an.clustersize {
386 self.free_data_block(an.blocknr + i)?;
387 }
388 self.clear_single_anode(an.nr)?;
389 }
390 Ok(())
391 }
392
393 fn append_to_anode_chain(&mut self, head: u32, new_head: u32) -> Result<()> {
395 let chain = self
396 .vol
397 .anodes
398 .get_chain(head, self.vol.dev.as_ref(), &mut self.vol.cache)?;
399 let tail = chain.last().ok_or(Error::AnodeNotFound(head))?;
400 self.write_anode_fields(tail.nr, tail.clustersize, tail.blocknr, new_head)
401 }
402
    /// Rewrites one anode slot (clustersize, blocknr, next) inside its anode
    /// block, and stamps the block with the current datestamp.
    fn write_anode_fields(
        &mut self,
        anodenr: u32,
        clustersize: u32,
        blocknr: u32,
        next: u32,
    ) -> Result<()> {
        let split = self.vol.rootblock.is_splitted_anodes();
        // "Split" volumes pack (block seqnr, slot) into the anode number's
        // high/low 16 bits; otherwise it is a flat index.
        let (seqnr, offset) = if split {
            (anodenr >> 16, anodenr & 0xFFFF)
        } else {
            (
                anodenr / self.anodes_per_block,
                anodenr % self.anodes_per_block,
            )
        };
        let blk_num = self.get_anode_block_nr(seqnr)?;
        let mut data = self.read_reserved_raw(blk_num)?;
        let base = ANODE_BLOCK_HEADER_SIZE + offset as usize * ANODE_SIZE;
        put_u32(&mut data, base, clustersize);
        put_u32(&mut data, base + 4, blocknr);
        put_u32(&mut data, base + 8, next);
        // Byte 4 of the anode block holds its datestamp.
        put_u32(&mut data, 4, self.datestamp);
        self.write_reserved(blk_num, &data)
    }
429
    /// Marks one anode slot unused by zeroing all of its fields.
    fn clear_single_anode(&mut self, anodenr: u32) -> Result<()> {
        self.write_anode_fields(anodenr, 0, 0, 0)
    }
434
    /// Finds `name` in the directory chain of `dir_anode` and rewrites its
    /// size field (plus the 16-bit size extension when the entry has one),
    /// refreshing the entry's date fields. `NotFound` if absent.
    fn update_dir_entry_size(&mut self, dir_anode: u32, name: &str, new_size: u64) -> Result<()> {
        let chain =
            self.vol
                .anodes
                .get_chain(dir_anode, self.vol.dev.as_ref(), &mut self.vol.cache)?;
        for an in &chain {
            for i in 0..an.clustersize {
                let blk = an.blocknr + i;
                let mut data = self.read_reserved_raw(blk)?;
                // Skip blocks in the chain that are not directory blocks.
                if u16::from_be_bytes(data[0..2].try_into().unwrap()) != DBLKID {
                    continue;
                }
                let mut pos = DIR_BLOCK_HEADER_SIZE;
                while pos < self.resblocksize as usize {
                    // Each entry is length-prefixed; 0 terminates the list.
                    let esize = data[pos] as usize;
                    if esize == 0 {
                        break;
                    }
                    let nlen = data[pos + 17] as usize;
                    let ename = crate::util::latin1_to_string(&data[pos + 18..pos + 18 + nlen]);
                    if crate::util::name_eq_ci(&ename, name) {
                        // Low 32 bits of the size live at entry offset 6.
                        put_u32(&mut data, pos + 6, new_size as u32);

                        // Walk past the comment string to the extrafields
                        // area (word-aligned), mirroring build_dir_entry.
                        let coff = pos + 18 + nlen;
                        if coff < pos + esize {
                            let clen = data[coff] as usize;
                            let mut fp = coff + 1 + clen;
                            if fp & 1 != 0 {
                                fp += 1; // pad to even offset
                            }
                            if fp + 2 <= pos + esize {
                                let flags =
                                    u16::from_be_bytes(data[fp..fp + 2].try_into().unwrap());
                                fp += 2;
                                // Skip the variable fields flagged by the
                                // lower bits to reach the size extension.
                                if flags & 0x0001 != 0 {
                                    fp += 4;
                                }
                                if flags & 0x0002 != 0 {
                                    fp += 2;
                                }
                                if flags & 0x0004 != 0 {
                                    fp += 2;
                                }
                                if flags & 0x0008 != 0 {
                                    fp += 4;
                                }
                                if flags & 0x0010 != 0 {
                                    fp += 4;
                                }
                                if flags & 0x0020 != 0 {
                                    fp += 4;
                                }
                                // 0x0040: entry carries the high 16 bits of
                                // the file size (sizes beyond 4 GiB).
                                if flags & 0x0040 != 0 && fp + 2 <= pos + esize {
                                    put_u16(&mut data, fp, (new_size >> 32) as u16);
                                }
                            }
                        }
                        // Refresh the entry date and the block datestamp.
                        let (cday, cmin, ctick) = crate::util::current_amiga_datestamp();
                        put_u16(&mut data, pos + 10, cday);
                        put_u16(&mut data, pos + 12, cmin);
                        put_u16(&mut data, pos + 14, ctick);
                        put_u32(&mut data, 4, self.next_datestamp());
                        self.write_reserved(blk, &data)?;
                        return Ok(());
                    }
                    pos += esize;
                }
            }
        }
        Err(Error::NotFound(name.to_string()))
    }
509
    /// Sets the protection byte (entry offset 16) of `name` inside
    /// `dir_anode`'s directory chain and commits. `NotFound` if absent.
    pub fn update_dir_entry_protection(
        &mut self,
        dir_anode: u32,
        name: &str,
        protection: u8,
    ) -> Result<()> {
        let chain =
            self.vol
                .anodes
                .get_chain(dir_anode, self.vol.dev.as_ref(), &mut self.vol.cache)?;
        for an in &chain {
            for i in 0..an.clustersize {
                let blk = an.blocknr + i;
                let mut data = self.read_reserved_raw(blk)?;
                // Only directory blocks are scanned.
                if u16::from_be_bytes(data[0..2].try_into().unwrap()) != DBLKID {
                    continue;
                }
                let mut pos = DIR_BLOCK_HEADER_SIZE;
                while pos < self.resblocksize as usize {
                    // Length-prefixed entries; 0 terminates the list.
                    let esize = data[pos] as usize;
                    if esize == 0 {
                        break;
                    }
                    let nlen = data[pos + 17] as usize;
                    let ename = crate::util::latin1_to_string(&data[pos + 18..pos + 18 + nlen]);
                    if crate::util::name_eq_ci(&ename, name) {
                        data[pos + 16] = protection;
                        put_u32(&mut data, 4, self.next_datestamp());
                        self.write_reserved(blk, &data)?;
                        return self.update_rootblock();
                    }
                    pos += esize;
                }
            }
        }
        Err(Error::NotFound(name.to_string()))
    }
548
549 pub fn rename_in(
550 &mut self,
551 src_parent: u32,
552 src_name: &str,
553 dst_parent: u32,
554 dst_name: &str,
555 ) -> Result<()> {
556 let entries = self.vol.list_dir_by_anode(src_parent)?;
557 let entry = entries
558 .iter()
559 .find(|e| crate::util::name_eq_ci(&e.name, src_name))
560 .ok_or_else(|| Error::NotFound(src_name.to_string()))?
561 .clone();
562
563 if let Ok(dst_entries) = self.vol.list_dir_by_anode(dst_parent)
565 && dst_entries
566 .iter()
567 .any(|e| crate::util::name_eq_ci(&e.name, dst_name))
568 {
569 self.delete_in(dst_parent, dst_name)?;
570 }
571
572 self.add_dir_entry(
574 dst_parent,
575 dst_name,
576 entry.entry_type,
577 entry.anode,
578 entry.file_size(),
579 entry.protection,
580 )?;
581 self.remove_dir_entry(src_parent, src_name)?;
583 self.update_rootblock()
584 }
585
586 pub fn delete_in(&mut self, parent_anode: u32, name: &str) -> Result<()> {
588 let entries = self.vol.list_dir_by_anode(parent_anode)?;
589 let target = entries
590 .iter()
591 .find(|e| crate::util::name_eq_ci(&e.name, name))
592 .ok_or_else(|| Error::NotFound(name.to_string()))?
593 .clone();
594
595 if target.is_dir() {
596 let sub = self.vol.list_dir_by_anode(target.anode)?;
597 if !sub.is_empty() {
598 return Err(Error::NotEmpty);
599 }
600 self.free_anode_chain_reserved(target.anode)?;
601 self.clear_anode_chain(target.anode)?;
602 } else {
603 if !self.move_to_deldir(&target) {
605 self.free_data_blocks(target.anode)?;
606 self.clear_anode_chain(target.anode)?;
607 }
608 }
609 self.remove_dir_entry(parent_anode, name)?;
610 self.update_rootblock()
611 }
612
    /// Tries to park a deleted file in the deldir instead of freeing it.
    /// Returns `true` when the entry was recorded (the caller must then NOT
    /// free its blocks), `false` when the volume has no usable deldir.
    fn move_to_deldir(&mut self, entry: &crate::ondisk::DirEntry) -> bool {
        use crate::ondisk::*;
        if !self.vol.rootblock.has_flag(MODE_DELDIR) {
            return false;
        }
        let rext = match &self.vol.rootblock_ext {
            Some(e) => e,
            None => return false,
        };
        // Only non-zero slots refer to real deldir blocks.
        let deldirblocks: Vec<u32> = rext
            .deldirblocks
            .iter()
            .copied()
            .filter(|&b| b != 0)
            .collect();
        if deldirblocks.is_empty() {
            return false;
        }

        let rbs = self.vol.rootblock.reserved_blksize;
        let entries_per_block = deldir_entries_per_block(rbs);

        // First pass: find an empty slot (anode field == 0).
        for blk in &deldirblocks {
            let data = match self.read_reserved_raw(*blk) {
                Ok(d) => d,
                Err(_) => continue,
            };
            if u16::from_be_bytes(data[0..2].try_into().unwrap()) != DELDIRID {
                continue;
            }

            for i in 0..entries_per_block {
                let off = DELDIR_HEADER_SIZE + i * DELDIR_ENTRY_SIZE;
                if off + DELDIR_ENTRY_SIZE > data.len() {
                    break;
                }
                let slot_anode = u32::from_be_bytes(data[off..off + 4].try_into().unwrap());
                if slot_anode == 0 {
                    let mut block_data = data;
                    self.write_deldir_entry(&mut block_data, off, entry);
                    // Best-effort: a failed write still reports success so
                    // the caller does not double-free the entry's blocks.
                    let _ = self.write_reserved(*blk, &block_data);
                    return true;
                }
            }
        }

        // Deldir full: evict the occupant of the very first slot, freeing
        // its storage for real, and reuse that slot.
        let blk = deldirblocks[0];
        let data = match self.read_reserved_raw(blk) {
            Ok(d) => d,
            Err(_) => return false,
        };
        let off = DELDIR_HEADER_SIZE;
        let evict_anode = u32::from_be_bytes(data[off..off + 4].try_into().unwrap());
        if evict_anode != 0 {
            let _ = self.free_data_blocks(evict_anode);
            let _ = self.clear_anode_chain(evict_anode);
        }
        let mut block_data = data;
        self.write_deldir_entry(&mut block_data, off, entry);
        let _ = self.write_reserved(blk, &block_data);
        true
    }
679
680 fn write_deldir_entry(&self, block: &mut [u8], off: usize, entry: &crate::ondisk::DirEntry) {
681 put_u32(block, off, entry.anode);
682 put_u32(block, off + 4, entry.file_size() as u32);
683 put_u16(block, off + 8, entry.creation_day);
684 put_u16(block, off + 10, entry.creation_minute);
685 put_u16(block, off + 12, entry.creation_tick);
686 let name_bytes = entry.name.as_bytes();
687 let len = name_bytes.len().min(16);
688 for b in &mut block[off + 14..off + 30] {
689 *b = 0;
690 }
691 block[off + 14..off + 14 + len].copy_from_slice(&name_bytes[..len]);
692 put_u16(block, off + 30, (entry.file_size() >> 32) as u16);
693 }
694
    /// Loads every data bitmap block into `data_bm` as (block nr, payload
    /// words); the payload starts at byte 12 of each bitmap block.
    fn load_data_bitmap(&mut self) -> Result<()> {
        // Number of bitmap blocks needed to cover the whole disk.
        let no_bmb = {
            let bits_per_bmb = self.index_per_block * 32;
            let ds = self.vol.rootblock.disksize;
            ds.div_ceil(bits_per_bmb)
        };
        for seq in 0..no_bmb {
            if let Some(blk) = self.get_bitmap_block_nr(seq)? {
                let data = self.read_reserved_raw(blk)?;
                let mut longs = Vec::new();
                for i in 0..self.index_per_block as usize {
                    let off = 12 + i * 4;
                    if off + 4 <= data.len() {
                        longs.push(u32::from_be_bytes(data[off..off + 4].try_into().unwrap()));
                    }
                }
                self.data_bm.push((blk, longs));
            }
        }
        Ok(())
    }
718
719 fn alloc_data_blocks(&mut self, count: u32) -> Result<Vec<u32>> {
720 let mut allocated = Vec::new();
721 for bm_idx in 0..self.data_bm.len() {
722 let (_, ref mut longs) = self.data_bm[bm_idx];
723 #[allow(clippy::needless_range_loop)]
724 for li in 0..longs.len() {
725 if longs[li] == 0 {
726 continue;
727 }
728 for bit in 0..32u32 {
729 if longs[li] & (0x8000_0000 >> bit) != 0 {
730 let data_blk = bm_idx as u32 * self.index_per_block * 32
731 + li as u32 * 32
732 + bit
733 + self.bitmapstart;
734 longs[li] &= !(0x8000_0000 >> bit);
735 allocated.push(data_blk);
736 if allocated.len() as u32 == count {
737 self.write_data_bitmap_block(bm_idx)?;
738 self.vol.rootblock.blocksfree -= count;
739 return Ok(allocated);
740 }
741 }
742 }
743 }
744 if !allocated.is_empty() {
745 self.write_data_bitmap_block(bm_idx)?;
746 }
747 }
748 Err(Error::DiskFull(format!(
749 "not enough free blocks (need {})",
750 count
751 )))
752 }
753
    /// Persists the in-memory words of one data bitmap block (payload at
    /// byte 12, datestamp at byte 4) via the pending-write queue.
    fn write_data_bitmap_block(&mut self, bm_idx: usize) -> Result<()> {
        let (blk, ref longs) = self.data_bm[bm_idx];
        let mut data = self.read_reserved_raw(blk)?;
        put_u32(&mut data, 4, self.datestamp);
        for (i, &val) in longs.iter().enumerate() {
            put_u32(&mut data, 12 + i * 4, val);
        }
        self.write_reserved(blk, &data)
    }
763
764 fn free_data_blocks(&mut self, anodenr: u32) -> Result<()> {
765 let chain =
766 self.vol
767 .anodes
768 .get_chain(anodenr, self.vol.dev.as_ref(), &mut self.vol.cache)?;
769 for an in &chain {
770 for i in 0..an.clustersize {
771 self.free_data_block(an.blocknr + i)?;
772 }
773 }
774 Ok(())
775 }
776
777 fn free_data_block(&mut self, blk: u32) -> Result<()> {
778 if blk < self.bitmapstart {
779 return Ok(());
780 }
781 let rel = blk - self.bitmapstart;
782 let bm_idx = rel / (self.index_per_block * 32);
783 let remainder = rel % (self.index_per_block * 32);
784 let li = (remainder / 32) as usize;
785 let bit = remainder % 32;
786 if (bm_idx as usize) < self.data_bm.len() {
787 self.data_bm[bm_idx as usize].1[li] |= 0x8000_0000 >> bit;
788 self.write_data_bitmap_block(bm_idx as usize)?;
789 self.vol.rootblock.blocksfree += 1;
790 }
791 Ok(())
792 }
793
794 fn load_reserved_bitmap(&mut self) -> Result<()> {
797 let rb = &self.vol.rootblock;
798 let bs = self.vol.block_size() as usize;
799 let cluster_size = rb.rblkcluster as usize * bs;
800 let mut cluster = vec![0u8; cluster_size];
801 self.vol.dev.read_blocks(
802 self.firstreserved as u64,
803 rb.rblkcluster as u32,
804 &mut cluster,
805 )?;
806 let bm_off = bs + 12; self.res_bitmap.clear();
808 for i in 0..=(self.numreserved / 32) {
809 let off = bm_off + i as usize * 4;
810 if off + 4 <= cluster.len() {
811 self.res_bitmap.push(u32::from_be_bytes(
812 cluster[off..off + 4].try_into().unwrap(),
813 ));
814 }
815 }
816 Ok(())
817 }
818
819 fn alloc_reserved_block(&mut self) -> Result<u32> {
820 for li in 0..self.res_bitmap.len() {
821 if self.res_bitmap[li] == 0 {
822 continue;
823 }
824 for bit in 0..32u32 {
825 if self.res_bitmap[li] & (0x8000_0000 >> bit) != 0 {
826 let idx = li as u32 * 32 + bit;
827 self.res_bitmap[li] &= !(0x8000_0000 >> bit);
828 self.vol.rootblock.reserved_free -= 1;
829 return Ok(self.firstreserved + idx * self.rescluster);
830 }
831 }
832 }
833 Err(Error::DiskFull("out of reserved blocks".into()))
834 }
835
836 fn free_reserved_block(&mut self, blk: u32) -> Result<()> {
837 let idx = (blk - self.firstreserved) / self.rescluster;
838 let li = (idx / 32) as usize;
839 let bit = idx % 32;
840 if li < self.res_bitmap.len() {
841 self.res_bitmap[li] |= 0x8000_0000 >> bit;
842 self.vol.rootblock.reserved_free += 1;
843 }
844 Ok(())
845 }
846
    /// Finds the first unused anode slot (clustersize == 0 && blocknr == 0)
    /// across the anode blocks, writes the given fields into it and returns
    /// its anode number. Numbers below `ANODE_USERFIRST` are never issued.
    fn alloc_anode(&mut self, clustersize: u32, blocknr: u32, next: u32) -> Result<u32> {
        let split = self.vol.rootblock.is_splitted_anodes();
        // NOTE(review): scan is capped at 256 anode blocks — confirm this
        // matches the on-disk limit for the format.
        for seqnr in 0..256u32 {
            let blk_num = self.get_anode_block_nr(seqnr)?;
            if blk_num == 0 {
                break;
            }
            let mut data = self.read_reserved_raw(blk_num)?;
            if u16::from_be_bytes(data[0..2].try_into().unwrap()) != ABLKID {
                continue;
            }
            for offset in 0..self.anodes_per_block {
                // Numbering mirrors write_anode_fields: split volumes pack
                // (seqnr, slot) into the high/low 16 bits.
                let anodenr = if split {
                    (seqnr << 16) | offset
                } else {
                    seqnr * self.anodes_per_block + offset
                };
                if anodenr < ANODE_USERFIRST {
                    continue;
                }
                let base = ANODE_BLOCK_HEADER_SIZE + offset as usize * ANODE_SIZE;
                let cs = u32::from_be_bytes(data[base..base + 4].try_into().unwrap());
                let bn = u32::from_be_bytes(data[base + 4..base + 8].try_into().unwrap());
                if cs == 0 && bn == 0 {
                    put_u32(&mut data, base, clustersize);
                    put_u32(&mut data, base + 4, blocknr);
                    put_u32(&mut data, base + 8, next);
                    // Byte 4 of the anode block holds its datestamp.
                    put_u32(&mut data, 4, self.datestamp);
                    self.write_reserved(blk_num, &data)?;
                    return Ok(anodenr);
                }
            }
        }
        Err(Error::DiskFull("no free anode slots".into()))
    }
884
885 fn create_anode_chain(&mut self, blocks: &[u32]) -> Result<u32> {
886 let mut clusters = Vec::new();
887 let mut i = 0usize;
888 while i < blocks.len() {
889 let start = blocks[i];
890 let mut count = 1u32;
891 while i + (count as usize) < blocks.len() && blocks[i + count as usize] == start + count
892 {
893 count += 1;
894 }
895 clusters.push((start, count));
896 i += count as usize;
897 }
898 let mut next_nr = 0u32;
900 for &(start, count) in clusters.iter().rev() {
901 next_nr = self.alloc_anode(count, start, next_nr)?;
902 }
903 Ok(next_nr)
904 }
905
906 fn clear_anode_chain(&mut self, anodenr: u32) -> Result<()> {
907 let chain =
908 self.vol
909 .anodes
910 .get_chain(anodenr, self.vol.dev.as_ref(), &mut self.vol.cache)?;
911 for an in &chain {
912 self.clear_single_anode(an.nr)?;
913 }
914 Ok(())
915 }
916
917 fn free_anode_chain_reserved(&mut self, anodenr: u32) -> Result<()> {
918 let chain =
919 self.vol
920 .anodes
921 .get_chain(anodenr, self.vol.dev.as_ref(), &mut self.vol.cache)?;
922 for an in &chain {
923 for i in 0..an.clustersize {
924 self.free_reserved_block(an.blocknr + i)?;
925 }
926 }
927 Ok(())
928 }
929
930 fn add_dir_entry(
933 &mut self,
934 dir_anode: u32,
935 name: &str,
936 entry_type: i8,
937 anode: u32,
938 fsize: u64,
939 protection: u8,
940 ) -> Result<()> {
941 let entry_bytes = self.build_dir_entry(name, entry_type, anode, fsize, protection);
942 let chain =
943 self.vol
944 .anodes
945 .get_chain(dir_anode, self.vol.dev.as_ref(), &mut self.vol.cache)?;
946
947 for an in &chain {
948 for i in 0..an.clustersize {
949 let blk = an.blocknr + i;
950 let mut data = self.read_reserved_raw(blk)?;
951 if u16::from_be_bytes(data[0..2].try_into().unwrap()) != DBLKID {
952 continue;
953 }
954 let mut pos = DIR_BLOCK_HEADER_SIZE;
956 while pos < self.resblocksize as usize {
957 if data[pos] == 0 {
958 break;
959 }
960 pos += data[pos] as usize;
961 }
962 if pos + entry_bytes.len() < self.resblocksize as usize {
963 data[pos..pos + entry_bytes.len()].copy_from_slice(&entry_bytes);
964 if pos + entry_bytes.len() < self.resblocksize as usize {
965 data[pos + entry_bytes.len()] = 0;
966 }
967 put_u32(&mut data, 4, self.next_datestamp());
968 self.write_reserved(blk, &data)?;
969 return Ok(());
970 }
971 }
972 }
973 let new_blk = self.alloc_reserved_block()?;
975 let mut new_data = vec![0u8; self.resblocksize as usize];
976 put_u16(&mut new_data, 0x00, DBLKID);
977 put_u32(&mut new_data, 0x04, self.next_datestamp());
978 put_u32(&mut new_data, 0x0C, dir_anode);
979 put_u32(&mut new_data, 0x10, dir_anode);
980 new_data[DIR_BLOCK_HEADER_SIZE..DIR_BLOCK_HEADER_SIZE + entry_bytes.len()]
981 .copy_from_slice(&entry_bytes);
982 self.write_reserved(new_blk, &new_data)?;
983 self.extend_anode_chain(dir_anode, new_blk)
984 }
985
    /// Deletes the entry `name` from `dir_anode`'s directory blocks by
    /// sliding the following entries down and zero-filling the freed tail.
    /// `NotFound` if absent.
    fn remove_dir_entry(&mut self, dir_anode: u32, name: &str) -> Result<()> {
        let chain =
            self.vol
                .anodes
                .get_chain(dir_anode, self.vol.dev.as_ref(), &mut self.vol.cache)?;
        for an in &chain {
            for i in 0..an.clustersize {
                let blk = an.blocknr + i;
                let mut data = self.read_reserved_raw(blk)?;
                // Only directory blocks are scanned.
                if u16::from_be_bytes(data[0..2].try_into().unwrap()) != DBLKID {
                    continue;
                }
                let mut pos = DIR_BLOCK_HEADER_SIZE;
                while pos < self.resblocksize as usize {
                    // Length-prefixed entries; 0 terminates the list.
                    let esize = data[pos] as usize;
                    if esize == 0 {
                        break;
                    }
                    let nlen = data[pos + 17] as usize;
                    let ename = crate::util::latin1_to_string(&data[pos + 18..pos + 18 + nlen]);
                    if crate::util::name_eq_ci(&ename, name) {
                        // Shift everything after the entry over it ...
                        let end = pos + esize;
                        let remaining = self.resblocksize as usize - end;
                        data.copy_within(end..end + remaining, pos);
                        // ... and zero the now-stale bytes at the end.
                        for b in &mut data[pos + remaining..pos + remaining + esize] {
                            *b = 0;
                        }
                        put_u32(&mut data, 4, self.next_datestamp());
                        self.write_reserved(blk, &data)?;
                        return Ok(());
                    }
                    pos += esize;
                }
            }
        }
        Err(Error::NotFound(name.to_string()))
    }
1023
1024 fn build_dir_entry(
1025 &self,
1026 name: &str,
1027 entry_type: i8,
1028 anode: u32,
1029 fsize: u64,
1030 protection: u8,
1031 ) -> Vec<u8> {
1032 let name_bytes = name.as_bytes();
1033 let nlen = name_bytes.len().min(107);
1034 let fsizex = (fsize >> 32) as u16;
1035 let has_fsizex = fsizex > 0;
1036 let extra_bytes = if has_fsizex { 4 } else { 2 }; let mut base_size = 18 + nlen + 1 + extra_bytes;
1038 if base_size & 1 != 0 {
1039 base_size += 1;
1040 }
1041 let mut entry = vec![0u8; base_size];
1042 entry[0] = base_size as u8;
1043 entry[1] = entry_type as u8;
1044 put_u32(&mut entry, 2, anode);
1045 put_u32(&mut entry, 6, fsize as u32);
1046 let (cday, cmin, ctick) = crate::util::current_amiga_datestamp();
1047 put_u16(&mut entry, 10, cday);
1048 put_u16(&mut entry, 12, cmin);
1049 put_u16(&mut entry, 14, ctick);
1050 entry[16] = protection;
1051 entry[17] = nlen as u8;
1052 entry[18..18 + nlen].copy_from_slice(&name_bytes[..nlen]);
1053 entry[18 + nlen] = 0; let ef_off = if (18 + nlen + 1) & 1 != 0 {
1055 18 + nlen + 2
1056 } else {
1057 18 + nlen + 1
1058 };
1059 let flags: u16 = if has_fsizex { 0x0040 } else { 0 };
1060 put_u16(&mut entry, ef_off, flags);
1061 if has_fsizex {
1062 put_u16(&mut entry, ef_off + 2, fsizex);
1063 }
1064 entry
1065 }
1066
1067 fn extend_anode_chain(&mut self, head_anode: u32, new_blk: u32) -> Result<()> {
1068 let new_anodenr = self.alloc_anode(1, new_blk, 0)?;
1069 self.append_to_anode_chain(head_anode, new_anodenr)
1070 }
1071
    /// Commits the volume state: flushes all pending reserved-block writes,
    /// then rewrites the rootblock cluster with a fresh datestamp, the free
    /// counters and the reserved bitmap.
    fn update_rootblock(&mut self) -> Result<()> {
        self.flush_pending()?;

        let bs = self.vol.block_size() as usize;
        let rblkcluster = self.vol.rootblock.rblkcluster as u32;
        let cluster_size = rblkcluster as usize * bs;
        let mut cluster = vec![0u8; cluster_size];
        // Read-modify-write: preserve every rootblock field we do not own.
        self.vol
            .dev
            .read_blocks(self.firstreserved as u64, rblkcluster, &mut cluster)?;

        let ds = self.next_datestamp();
        put_u32(&mut cluster, RB_OFF_DATESTAMP, ds);
        put_u32(
            &mut cluster,
            RB_OFF_RESERVED_FREE,
            self.vol.rootblock.reserved_free,
        );
        put_u32(
            &mut cluster,
            RB_OFF_BLOCKSFREE,
            self.vol.rootblock.blocksfree,
        );

        // Reserved bitmap sits 12 bytes into the block after the rootblock
        // (same layout as load_reserved_bitmap).
        let bm_off = bs + 12;
        for (i, &val) in self.res_bitmap.iter().enumerate() {
            let off = bm_off + i * 4;
            if off + 4 <= cluster.len() {
                put_u32(&mut cluster, off, val);
            }
        }

        self.vol
            .dev
            .write_blocks(self.firstreserved as u64, rblkcluster, &cluster)?;
        self.vol.dev.flush()
    }
1115
1116 fn flush_pending(&mut self) -> Result<()> {
1118 let bs = self.vol.block_size() as usize;
1119 let writes: Vec<(u32, Vec<u8>)> = self.pending_writes.drain(..).collect();
1120 for (blk, data) in &writes {
1121 write_reserved_blocks(
1122 self.vol.dev.as_ref(),
1123 *blk as u64,
1124 data,
1125 self.rescluster,
1126 bs,
1127 )?;
1128 self.vol.cache.invalidate(*blk as u64);
1129 }
1130 self.vol.dev.flush()
1131 }
1132
1133 fn read_reserved_raw(&self, blk: u32) -> Result<Vec<u8>> {
1136 for (b, data) in self.pending_writes.iter().rev() {
1138 if *b == blk {
1139 return Ok(data.clone());
1140 }
1141 }
1142 let mut data = vec![0u8; self.resblocksize as usize];
1143 self.vol
1144 .dev
1145 .read_blocks(blk as u64, self.rescluster, &mut data)?;
1146 Ok(data)
1147 }
1148
1149 fn write_reserved(&mut self, blk: u32, data: &[u8]) -> Result<()> {
1150 if let Some(pos) = self.pending_writes.iter().position(|(b, _)| *b == blk) {
1152 self.pending_writes[pos].1 = data.to_vec();
1153 } else {
1154 self.pending_writes.push((blk, data.to_vec()));
1155 }
1156 self.vol.cache.invalidate(blk as u64);
1157 Ok(())
1158 }
1159
    /// Resolves an anode-block sequence number to its block number.
    fn get_anode_block_nr(&mut self, seqnr: u32) -> Result<u32> {
        self.vol
            .anodes
            .resolve_anode_block(seqnr, self.vol.dev.as_ref(), &mut self.vol.cache)
    }
1165
    /// Looks up the block number of data bitmap block `seqnr`, if present.
    fn get_bitmap_block_nr(&mut self, seqnr: u32) -> Result<Option<u32>> {
        self.vol
            .bitmap
            .get_bitmap_block(seqnr, self.vol.dev.as_ref(), &mut self.vol.cache)
    }
1171
1172 fn split_path(&mut self, path: &str) -> Result<(u32, String)> {
1173 let parts: Vec<&str> = path.split('/').filter(|s| !s.is_empty()).collect();
1174 if parts.is_empty() {
1175 return Err(Error::NotFound("empty path".into()));
1176 }
1177 let filename = parts.last().unwrap().to_string();
1178 let mut parent = ANODE_ROOTDIR;
1179 for &part in &parts[..parts.len() - 1] {
1180 let entries = self.vol.list_dir_by_anode(parent)?;
1181 let dir = entries
1182 .iter()
1183 .find(|e| crate::util::name_eq_ci(&e.name, part) && e.is_dir())
1184 .ok_or_else(|| Error::NotFound(part.to_string()))?;
1185 parent = dir.anode;
1186 }
1187 Ok((parent, filename))
1188 }
1189}