io_transform! {
#[cfg(feature = "write")]
use core::ops::DerefMut;
#[cfg(feature = "write")]
use crate::{
raw::{DirEntryAttrFlags, RawDirectoryEntry, RawFileEntry},
error::{FatError, Result},
file::ShortFileName,
};
#[cfg(feature = "write")]
use super::{
fat_table::Fat, dir::{FatDir, FileEntry}, fs::FatFs,
io::{Cluster, ClusterLike, Read, ReadExt, Seek, SeekFrom, Write},
};
#[cfg(feature = "write")]
use hadris_common::types::endian::{Endian, LittleEndian};
/// An on-disk FAT timestamp in the packed date/time encoding used by
/// directory entries (see [`FatDateTime::new`] for the exact bit packing).
#[cfg(feature = "write")]
#[derive(Debug, Clone, Copy)]
pub struct FatDateTime {
    /// Packed date: bits 15-9 = years since 1980, bits 8-5 = month, bits 4-0 = day.
    pub date: u16,
    /// Packed time: bits 15-11 = hour, bits 10-5 = minute, bits 4-0 = seconds / 2.
    pub time: u16,
    /// Hundredths of a second past the 2-second granularity of `time` (0..=199).
    pub time_tenth: u8,
}
#[cfg(feature = "write")]
impl FatDateTime {
    /// Packs a calendar date and wall-clock time into the FAT on-disk encoding.
    ///
    /// Years are stored as an offset from 1980, clamped to `0..=127`; seconds
    /// are stored at 2-second granularity. `time_tenth` starts at zero.
    pub fn new(year: u16, month: u8, day: u8, hour: u8, minute: u8, second: u8) -> Self {
        let years_since_epoch = year.saturating_sub(1980).min(127);
        let packed_date = (years_since_epoch << 9)
            | (u16::from(month & 0x0F) << 5)
            | (u16::from(day) & 0x1F);
        let packed_time = (u16::from(hour & 0x1F) << 11)
            | (u16::from(minute & 0x3F) << 5)
            | ((u16::from(second) / 2) & 0x1F);
        Self {
            date: packed_date,
            time: packed_time,
            time_tenth: 0,
        }
    }
    /// Captures the current local time as a FAT timestamp.
    #[cfg(feature = "std")]
    pub fn now() -> Self {
        use chrono::{Datelike, Local, Timelike};
        let current = Local::now();
        let mut stamp = Self::new(
            current.year() as u16,
            current.month() as u8,
            current.day() as u8,
            current.hour() as u8,
            current.minute() as u8,
            current.second() as u8,
        );
        // The packed time only has 2-second resolution; the odd second plus
        // the sub-second milliseconds go into `time_tenth` (hundredths of a
        // second, capped at 199).
        let hundredths =
            (current.second() % 2) * 100 + current.timestamp_subsec_millis() / 10;
        stamp.time_tenth = hundredths.min(199) as u8;
        stamp
    }
    /// Without `std` there is no clock; fall back to the FAT epoch
    /// (1980-01-01 00:00:00).
    #[cfg(not(feature = "std"))]
    pub fn now() -> Self {
        Self::new(1980, 1, 1, 0, 0, 0)
    }
    /// Returns the raw `(date, time, time_tenth)` triple as stored on disk.
    pub fn to_raw(&self) -> (u16, u16, u8) {
        (self.date, self.time, self.time_tenth)
    }
}
#[cfg(feature = "write")]
impl Default for FatDateTime {
    /// Defaults to the current time via [`FatDateTime::now`], which degrades
    /// to the FAT epoch when the `std` feature is disabled.
    fn default() -> Self {
        FatDateTime::now()
    }
}
/// Streaming writer over a single file's cluster chain.
///
/// Writes sequentially, allocating and linking clusters on demand; call
/// [`FileWriter::finish`] to flush and update the directory entry (size,
/// first cluster, timestamps).
#[cfg(feature = "write")]
pub struct FileWriter<'a, DATA: Read + Write + Seek> {
    fs: &'a FatFs<DATA>,
    // First cluster of the file, or `None` while the file has no data yet.
    first_cluster: Option<Cluster<usize>>,
    // Cluster currently being written, or `None` before the first allocation.
    current_cluster: Option<Cluster<usize>>,
    // Byte offset of the write cursor inside `current_cluster`.
    offset_in_cluster: usize,
    // Total bytes accounted to the file (includes pre-existing bytes when
    // opened via `new_append`); written to the entry's size field by `finish`.
    total_written: usize,
    // Cluster of the directory holding this file's entry (0 = fixed root).
    entry_parent: Cluster<usize>,
    // Byte offset of the directory entry within `entry_parent`.
    entry_offset: usize,
    // `(start, size)` of the fixed FAT12/16 root region, when the entry
    // lives there.
    fixed_root: Option<(usize, usize)>,
}
#[cfg(feature = "write")]
impl<'a, DATA: Read + Write + Seek> FileWriter<'a, DATA> {
    /// Creates a writer positioned at the beginning of `entry`, overwriting
    /// its contents from offset 0.
    ///
    /// # Errors
    ///
    /// Returns [`FatError::NotAFile`] if `entry` refers to a directory.
    pub fn new(fs: &'a FatFs<DATA>, entry: &FileEntry) -> Result<Self> {
        if entry.is_directory() {
            return Err(FatError::NotAFile);
        }
        // Clusters 0 and 1 are reserved; anything below 2 means "no data yet".
        let first_cluster = if entry.cluster().0 >= 2 {
            Some(entry.cluster())
        } else {
            None
        };
        // Entries whose parent cluster is 0 live in the fixed FAT12/16 root
        // directory region, which is addressed by byte offset, not cluster.
        let fixed_root = if entry.parent_clus.0 == 0 {
            fs.fixed_root_dir_info()
        } else {
            None
        };
        Ok(Self {
            fs,
            first_cluster,
            current_cluster: first_cluster,
            offset_in_cluster: 0,
            total_written: 0,
            entry_parent: entry.parent_clus,
            entry_offset: entry.offset_within_cluster,
            fixed_root,
        })
    }
    /// Creates a writer positioned at the current end of `entry`, so
    /// subsequent writes append to the existing contents.
    ///
    /// # Errors
    ///
    /// Returns [`FatError::NotAFile`] if `entry` refers to a directory.
    pub async fn new_append(fs: &'a FatFs<DATA>, entry: &FileEntry) -> Result<Self> {
        if entry.is_directory() {
            return Err(FatError::NotAFile);
        }
        let fixed_root = if entry.parent_clus.0 == 0 {
            fs.fixed_root_dir_info()
        } else {
            None
        };
        let file_size = entry.size();
        let first_cluster = if entry.cluster().0 >= 2 {
            Some(entry.cluster())
        } else {
            None
        };
        // An empty file (or one without a valid chain) appends exactly like a
        // fresh write.
        if file_size == 0 || first_cluster.is_none() {
            return Ok(Self {
                fs,
                first_cluster,
                current_cluster: first_cluster,
                offset_in_cluster: 0,
                total_written: 0,
                entry_parent: entry.parent_clus,
                entry_offset: entry.offset_within_cluster,
                fixed_root,
            });
        }
        let cluster_size = {
            let data = fs.data.lock();
            data.cluster_size
        };
        // Walk the FAT chain to its final cluster; the lock is re-acquired
        // for each step.
        let mut current = first_cluster.unwrap();
        loop {
            let mut data = fs.data.lock();
            match fs.fat.next_cluster(data.deref_mut(), current.0).await? {
                Some(next) => current = Cluster(next as usize),
                None => break,
            }
        }
        // Resume inside the last cluster. BUGFIX: when `file_size` is an
        // exact multiple of `cluster_size` the last cluster is completely
        // full and `file_size % cluster_size` is 0, which would make `write`
        // overwrite the last cluster's existing data from offset 0. Use
        // `cluster_size` instead so the next write allocates and links a
        // fresh cluster.
        let offset_in_last = file_size % cluster_size;
        let offset_in_cluster = if offset_in_last == 0 {
            cluster_size
        } else {
            offset_in_last
        };
        Ok(Self {
            fs,
            first_cluster,
            current_cluster: Some(current),
            offset_in_cluster,
            total_written: file_size,
            entry_parent: entry.parent_clus,
            entry_offset: entry.offset_within_cluster,
            fixed_root,
        })
    }
    /// Writes `buf` sequentially at the current position, allocating and
    /// linking new clusters whenever the current one fills up.
    ///
    /// Returns the number of bytes written (always `buf.len()` on success).
    pub async fn write(&mut self, buf: &[u8]) -> Result<usize> {
        if buf.is_empty() {
            return Ok(0);
        }
        let mut data = self.fs.data.lock();
        let cluster_size = data.cluster_size;
        let mut written = 0;
        while written < buf.len() {
            // Need a new cluster: either nothing is allocated yet, or the
            // current cluster is full.
            if self.current_cluster.is_none() || self.offset_in_cluster >= cluster_size {
                // Prefer allocating right after the current cluster to keep
                // the chain contiguous; 2 is the first valid data cluster.
                let hint = self.current_cluster.map(|c| c.0 as u32 + 1).unwrap_or(2);
                let new_cluster = match &self.fs.fat {
                    Fat::Fat12(fat12) => {
                        fat12.allocate_cluster(data.deref_mut(), hint as u16).await? as u32
                    }
                    Fat::Fat16(fat16) => {
                        fat16.allocate_cluster(data.deref_mut(), hint as u16).await? as u32
                    }
                    Fat::Fat32(fat32) => fat32.allocate_cluster(data.deref_mut(), hint).await?,
                };
                self.fs.decrement_free_count();
                self.fs.update_next_free_hint(new_cluster);
                // Link the previous tail cluster to the new one.
                if let Some(prev) = self.current_cluster {
                    match &self.fs.fat {
                        Fat::Fat12(fat12) => {
                            fat12.write_clus(data.deref_mut(), prev.0, new_cluster as u16).await?;
                        }
                        Fat::Fat16(fat16) => {
                            fat16.write_clus(data.deref_mut(), prev.0, new_cluster as u16).await?;
                        }
                        Fat::Fat32(fat32) => {
                            fat32.write_clus(data.deref_mut(), prev.0, new_cluster).await?;
                        }
                    }
                }
                if self.first_cluster.is_none() {
                    self.first_cluster = Some(Cluster(new_cluster as usize));
                }
                self.current_cluster = Some(Cluster(new_cluster as usize));
                self.offset_in_cluster = 0;
            }
            // Copy as much as fits into the remainder of the current cluster.
            let cluster = self.current_cluster.unwrap();
            let bytes_left_in_cluster = cluster_size - self.offset_in_cluster;
            let to_write = (buf.len() - written).min(bytes_left_in_cluster);
            let seek_pos =
                cluster.to_bytes(self.fs.info.data_start, cluster_size) + self.offset_in_cluster;
            data.seek(SeekFrom::Start(seek_pos as u64)).await?;
            data.write_all(&buf[written..written + to_write]).await?;
            self.offset_in_cluster += to_write;
            self.total_written += to_write;
            written += to_write;
        }
        Ok(written)
    }
    /// Returns the total number of bytes accounted to the file so far
    /// (including pre-existing content when opened via `new_append`).
    pub fn bytes_written(&self) -> usize {
        self.total_written
    }
    /// Finalises the write: updates the directory entry's size, first
    /// cluster, and timestamps, then flushes the underlying device.
    pub async fn finish(self) -> Result<()> {
        let mut data = self.fs.data.lock();
        let cluster_size = data.cluster_size;
        // Locate the raw directory entry on disk (fixed root vs. cluster).
        let entry_pos = if self.entry_parent.0 == 0 {
            let (root_start, _) = self.fixed_root.expect("Fixed root info required");
            root_start + self.entry_offset
        } else {
            self.entry_parent
                .to_bytes(self.fs.info.data_start, cluster_size)
                + self.entry_offset
        };
        // Read-modify-write so unrelated fields are preserved.
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        let mut raw_entry = data.read_struct::<RawDirectoryEntry>().await?;
        let file_entry = unsafe { &mut raw_entry.file };
        file_entry.size =
            hadris_common::types::number::U32::<LittleEndian>::new(self.total_written as u32);
        if let Some(cluster) = self.first_cluster {
            // FAT32 splits the cluster number across the high/low 16-bit
            // fields; FAT12/16 only use the low field.
            let (high, low) = match &self.fs.fat {
                Fat::Fat12(_) | Fat::Fat16(_) => (0u16, cluster.0 as u16),
                Fat::Fat32(_) => ((cluster.0 >> 16) as u16, cluster.0 as u16),
            };
            file_entry.first_cluster_high =
                hadris_common::types::number::U16::<LittleEndian>::new(high);
            file_entry.first_cluster_low =
                hadris_common::types::number::U16::<LittleEndian>::new(low);
        } else {
            file_entry.first_cluster_high =
                hadris_common::types::number::U16::<LittleEndian>::new(0);
            file_entry.first_cluster_low =
                hadris_common::types::number::U16::<LittleEndian>::new(0);
        }
        let now = FatDateTime::now();
        file_entry.last_write_date = now.date.to_le_bytes();
        file_entry.last_write_time = now.time.to_le_bytes();
        file_entry.last_access_date = now.date.to_le_bytes();
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        data.write_all(bytemuck::bytes_of(&raw_entry)).await?;
        data.flush().await?;
        Ok(())
    }
}
/// Write-oriented convenience methods on [`FatFs`].
#[cfg(feature = "write")]
pub trait FatFsWriteExt<DATA: Read + Write + Seek> {
    /// Returns a [`FileWriter`] that overwrites `entry` from the start.
    fn write_file<'a>(&'a self, entry: &FileEntry) -> Result<FileWriter<'a, DATA>>;
    /// Shrinks `entry` to `new_size` bytes, freeing clusters past the end.
    /// Growing is not supported: a `new_size` >= the current size is a no-op.
    async fn truncate(&self, entry: &FileEntry, new_size: usize) -> Result<()>;
}
#[cfg(feature = "write")]
impl<DATA: Read + Write + Seek> FatFsWriteExt<DATA> for FatFs<DATA> {
    fn write_file<'a>(&'a self, entry: &FileEntry) -> Result<FileWriter<'a, DATA>> {
        FileWriter::new(self, entry)
    }
    /// Shrinks `entry` to `new_size`, releasing the tail of its cluster chain
    /// and rewriting the directory entry's size/cluster fields.
    async fn truncate(&self, entry: &FileEntry, new_size: usize) -> Result<()> {
        if !entry.is_file() {
            return Err(FatError::NotAFile);
        }
        let current_size = entry.size();
        // Only shrinking is supported; an equal or larger size is a no-op.
        if new_size >= current_size {
            return Ok(());
        }
        let first_cluster = entry.cluster();
        let cluster_size = self.info.cluster_size;
        // Entries with parent cluster 0 live in the fixed FAT12/16 root region.
        let fixed_root = if entry.parent_clus.0 == 0 {
            self.fixed_root_dir_info()
        } else {
            None
        };
        if new_size == 0 {
            // Free the whole chain and clear the entry's first-cluster field.
            let freed_count = if first_cluster.0 >= 2 {
                let mut data = self.data.lock();
                self.fat.free_chain(data.deref_mut(), first_cluster.0).await?
            } else {
                0
            };
            self.increment_free_count(freed_count);
            self.update_entry_size_and_cluster(entry, 0, Cluster(0), fixed_root).await?;
        } else {
            // Walk to the last cluster that must be kept, then cut the chain
            // after it. The data lock is re-acquired on each step.
            let clusters_needed = new_size.div_ceil(cluster_size);
            let mut current = first_cluster;
            for _ in 1..clusters_needed {
                let mut data = self.data.lock();
                if let Some(next) = self.fat.next_cluster(data.deref_mut(), current.0).await? {
                    current = Cluster(next as usize);
                } else {
                    // Chain is shorter than the size implies; keep what exists.
                    break;
                }
            }
            let freed_count = {
                let mut data = self.data.lock();
                self.fat.truncate_chain(data.deref_mut(), current.0).await?
            };
            self.increment_free_count(freed_count);
            self.update_entry_size_and_cluster(entry, new_size, first_cluster, fixed_root).await?;
        }
        Ok(())
    }
}
/// Applies the FAT short-name escape for names beginning with byte `0xE5`.
///
/// On disk, `0xE5` in the first byte of a directory entry marks the entry as
/// deleted, so a genuine leading `0xE5` (a valid KANJI lead byte) is stored
/// as `0x05` instead.
#[cfg(feature = "write")]
fn kanji_short_name_fixup(name: &mut [u8; 11]) {
    match name[0] {
        0xE5 => name[0] = 0x05,
        _ => {}
    }
}
#[cfg(feature = "write")]
impl<DATA: Read + Write + Seek> FatFs<DATA> {
/// Locates a reusable directory-entry slot in `dir`, searching either the
/// fixed FAT12/16 root region or the directory's cluster chain.
async fn find_free_entry_slot_in_dir(
    &self,
    dir: &FatDir<'_, DATA>,
) -> Result<(Cluster<usize>, usize)> {
    match dir.fixed_root {
        Some((start, size)) => self.find_free_entry_in_fixed_root(start, size).await,
        None => self.find_free_entry_in_cluster_chain(dir.cluster).await,
    }
}
/// Scans the fixed FAT12/16 root directory region for a free or deleted
/// entry slot, returning `(Cluster(0), byte_offset)` on success.
///
/// # Errors
///
/// Returns [`FatError::DirectoryFull`] when every slot is occupied — the
/// fixed root cannot be extended.
async fn find_free_entry_in_fixed_root(
    &self,
    root_start: usize,
    root_size: usize,
) -> Result<(Cluster<usize>, usize)> {
    let mut data = self.data.lock();
    let entry_size = core::mem::size_of::<RawDirectoryEntry>();
    let mut offset = 0;
    while offset + entry_size <= root_size {
        data.seek(SeekFrom::Start((root_start + offset) as u64)).await?;
        let raw = data.read_struct::<RawDirectoryEntry>().await?;
        // Byte 0 is the allocation marker: 0x00 = never used, 0xE5 = deleted.
        let marker = unsafe { raw.bytes[0] };
        if marker == 0x00 || marker == 0xE5 {
            return Ok((Cluster(0), offset));
        }
        offset += entry_size;
    }
    Err(FatError::DirectoryFull)
}
/// Scans a directory's cluster chain for a free (0x00) or deleted (0xE5)
/// entry slot, extending the chain with a fresh zeroed cluster when every
/// existing slot is occupied.
///
/// Returns the cluster and byte offset (within that cluster) of the slot.
async fn find_free_entry_in_cluster_chain(
    &self,
    dir_cluster: Cluster<usize>,
) -> Result<(Cluster<usize>, usize)> {
    let mut data = self.data.lock();
    let cluster_size = data.cluster_size;
    let entry_size = core::mem::size_of::<RawDirectoryEntry>();
    let entries_per_cluster = cluster_size / entry_size;
    let mut current_cluster = dir_cluster;
    loop {
        // Scan every slot of the current cluster.
        for i in 0..entries_per_cluster {
            let offset = i * entry_size;
            let seek_pos =
                current_cluster.to_bytes(self.info.data_start, cluster_size) + offset;
            data.seek(SeekFrom::Start(seek_pos as u64)).await?;
            let raw_entry = data.read_struct::<RawDirectoryEntry>().await?;
            // Byte 0 is the allocation marker: 0x00 = never used, 0xE5 = deleted.
            let first_byte = unsafe { raw_entry.bytes[0] };
            if first_byte == 0x00 || first_byte == 0xE5 {
                return Ok((current_cluster, offset));
            }
        }
        let next = self.fat.next_cluster(data.deref_mut(), current_cluster.0).await?;
        match next {
            Some(cluster) => {
                current_cluster = Cluster(cluster as usize);
            }
            None => {
                // End of chain with no free slot: allocate a new cluster and
                // link it to the current tail.
                let new_cluster = match &self.fat {
                    Fat::Fat12(fat12) => {
                        let hint = (current_cluster.0 as u16).saturating_add(1);
                        let new = fat12.allocate_cluster(data.deref_mut(), hint).await?;
                        fat12.write_clus(data.deref_mut(), current_cluster.0, new).await?;
                        new as u32
                    }
                    Fat::Fat16(fat16) => {
                        let hint = (current_cluster.0 as u16).saturating_add(1);
                        let new = fat16.allocate_cluster(data.deref_mut(), hint).await?;
                        fat16.write_clus(data.deref_mut(), current_cluster.0, new).await?;
                        new as u32
                    }
                    Fat::Fat32(fat32) => {
                        let hint = current_cluster.0 as u32 + 1;
                        let new = fat32.allocate_cluster(data.deref_mut(), hint).await?;
                        fat32.write_clus(data.deref_mut(), current_cluster.0, new).await?;
                        new
                    }
                };
                self.decrement_free_count();
                self.update_next_free_hint(new_cluster);
                // Zero-fill the new cluster so stale bytes are never parsed
                // as directory entries; slot 0 of the new cluster is free.
                let new_cluster_pos =
                    Cluster(new_cluster as usize).to_bytes(self.info.data_start, cluster_size);
                data.seek(SeekFrom::Start(new_cluster_pos as u64)).await?;
                let zeros = alloc::vec![0u8; cluster_size];
                data.write_all(&zeros).await?;
                return Ok((Cluster(new_cluster as usize), 0));
            }
        }
    }
}
/// Writes `entry` at the given slot, resolving the on-disk position from
/// either the fixed root region (cluster 0) or a data-area cluster.
async fn write_raw_entry(
    &self,
    cluster: Cluster<usize>,
    offset: usize,
    entry: &RawFileEntry,
    fixed_root: Option<(usize, usize)>,
) -> Result<()> {
    let mut data = self.data.lock();
    let cluster_size = data.cluster_size;
    // Cluster 0 is the sentinel for the fixed FAT12/16 root directory.
    let base = if cluster.0 == 0 {
        fixed_root.expect("Fixed root info required for cluster 0").0
    } else {
        cluster.to_bytes(self.info.data_start, cluster_size)
    };
    data.seek(SeekFrom::Start((base + offset) as u64)).await?;
    data.write_all(bytemuck::bytes_of(entry)).await?;
    Ok(())
}
/// Creates an empty file named `name` in `parent`.
///
/// The new entry carries the ARCHIVE attribute, the current timestamp, and
/// no allocated clusters (its first-cluster fields are written as 0).
///
/// # Errors
///
/// Returns [`FatError::AlreadyExists`] if `name` already exists in `parent`
/// and [`FatError::InvalidFilename`] if it cannot be encoded as a short name.
pub async fn create_file(&self, parent: &FatDir<'_, DATA>, name: &str) -> Result<FileEntry> {
    if parent.find(name).await?.is_some() {
        return Err(FatError::AlreadyExists);
    }
    let short_name =
        ShortFileName::from_long_name(name, 0).map_err(|_| FatError::InvalidFilename)?;
    let (slot_cluster, slot_offset) = self.find_free_entry_slot_in_dir(parent).await?;
    let stamp = FatDateTime::now();
    let (date, time, tenth) = stamp.to_raw();
    let mut encoded_name = short_name.to_raw_bytes();
    kanji_short_name_fixup(&mut encoded_name);
    let raw = RawFileEntry {
        name: encoded_name,
        attributes: DirEntryAttrFlags::ARCHIVE.bits(),
        reserved: 0,
        creation_time_tenth: tenth,
        creation_time: time.to_le_bytes(),
        creation_date: date.to_le_bytes(),
        last_access_date: date.to_le_bytes(),
        first_cluster_high: hadris_common::types::number::U16::<LittleEndian>::new(0),
        last_write_time: time.to_le_bytes(),
        last_write_date: date.to_le_bytes(),
        first_cluster_low: hadris_common::types::number::U16::<LittleEndian>::new(0),
        size: hadris_common::types::number::U32::<LittleEndian>::new(0),
    };
    self.write_raw_entry(slot_cluster, slot_offset, &raw, parent.fixed_root).await?;
    Ok(FileEntry {
        short_name,
        #[cfg(feature = "lfn")]
        long_name: None,
        attr: DirEntryAttrFlags::ARCHIVE,
        size: 0,
        parent_clus: slot_cluster,
        offset_within_cluster: slot_offset,
        cluster: Cluster(0),
    })
}
pub async fn create_dir<'a>(
&'a self,
parent: &FatDir<'a, DATA>,
name: &str,
) -> Result<FatDir<'a, DATA>> {
if parent.find(name).await?.is_some() {
return Err(FatError::AlreadyExists);
}
let short_name =
ShortFileName::from_long_name(name, 0).map_err(|_| FatError::InvalidFilename)?;
let new_cluster = {
let mut data = self.data.lock();
match &self.fat {
Fat::Fat12(fat12) => fat12.allocate_cluster(data.deref_mut(), 2).await? as u32,
Fat::Fat16(fat16) => fat16.allocate_cluster(data.deref_mut(), 2).await? as u32,
Fat::Fat32(fat32) => fat32.allocate_cluster(data.deref_mut(), 2).await?,
}
};
self.decrement_free_count();
self.update_next_free_hint(new_cluster);
let (slot_cluster, slot_offset) = self.find_free_entry_slot_in_dir(parent).await?;
let now = FatDateTime::now();
let (date, time, time_tenth) = now.to_raw();
let (cluster_high, cluster_low) = match &self.fat {
Fat::Fat12(_) | Fat::Fat16(_) => (0u16, new_cluster as u16),
Fat::Fat32(_) => ((new_cluster >> 16) as u16, new_cluster as u16),
};
let mut raw_name = short_name.to_raw_bytes();
kanji_short_name_fixup(&mut raw_name);
let entry = RawFileEntry {
name: raw_name,
attributes: DirEntryAttrFlags::DIRECTORY.bits(),
reserved: 0,
creation_time_tenth: time_tenth,
creation_time: time.to_le_bytes(),
creation_date: date.to_le_bytes(),
last_access_date: date.to_le_bytes(),
first_cluster_high: hadris_common::types::number::U16::<LittleEndian>::new(
cluster_high,
),
last_write_time: time.to_le_bytes(),
last_write_date: date.to_le_bytes(),
first_cluster_low: hadris_common::types::number::U16::<LittleEndian>::new(cluster_low),
size: hadris_common::types::number::U32::<LittleEndian>::new(0),
};
self.write_raw_entry(slot_cluster, slot_offset, &entry, parent.fixed_root).await?;
{
let mut data = self.data.lock();
let cluster_size = data.cluster_size;
let dir_pos =
Cluster(new_cluster as usize).to_bytes(self.info.data_start, cluster_size);
data.seek(SeekFrom::Start(dir_pos as u64)).await?;
let zeros = alloc::vec![0u8; cluster_size];
data.write_all(&zeros).await?;
let dot_entry = RawFileEntry {
name: *b". ",
attributes: DirEntryAttrFlags::DIRECTORY.bits(),
reserved: 0,
creation_time_tenth: time_tenth,
creation_time: time.to_le_bytes(),
creation_date: date.to_le_bytes(),
last_access_date: date.to_le_bytes(),
first_cluster_high: hadris_common::types::number::U16::<LittleEndian>::new(
cluster_high,
),
last_write_time: time.to_le_bytes(),
last_write_date: date.to_le_bytes(),
first_cluster_low: hadris_common::types::number::U16::<LittleEndian>::new(
cluster_low,
),
size: hadris_common::types::number::U32::<LittleEndian>::new(0),
};
data.seek(SeekFrom::Start(dir_pos as u64)).await?;
data.write_all(bytemuck::bytes_of(&dot_entry)).await?;
let parent_cluster = parent.cluster.0 as u32;
let (parent_high, parent_low) = match &self.fat {
Fat::Fat12(_) | Fat::Fat16(_) => (0u16, parent_cluster as u16),
Fat::Fat32(_) => ((parent_cluster >> 16) as u16, parent_cluster as u16),
};
let dotdot_entry = RawFileEntry {
name: *b".. ",
attributes: DirEntryAttrFlags::DIRECTORY.bits(),
reserved: 0,
creation_time_tenth: time_tenth,
creation_time: time.to_le_bytes(),
creation_date: date.to_le_bytes(),
last_access_date: date.to_le_bytes(),
first_cluster_high: hadris_common::types::number::U16::<LittleEndian>::new(
parent_high,
),
last_write_time: time.to_le_bytes(),
last_write_date: date.to_le_bytes(),
first_cluster_low: hadris_common::types::number::U16::<LittleEndian>::new(
parent_low,
),
size: hadris_common::types::number::U32::<LittleEndian>::new(0),
};
let dotdot_pos = dir_pos + core::mem::size_of::<RawDirectoryEntry>();
data.seek(SeekFrom::Start(dotdot_pos as u64)).await?;
data.write_all(bytemuck::bytes_of(&dotdot_entry)).await?;
}
Ok(FatDir {
data: self,
cluster: Cluster(new_cluster as usize),
fixed_root: None, })
}
/// Removes `entry` from its parent directory and frees its cluster chain.
///
/// For directories, fails with [`FatError::DirectoryNotEmpty`] unless the
/// only remaining entries are `.` and `..`.
pub async fn delete(&self, entry: &FileEntry) -> Result<()> {
    if entry.is_directory() {
        // Refuse to delete a non-empty directory: count entries other than
        // the mandatory `.` and `..` self-references.
        let dir = FatDir {
            data: self,
            cluster: entry.cluster(),
            fixed_root: None,
        };
        let mut count = 0;
        let mut iter = dir.entries();
        while let Some(item) = iter.next_entry().await {
            let item = item?;
            let name = item.name();
            if name != "." && name != ".." {
                count += 1;
            }
        }
        if count > 0 {
            return Err(FatError::DirectoryNotEmpty);
        }
    }
    // Release the data clusters (values below 2 mean "no data allocated").
    if entry.cluster().0 >= 2 {
        let freed_count = {
            let mut data = self.data.lock();
            match &self.fat {
                Fat::Fat12(fat12) => {
                    fat12.free_chain(data.deref_mut(), entry.cluster().0 as u16).await?
                }
                Fat::Fat16(fat16) => {
                    fat16.free_chain(data.deref_mut(), entry.cluster().0 as u16).await?
                }
                Fat::Fat32(fat32) => {
                    fat32.free_chain(data.deref_mut(), entry.cluster().0 as u32).await?
                }
            }
        };
        self.increment_free_count(freed_count);
    }
    {
        // Mark the directory entry deleted by setting its first name byte to
        // 0xE5; the rest of the entry bytes are left in place.
        let mut data = self.data.lock();
        let cluster_size = data.cluster_size;
        let entry_pos = if entry.parent_clus.0 == 0 {
            let (root_start, _) = self
                .fixed_root_dir_info()
                .expect("Fixed root info required for cluster 0");
            root_start + entry.offset_within_cluster
        } else {
            entry
                .parent_clus
                .to_bytes(self.info.data_start, cluster_size)
                + entry.offset_within_cluster
        };
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        data.write_all(&[0xE5]).await?;
    }
    Ok(())
}
/// Moves `entry` into `dest_dir` under `new_name`.
///
/// Implemented as copy-then-delete: a new entry (preserving attributes,
/// creation time, first cluster and size, with refreshed write/access
/// times) is written into `dest_dir`, a moved directory's `..` entry is
/// repointed when the parent changes, and only then is the old entry
/// marked deleted.
///
/// # Errors
///
/// Returns [`FatError::AlreadyExists`] if `new_name` is taken in `dest_dir`
/// and [`FatError::InvalidFilename`] if it cannot be encoded.
pub async fn rename(
    &self,
    entry: &FileEntry,
    dest_dir: &FatDir<'_, DATA>,
    new_name: &str,
) -> Result<FileEntry> {
    if dest_dir.find(new_name).await?.is_some() {
        return Err(FatError::AlreadyExists);
    }
    let short_name =
        ShortFileName::from_long_name(new_name, 0).map_err(|_| FatError::InvalidFilename)?;
    let (slot_cluster, slot_offset) = self.find_free_entry_slot_in_dir(dest_dir).await?;
    // Read the original raw entry so its metadata can be carried over.
    let original_raw = {
        let mut data = self.data.lock();
        let cluster_size = data.cluster_size;
        let entry_pos = if entry.parent_clus.0 == 0 {
            let (root_start, _) = self
                .fixed_root_dir_info()
                .expect("Fixed root info required for cluster 0");
            root_start + entry.offset_within_cluster
        } else {
            entry
                .parent_clus
                .to_bytes(self.info.data_start, cluster_size)
                + entry.offset_within_cluster
        };
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        data.read_struct::<RawDirectoryEntry>().await?
    };
    let original_file = unsafe { &original_raw.file };
    let mut raw_name = short_name.to_raw_bytes();
    kanji_short_name_fixup(&mut raw_name);
    let now = FatDateTime::now();
    // New entry: new name, refreshed write/access stamps, everything else
    // copied from the original.
    let new_entry = RawFileEntry {
        name: raw_name,
        attributes: original_file.attributes,
        reserved: original_file.reserved,
        creation_time_tenth: original_file.creation_time_tenth,
        creation_time: original_file.creation_time,
        creation_date: original_file.creation_date,
        last_access_date: now.date.to_le_bytes(),
        first_cluster_high: original_file.first_cluster_high,
        last_write_time: now.time.to_le_bytes(),
        last_write_date: now.date.to_le_bytes(),
        first_cluster_low: original_file.first_cluster_low,
        size: original_file.size,
    };
    self.write_raw_entry(slot_cluster, slot_offset, &new_entry, dest_dir.fixed_root)
        .await?;
    // A directory moved to a different parent must have its `..` entry
    // updated to point at the new parent.
    if entry.is_directory()
        && entry.cluster().0 >= 2
        && dest_dir.cluster != entry.parent_clus
    {
        let mut data = self.data.lock();
        let cluster_size = data.cluster_size;
        let dir_data_start =
            entry.cluster().to_bytes(self.info.data_start, cluster_size);
        // `..` is the second entry in the directory's first cluster.
        let dotdot_pos = dir_data_start + core::mem::size_of::<RawDirectoryEntry>();
        data.seek(SeekFrom::Start(dotdot_pos as u64)).await?;
        let mut dotdot = data.read_struct::<RawDirectoryEntry>().await?;
        let dotdot_file = unsafe { &mut dotdot.file };
        let parent_cluster = dest_dir.cluster.0 as u32;
        let (parent_high, parent_low) = match &self.fat {
            Fat::Fat12(_) | Fat::Fat16(_) => (0u16, parent_cluster as u16),
            Fat::Fat32(_) => ((parent_cluster >> 16) as u16, parent_cluster as u16),
        };
        dotdot_file.first_cluster_high =
            hadris_common::types::number::U16::<LittleEndian>::new(parent_high);
        dotdot_file.first_cluster_low =
            hadris_common::types::number::U16::<LittleEndian>::new(parent_low);
        data.seek(SeekFrom::Start(dotdot_pos as u64)).await?;
        data.write_all(bytemuck::bytes_of(&dotdot)).await?;
    }
    {
        // Mark the old entry deleted (first name byte = 0xE5) only after the
        // new entry has been written.
        let mut data = self.data.lock();
        let cluster_size = data.cluster_size;
        let entry_pos = if entry.parent_clus.0 == 0 {
            let (root_start, _) = self
                .fixed_root_dir_info()
                .expect("Fixed root info required for cluster 0");
            root_start + entry.offset_within_cluster
        } else {
            entry
                .parent_clus
                .to_bytes(self.info.data_start, cluster_size)
                + entry.offset_within_cluster
        };
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        data.write_all(&[0xE5]).await?;
    }
    Ok(FileEntry {
        short_name,
        #[cfg(feature = "lfn")]
        long_name: None,
        attr: DirEntryAttrFlags::from_bits_retain(original_file.attributes),
        size: original_file.size.get() as usize,
        parent_clus: slot_cluster,
        offset_within_cluster: slot_offset,
        cluster: entry.cluster(),
    })
}
/// Rewrites the directory entry for `entry` with a new size and first
/// cluster, refreshing the last-write/last-access timestamps.
async fn update_entry_size_and_cluster(
    &self,
    entry: &FileEntry,
    new_size: usize,
    first_cluster: Cluster<usize>,
    fixed_root: Option<(usize, usize)>,
) -> Result<()> {
    use super::fat_table::Fat;
    let mut data = self.data.lock();
    let cluster_size = data.cluster_size;
    // Resolve the on-disk position of the entry (fixed root vs. cluster).
    let entry_pos = if entry.parent_clus.0 == 0 {
        let (root_start, _) = fixed_root.expect("Fixed root info required for cluster 0");
        root_start + entry.offset_within_cluster
    } else {
        entry.parent_clus.to_bytes(self.info.data_start, cluster_size)
            + entry.offset_within_cluster
    };
    // Read-modify-write so unrelated fields are preserved.
    data.seek(SeekFrom::Start(entry_pos as u64)).await?;
    let mut raw = data.read_struct::<RawDirectoryEntry>().await?;
    let file = unsafe { &mut raw.file };
    file.size = hadris_common::types::number::U32::<LittleEndian>::new(new_size as u32);
    // Clusters below 2 mean "no data": both halves are cleared. Otherwise
    // FAT32 splits the number across high/low; FAT12/16 use only the low.
    let (high, low) = match (&self.fat, first_cluster.0) {
        (_, c) if c < 2 => (0u16, 0u16),
        (Fat::Fat32(_), c) => ((c >> 16) as u16, c as u16),
        (_, c) => (0u16, c as u16),
    };
    file.first_cluster_high = hadris_common::types::number::U16::<LittleEndian>::new(high);
    file.first_cluster_low = hadris_common::types::number::U16::<LittleEndian>::new(low);
    let stamp = FatDateTime::now();
    file.last_write_date = stamp.date.to_le_bytes();
    file.last_write_time = stamp.time.to_le_bytes();
    file.last_access_date = stamp.date.to_le_bytes();
    data.seek(SeekFrom::Start(entry_pos as u64)).await?;
    data.write_all(bytemuck::bytes_of(&raw)).await?;
    Ok(())
}
}
#[cfg(feature = "write")]
impl<DATA: Read + Write + Seek> FatFs<DATA> {
    /// Replaces the attribute byte of `entry`'s directory entry with `attrs`.
    ///
    /// All other fields (timestamps, size, clusters) are left untouched.
    pub async fn set_attributes(
        &self,
        entry: &FileEntry,
        attrs: DirEntryAttrFlags,
    ) -> Result<()> {
        let mut data = self.data.lock();
        let cluster_size = data.cluster_size;
        // Entries in the fixed FAT12/16 root are addressed by byte offset;
        // everything else lives inside a data-area cluster.
        let base = if entry.parent_clus.0 == 0 {
            self.fixed_root_dir_info()
                .expect("Fixed root info required for cluster 0")
                .0
        } else {
            entry.parent_clus.to_bytes(self.info.data_start, cluster_size)
        };
        let entry_pos = base + entry.offset_within_cluster;
        // Read-modify-write so every other field survives unchanged.
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        let mut raw = data.read_struct::<RawDirectoryEntry>().await?;
        let file = unsafe { &mut raw.file };
        file.attributes = attrs.bits();
        data.seek(SeekFrom::Start(entry_pos as u64)).await?;
        data.write_all(bytemuck::bytes_of(&raw)).await?;
        Ok(())
    }
}
#[cfg(feature = "write")]
impl<DATA: Read + Write + Seek> FatFs<DATA> {
    /// Persists the FAT32 FSInfo sector (when applicable) and flushes the
    /// underlying device.
    pub async fn sync(&self) -> Result<()> {
        self.write_fsinfo().await?;
        let mut data = self.data.lock();
        data.flush().await?;
        Ok(())
    }
    /// Writes the cached free-cluster count and next-free hint back into the
    /// FSInfo sector. No-op for non-FAT32 volumes, which have no FSInfo.
    async fn write_fsinfo(&self) -> Result<()> {
        use super::fs::FatFsExt;
        use crate::raw::RawFsInfo;
        let ext = match &self.ext {
            FatFsExt::Fat32(ext) => ext,
            _ => return Ok(()),
        };
        // Read-modify-write so the rest of the FSInfo sector stays intact.
        let mut data = self.data.lock();
        data.seek_sector(ext.fs_info_sec).await?;
        let mut fs_info = data.read_struct::<RawFsInfo>().await?;
        fs_info.free_count =
            hadris_common::types::number::U32::<LittleEndian>::new(ext.free_count.get());
        fs_info.next_free =
            hadris_common::types::number::U32::<LittleEndian>::new(ext.next_free.get().0);
        data.seek_sector(ext.fs_info_sec).await?;
        data.write_all(bytemuck::bytes_of(&fs_info)).await?;
        Ok(())
    }
    /// Decrements the cached FAT32 free-cluster count after an allocation.
    /// The 0xFFFFFFFF "unknown" sentinel is left untouched.
    pub(crate) fn decrement_free_count(&self) {
        use super::fs::FatFsExt;
        if let FatFsExt::Fat32(ext) = &self.ext {
            let count = ext.free_count.get();
            if count > 0 && count != 0xFFFFFFFF {
                ext.free_count.set(count - 1);
            }
        }
    }
    /// Adds `amount` freed clusters back to the cached FAT32 free count.
    /// The 0xFFFFFFFF "unknown" sentinel is left untouched.
    pub(crate) fn increment_free_count(&self, amount: u32) {
        use super::fs::FatFsExt;
        if let FatFsExt::Fat32(ext) = &self.ext {
            let count = ext.free_count.get();
            if count != 0xFFFFFFFF {
                ext.free_count.set(count.saturating_add(amount));
            }
        }
    }
    /// Records `cluster + 1` as the next allocation search hint (FAT32 only).
    pub(crate) fn update_next_free_hint(&self, cluster: u32) {
        use super::fs::FatFsExt;
        if let FatFsExt::Fat32(ext) = &self.ext {
            ext.next_free.set(Cluster(cluster.saturating_add(1)));
        }
    }
    /// Returns the cached free-cluster count, or `None` when it is unknown
    /// (non-FAT32 volume, or the cached value is the 0xFFFFFFFF sentinel).
    pub fn free_cluster_count(&self) -> Option<u32> {
        use super::fs::FatFsExt;
        match &self.ext {
            FatFsExt::Fat32(ext) => {
                let count = ext.free_count.get();
                if count != 0xFFFFFFFF {
                    Some(count)
                } else {
                    None
                }
            }
            _ => None,
        }
    }
    /// Returns the next-free-cluster search hint when it is valid (>= 2 and
    /// not the 0xFFFFFFFF sentinel); FAT32 only.
    pub fn next_free_cluster_hint(&self) -> Option<u32> {
        use super::fs::FatFsExt;
        match &self.ext {
            FatFsExt::Fat32(ext) => {
                let hint = ext.next_free.get().0;
                if hint >= 2 && hint != 0xFFFFFFFF {
                    Some(hint)
                } else {
                    None
                }
            }
            _ => None,
        }
    }
}
}