1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
macro_rules! allocate_size {
    ($file:ty) => {
        /// Returns the number of bytes the filesystem has actually allocated
        /// for `file` (the on-disk `AllocationSize`, which may exceed the
        /// logical file length because of cluster rounding).
        ///
        /// # Errors
        ///
        /// Returns the last OS error if `GetFileInformationByHandleEx` fails.
        pub fn allocated_size(file: &$file) -> Result<u64> {
            // SAFETY: FILE_STANDARD_INFO is a plain C struct for which an
            // all-zero bit pattern is valid; the kernel overwrites it on success.
            let mut std_info: FILE_STANDARD_INFO = unsafe { mem::zeroed() };
            // SAFETY: the handle is valid for the lifetime of `file`, and the
            // buffer pointer/length pair describes exactly one FILE_STANDARD_INFO.
            let ok = unsafe {
                GetFileInformationByHandleEx(
                    file.as_raw_handle() as HANDLE,
                    FileStandardInfo,
                    &mut std_info as *mut _ as *mut _,
                    mem::size_of::<FILE_STANDARD_INFO>() as u32,
                )
            };
            if ok == 0 {
                return Err(Error::last_os_error());
            }
            Ok(std_info.AllocationSize as u64)
        }
    };
}
macro_rules! allocate {
    ($file:ty) => {
        /// Ensures that at least `len` bytes of physical disk space are
        /// reserved for `file`, extending the logical file size to `len`
        /// only when it currently falls short.
        ///
        /// # Errors
        ///
        /// Returns the last OS error if querying the allocation, setting the
        /// allocation, or extending the file length fails.
        pub fn allocate(file: &$file, len: u64) -> Result<()> {
            // If the file already has `len` bytes of cluster-aligned
            // allocation, do nothing. Calling `set_len` in this branch
            // (to extend the EOF pointer to `len` when it lags behind
            // the `AllocationSize`) has been observed to trigger
            // Windows quirks on the subsequent buffered I/O path — see
            // issue #13. Skipping it is safe: the physical disk space
            // the caller asked for is already reserved, and
            // subsequent writes will extend EOF naturally.
            if allocated_size(file)? >= len {
                return Ok(());
            }
            // Reserve the space by setting FILE_ALLOCATION_INFO on the handle.
            // SAFETY: the zeroed struct is fully initialized before use, the
            // handle is valid for the lifetime of `file`, and the pointer/size
            // pair describes exactly one FILE_ALLOCATION_INFO.
            unsafe {
                let mut alloc_info: FILE_ALLOCATION_INFO = mem::zeroed();
                alloc_info.AllocationSize = len as i64;
                if SetFileInformationByHandle(
                    file.as_raw_handle() as HANDLE,
                    FileAllocationInfo,
                    &mut alloc_info as *mut _ as *mut _,
                    mem::size_of::<FILE_ALLOCATION_INFO>() as u32,
                ) == 0
                {
                    return Err(Error::last_os_error());
                }
            }
            // Bring the logical length up to `len` only when it lags behind;
            // truncating here would discard data, so never shrink.
            if file.metadata()?.len() < len {
                file.set_len(len)?;
            }
            Ok(())
        }
    };
}
// Generates the shared `#[cfg(test)] mod test` for each backing file type;
// callers pass the `use` items that bring `fs` and `FileExt` into scope.
macro_rules! test_mod {
    ($($use_stmt:item)*) => {
        #[cfg(test)]
        mod test {
            extern crate tempfile;
            use crate::TryLockError;
            $(
                $use_stmt
            )*
            /// A file handle may not be exclusively locked multiple times, or exclusively locked and then
            /// shared locked.
            #[test]
            fn lock_non_reentrant() {
                let tempdir = tempfile::TempDir::with_prefix("fs4").unwrap();
                let path = tempdir.path().join("fs4");
                let file = fs::OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .open(path)
                    .unwrap();
                // Taking a second exclusive lock on an already exclusively
                // locked handle fails with WouldBlock.
                FileExt::lock(&file).unwrap();
                assert!(matches!(
                    FileExt::try_lock(&file),
                    Err(TryLockError::WouldBlock),
                ));
                FileExt::unlock(&file).unwrap();
                // A shared lock followed by an exclusive lock on the same
                // handle also fails.
                FileExt::lock_shared(&file).unwrap();
                assert!(matches!(
                    FileExt::try_lock(&file),
                    Err(TryLockError::WouldBlock),
                ));
            }
            /// A file handle can hold an exclusive lock and any number of shared locks, all of which must
            /// be unlocked independently.
            #[test]
            fn lock_layering() {
                let tempdir = tempfile::TempDir::with_prefix("fs4").unwrap();
                let path = tempdir.path().join("fs4");
                let file = fs::OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .open(path)
                    .unwrap();
                // Take an exclusive lock, layer two shared locks on top of it,
                // then fail to take a second exclusive lock.
                FileExt::lock(&file).unwrap();
                FileExt::lock_shared(&file).unwrap();
                FileExt::lock_shared(&file).unwrap();
                assert!(
                    matches!(FileExt::try_lock(&file), Err(TryLockError::WouldBlock)),
                    "the first try lock exclusive",
                );
                // Pop one of the shared locks and try again.
                FileExt::unlock(&file).unwrap();
                assert!(
                    matches!(FileExt::try_lock(&file), Err(TryLockError::WouldBlock)),
                    "pop the first shared lock",
                );
                // Pop the second shared lock and try again.
                FileExt::unlock(&file).unwrap();
                assert!(
                    matches!(FileExt::try_lock(&file), Err(TryLockError::WouldBlock)),
                    "pop the second shared lock",
                );
                // Pop the exclusive lock and finally succeed.
                FileExt::unlock(&file).unwrap();
                FileExt::lock(&file).unwrap();
            }
            /// A file handle with multiple open locks will have all locks closed on drop.
            #[test]
            fn lock_layering_cleanup() {
                let tempdir = tempfile::TempDir::with_prefix("fs4").unwrap();
                let path = tempdir.path().join("fs4");
                let file1 = fs::OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .open(&path)
                    .unwrap();
                let file2 = fs::OpenOptions::new()
                    .read(true)
                    .write(true)
                    .create(true)
                    .open(&path)
                    .unwrap();
                // A shared lock on the first handle blocks an exclusive lock
                // on a second handle to the same file...
                FileExt::lock_shared(&file1).unwrap();
                assert!(matches!(
                    FileExt::try_lock(&file2),
                    Err(TryLockError::WouldBlock),
                ));
                // ...until the first handle is dropped, which releases its lock.
                drop(file1);
                FileExt::lock(&file2).unwrap();
            }
        }
    };
}
// Feature-gated backend modules. The cfg_* macros are defined elsewhere in
// the crate; presumably each expands to a #[cfg(feature = "...")] gate for
// the matching cargo feature — confirm against the macro definitions.
cfg_sync! {
    pub(crate) mod std_impl;
}
cfg_fs_err2! {
    pub(crate) mod fs_err2_impl;
}
cfg_fs_err3! {
    pub(crate) mod fs_err3_impl;
}