use super::{
display::{DisplayRecordedRunInfo, DisplayRecordedRunInfoDetailed, RunListAlignment, Styles},
format::{
RUN_LOG_FILE_NAME, RecordedRunList, RunsJsonWritePermission, STORE_FORMAT_VERSION,
STORE_ZIP_FILE_NAME, StoreFormatVersion, StoreVersionIncompatibility,
},
recorder::{RunRecorder, StoreSizes},
retention::{
PruneKind, PrunePlan, PruneResult, RecordRetentionPolicy, delete_orphaned_dirs, delete_runs,
},
run_id_index::{PrefixResolutionError, RunIdIndex, RunIdSelector, ShortestRunIdPrefix},
};
use crate::{
errors::{RunIdResolutionError, RunStoreError},
helpers::{ThemeCharacters, decimal_char_width},
redact::Redactor,
};
use camino::{Utf8Path, Utf8PathBuf};
use chrono::{DateTime, FixedOffset, Local, TimeDelta, Utc};
use debug_ignore::DebugIgnore;
use quick_junit::ReportUuid;
use semver::Version;
use std::{
collections::{BTreeMap, HashMap, HashSet},
fmt,
fs::{File, TryLockError},
io,
num::NonZero,
thread,
time::{Duration, Instant},
};
/// Name of the advisory lock file guarding concurrent access to the runs directory.
static RUNS_LOCK_FILE_NAME: &str = "runs.lock";
/// Name of the zstd-compressed JSON file that lists all recorded runs.
static RUNS_JSON_FILE_NAME: &str = "runs.json.zst";
/// A borrowed reference to the directory that contains one subdirectory per recorded run.
#[derive(Clone, Copy, Debug)]
pub struct StoreRunsDir<'a>(&'a Utf8Path);
impl<'a> StoreRunsDir<'a> {
pub fn new(path: &'a Utf8Path) -> Self {
Self(path)
}
pub fn run_dir(self, run_id: ReportUuid) -> Utf8PathBuf {
self.0.join(run_id.to_string())
}
pub fn run_files(self, run_id: ReportUuid) -> StoreRunFiles {
StoreRunFiles {
run_dir: self.run_dir(run_id),
}
}
pub fn as_path(self) -> &'a Utf8Path {
self.0
}
}
/// Existence checks for the per-run files on disk.
pub trait RunFilesExist {
    /// Returns true if the run's store zip file exists.
    fn store_zip_exists(&self) -> bool;
    /// Returns true if the run's log file exists.
    fn run_log_exists(&self) -> bool;
}
/// File-existence checks rooted at a specific run's directory.
pub struct StoreRunFiles {
    // Directory that holds this run's files (store zip and run log).
    run_dir: Utf8PathBuf,
}
impl RunFilesExist for StoreRunFiles {
    /// Checks whether the store zip is present in the run directory.
    fn store_zip_exists(&self) -> bool {
        let path = self.run_dir.join(STORE_ZIP_FILE_NAME);
        path.exists()
    }

    /// Checks whether the run log is present in the run directory.
    fn run_log_exists(&self) -> bool {
        let path = self.run_dir.join(RUN_LOG_FILE_NAME);
        path.exists()
    }
}
/// The on-disk store of recorded runs; entry point for acquiring shared or
/// exclusive access to the run list.
#[derive(Debug)]
pub struct RunStore {
    // The `runs` subdirectory inside the store directory.
    runs_dir: Utf8PathBuf,
}
impl RunStore {
pub fn new(store_dir: &Utf8Path) -> Result<Self, RunStoreError> {
let runs_dir = store_dir.join("runs");
std::fs::create_dir_all(&runs_dir).map_err(|error| RunStoreError::RunDirCreate {
run_dir: runs_dir.clone(),
error,
})?;
Ok(Self { runs_dir })
}
pub fn runs_dir(&self) -> StoreRunsDir<'_> {
StoreRunsDir(&self.runs_dir)
}
pub fn lock_shared(&self) -> Result<SharedLockedRunStore<'_>, RunStoreError> {
let lock_file_path = self.runs_dir.join(RUNS_LOCK_FILE_NAME);
let file = std::fs::OpenOptions::new()
.create(true)
.truncate(false)
.write(true)
.open(&lock_file_path)
.map_err(|error| RunStoreError::FileLock {
path: lock_file_path.clone(),
error,
})?;
acquire_lock_with_retry(&file, &lock_file_path, LockKind::Shared)?;
let result = read_runs_json(&self.runs_dir)?;
let run_id_index = RunIdIndex::new(&result.runs);
Ok(SharedLockedRunStore {
runs_dir: StoreRunsDir(&self.runs_dir),
locked_file: DebugIgnore(file),
runs: result.runs,
write_permission: result.write_permission,
run_id_index,
})
}
pub fn lock_exclusive(&self) -> Result<ExclusiveLockedRunStore<'_>, RunStoreError> {
let lock_file_path = self.runs_dir.join(RUNS_LOCK_FILE_NAME);
let file = std::fs::OpenOptions::new()
.create(true)
.truncate(false)
.write(true)
.open(&lock_file_path)
.map_err(|error| RunStoreError::FileLock {
path: lock_file_path.clone(),
error,
})?;
acquire_lock_with_retry(&file, &lock_file_path, LockKind::Exclusive)?;
let result = read_runs_json(&self.runs_dir)?;
Ok(ExclusiveLockedRunStore {
runs_dir: StoreRunsDir(&self.runs_dir),
locked_file: DebugIgnore(file),
runs: result.runs,
last_pruned_at: result.last_pruned_at,
write_permission: result.write_permission,
})
}
}
/// A run store held under an exclusive (writer) lock; permits mutation of the
/// run list and on-disk pruning.
#[derive(Debug)]
pub struct ExclusiveLockedRunStore<'store> {
    runs_dir: StoreRunsDir<'store>,
    // Never read; kept alive so the OS file lock is released on drop.
    #[expect(dead_code, reason = "held for lock duration")]
    locked_file: DebugIgnore<File>,
    // In-memory copy of the recorded run list.
    runs: Vec<RecordedRunInfo>,
    // When the store was last pruned, if ever.
    last_pruned_at: Option<DateTime<Utc>>,
    // Whether this nextest version may rewrite runs.json.zst.
    write_permission: RunsJsonWritePermission,
}
impl<'store> ExclusiveLockedRunStore<'store> {
    /// Returns the runs directory this store manages.
    pub fn runs_dir(&self) -> StoreRunsDir<'store> {
        self.runs_dir
    }

    /// Returns whether `runs.json.zst` may be rewritten by this version.
    pub fn write_permission(&self) -> RunsJsonWritePermission {
        self.write_permission
    }

    /// Returns an error if the on-disk run list was produced by a newer
    /// nextest version and must not be rewritten by this one.
    fn check_write_permission(&self) -> Result<(), RunStoreError> {
        if let RunsJsonWritePermission::Denied {
            file_version,
            max_supported_version,
        } = self.write_permission
        {
            Err(RunStoreError::FormatVersionTooNew {
                file_version,
                max_supported_version,
            })
        } else {
            Ok(())
        }
    }

    /// Records final sizes, status, and duration for `run_id`, persisting the
    /// updated run list if the run was found.
    ///
    /// Returns `Ok(false)` if no run with that ID exists in the list.
    pub fn complete_run(
        &mut self,
        run_id: ReportUuid,
        sizes: StoreSizes,
        status: RecordedRunStatus,
        duration_secs: Option<f64>,
    ) -> Result<bool, RunStoreError> {
        self.check_write_permission()?;
        let found = self.mark_run_completed_inner(run_id, sizes, status, duration_secs);
        if found {
            write_runs_json(self.runs_dir.as_path(), &self.runs, self.last_pruned_at)?;
        }
        Ok(found)
    }

    /// Updates the in-memory entry for `run_id` with final sizes, status,
    /// duration, and a fresh `last_written_at` timestamp. Returns whether the
    /// run was found.
    fn mark_run_completed_inner(
        &mut self,
        run_id: ReportUuid,
        sizes: StoreSizes,
        status: RecordedRunStatus,
        duration_secs: Option<f64>,
    ) -> bool {
        for run in &mut self.runs {
            if run.run_id == run_id {
                run.sizes = RecordedSizes {
                    log: ComponentSizes {
                        compressed: sizes.log.compressed,
                        uncompressed: sizes.log.uncompressed,
                        entries: sizes.log.entries,
                    },
                    store: ComponentSizes {
                        compressed: sizes.store.compressed,
                        uncompressed: sizes.store.uncompressed,
                        entries: sizes.store.entries,
                    },
                };
                run.status = status;
                run.duration_secs = duration_secs;
                run.last_written_at = Local::now().fixed_offset();
                return true;
            }
        }
        false
    }

    /// Applies the retention policy: deletes expired runs and orphaned run
    /// directories, then persists the run list if anything was removed.
    pub fn prune(
        &mut self,
        policy: &RecordRetentionPolicy,
        kind: PruneKind,
    ) -> Result<PruneResult, RunStoreError> {
        self.check_write_permission()?;
        let now = Utc::now();
        let to_delete: HashSet<_> = policy
            .compute_runs_to_delete(&self.runs, now)
            .into_iter()
            .collect();
        let runs_dir = self.runs_dir();
        let mut result = if to_delete.is_empty() {
            PruneResult::default()
        } else {
            delete_runs(runs_dir, &mut self.runs, &to_delete)
        };
        result.kind = kind;
        // Directories with no matching run-list entry are orphans; clean them
        // up regardless of the policy outcome.
        let known_runs: HashSet<_> = self.runs.iter().map(|r| r.run_id).collect();
        delete_orphaned_dirs(self.runs_dir, &known_runs, &mut result);
        if result.deleted_count > 0 || result.orphans_deleted > 0 {
            self.last_pruned_at = Some(now);
            write_runs_json(self.runs_dir.as_path(), &self.runs, self.last_pruned_at)?;
        }
        Ok(result)
    }

    /// Runs an implicit prune if enough time has passed since the last one, or
    /// if the retention limits are substantially exceeded.
    pub fn prune_if_needed(
        &mut self,
        policy: &RecordRetentionPolicy,
    ) -> Result<Option<PruneResult>, RunStoreError> {
        const PRUNE_INTERVAL: TimeDelta = match TimeDelta::try_days(1) {
            Some(d) => d,
            None => panic!("1 day should always be a valid TimeDelta"),
        };
        // Prune early if limits are exceeded by 50% or more.
        const LIMIT_EXCEEDED_FACTOR: f64 = 1.5;
        let now = Utc::now();
        let time_since_last_prune = self
            .last_pruned_at
            .map(|last| now.signed_duration_since(last))
            .unwrap_or(TimeDelta::MAX);
        let should_prune = time_since_last_prune >= PRUNE_INTERVAL
            || policy.limits_exceeded_by_factor(&self.runs, LIMIT_EXCEEDED_FACTOR);
        if should_prune {
            Ok(Some(self.prune(policy, PruneKind::Implicit)?))
        } else {
            Ok(None)
        }
    }

    /// Registers a new (incomplete) run in the run list, persists it, and
    /// returns a recorder for writing the run's files along with the shortest
    /// unique prefix of its ID.
    ///
    /// Consumes `self`, releasing the exclusive lock when this call returns.
    #[expect(clippy::too_many_arguments)]
    pub(crate) fn create_run_recorder(
        mut self,
        run_id: ReportUuid,
        nextest_version: Version,
        started_at: DateTime<FixedOffset>,
        cli_args: Vec<String>,
        build_scope_args: Vec<String>,
        env_vars: BTreeMap<String, String>,
        max_output_size: bytesize::ByteSize,
        parent_run_id: Option<ReportUuid>,
    ) -> Result<(RunRecorder, ShortestRunIdPrefix), RunStoreError> {
        self.check_write_permission()?;
        let now = Local::now().fixed_offset();
        let run = RecordedRunInfo {
            run_id,
            store_format_version: STORE_FORMAT_VERSION,
            nextest_version,
            started_at,
            last_written_at: now,
            duration_secs: None,
            cli_args,
            build_scope_args,
            env_vars,
            parent_run_id,
            sizes: RecordedSizes::default(),
            status: RecordedRunStatus::Incomplete,
        };
        self.runs.push(run);
        // Touch the parent run so it isn't pruned out from under its child.
        if let Some(parent_run_id) = parent_run_id
            && let Some(parent_run) = self.runs.iter_mut().find(|r| r.run_id == parent_run_id)
        {
            parent_run.last_written_at = now;
        }
        write_runs_json(self.runs_dir.as_path(), &self.runs, self.last_pruned_at)?;
        let index = RunIdIndex::new(&self.runs);
        let unique_prefix = index
            .shortest_unique_prefix(run_id)
            .expect("run was just added to the list");
        let run_dir = self.runs_dir().run_dir(run_id);
        let recorder = RunRecorder::new(run_dir, max_output_size)?;
        Ok((recorder, unique_prefix))
    }
}
/// Metadata for a single recorded run, as persisted in the run list.
#[derive(Clone, Debug)]
pub struct RecordedRunInfo {
    /// Unique identifier for this run.
    pub run_id: ReportUuid,
    /// Store format version the run was written with.
    pub store_format_version: StoreFormatVersion,
    /// Version of nextest that recorded the run.
    pub nextest_version: Version,
    /// When the run started.
    pub started_at: DateTime<FixedOffset>,
    /// When this entry was last written (updated on completion and when a
    /// child run is created).
    pub last_written_at: DateTime<FixedOffset>,
    /// Run duration in seconds; `None` until the run completes.
    pub duration_secs: Option<f64>,
    /// Command-line arguments the run was invoked with.
    pub cli_args: Vec<String>,
    /// Arguments defining the build scope — presumably used for replay; verify
    /// against the recorder.
    pub build_scope_args: Vec<String>,
    /// Environment variables captured for the run.
    pub env_vars: BTreeMap<String, String>,
    /// The run this one was derived from, if any.
    pub parent_run_id: Option<ReportUuid>,
    /// On-disk size accounting for the run's components.
    pub sizes: RecordedSizes,
    /// Completion status of the run.
    pub status: RecordedRunStatus,
}
/// Size accounting for a recorded run, split by component.
#[derive(Clone, Copy, Debug, Default)]
pub struct RecordedSizes {
    /// Sizes for the run log.
    pub log: ComponentSizes,
    /// Sizes for the store zip.
    pub store: ComponentSizes,
}

/// Sizes for a single stored component.
#[derive(Clone, Copy, Debug, Default)]
pub struct ComponentSizes {
    /// Bytes on disk after compression.
    pub compressed: u64,
    /// Bytes before compression.
    pub uncompressed: u64,
    /// Number of entries in the component.
    pub entries: u64,
}

impl RecordedSizes {
    /// Compressed bytes summed over both components.
    pub fn total_compressed(&self) -> u64 {
        let Self { log, store } = self;
        log.compressed + store.compressed
    }

    /// Uncompressed bytes summed over both components.
    pub fn total_uncompressed(&self) -> u64 {
        let Self { log, store } = self;
        log.uncompressed + store.uncompressed
    }

    /// Entry count summed over both components.
    pub fn total_entries(&self) -> u64 {
        let Self { log, store } = self;
        log.entries + store.entries
    }
}
/// Completion status of a recorded run.
#[derive(Clone, Debug)]
pub enum RecordedRunStatus {
    /// The run was started but never marked complete.
    Incomplete,
    /// The run finished normally.
    Completed(CompletedRunStats),
    /// The run was cancelled before finishing.
    Cancelled(CompletedRunStats),
    /// A stress run finished normally.
    StressCompleted(StressCompletedRunStats),
    /// A stress run was cancelled before finishing.
    StressCancelled(StressCompletedRunStats),
    /// Status written by a newer nextest version that this one can't interpret.
    Unknown,
}

impl RecordedRunStatus {
    /// A short human-readable label for this status.
    pub fn short_status_str(&self) -> &'static str {
        match self {
            Self::Completed(_) => "completed",
            Self::Cancelled(_) => "cancelled",
            Self::StressCompleted(_) => "stress completed",
            Self::StressCancelled(_) => "stress cancelled",
            Self::Incomplete => "incomplete",
            Self::Unknown => "unknown",
        }
    }

    /// The process exit code recorded for the run, if it finished.
    pub fn exit_code(&self) -> Option<i32> {
        let stats_exit_code = match self {
            Self::Completed(stats) | Self::Cancelled(stats) => stats.exit_code,
            Self::StressCompleted(stats) | Self::StressCancelled(stats) => stats.exit_code,
            Self::Incomplete | Self::Unknown => return None,
        };
        Some(stats_exit_code)
    }
}

/// Summary statistics for a finished (or cancelled) regular run.
#[derive(Clone, Copy, Debug)]
pub struct CompletedRunStats {
    /// Number of tests the run started with.
    pub initial_run_count: usize,
    /// Number of tests that passed.
    pub passed: usize,
    /// Number of tests that failed.
    pub failed: usize,
    /// Exit code the run finished with.
    pub exit_code: i32,
}

/// Summary statistics for a finished (or cancelled) stress run.
#[derive(Clone, Copy, Debug)]
pub struct StressCompletedRunStats {
    /// Planned iteration count; `None` if unbounded.
    pub initial_iteration_count: Option<NonZero<u32>>,
    /// Number of successful iterations.
    pub success_count: u32,
    /// Number of failed iterations.
    pub failed_count: u32,
    /// Exit code the run finished with.
    pub exit_code: i32,
}
/// Whether a recorded run can be replayed.
#[derive(Clone, Debug)]
pub enum ReplayabilityStatus {
    /// All required files are present and the run finished.
    Replayable,
    /// Replay is blocked for the listed reasons.
    NotReplayable(Vec<NonReplayableReason>),
    /// Nothing blocks replay, but the run never finished recording.
    Incomplete,
}
/// A specific reason a recorded run cannot be replayed.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NonReplayableReason {
    /// The run's store format version can't be read by this nextest version.
    StoreVersionIncompatible {
        incompatibility: StoreVersionIncompatibility,
    },
    /// The store zip file is missing from the run directory.
    MissingStoreZip,
    /// The run log file is missing from the run directory.
    MissingRunLog,
    /// The run status was written by a newer nextest version.
    StatusUnknown,
}
impl fmt::Display for NonReplayableReason {
    /// Formats the reason as a user-facing sentence fragment.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::StoreVersionIncompatible { incompatibility } => {
                write!(f, "store format version incompatible: {}", incompatibility)
            }
            Self::MissingStoreZip => f.write_str("store.zip is missing"),
            Self::MissingRunLog => f.write_str("run.log.zst is missing"),
            Self::StatusUnknown => {
                f.write_str("run status is unknown (from a newer nextest version)")
            }
        }
    }
}
/// The outcome of resolving a run selector to a concrete run.
#[derive(Clone, Copy, Debug)]
pub struct ResolveRunIdResult {
    /// The resolved run ID.
    pub run_id: ReportUuid,
}
impl RecordedRunStatus {
    /// Decimal width of the passed/success count, used for column alignment in
    /// run listings; 0 when the status carries no count.
    pub fn passed_count_width(&self) -> usize {
        match self {
            Self::Completed(stats) | Self::Cancelled(stats) => decimal_char_width(stats.passed),
            Self::StressCompleted(stats) | Self::StressCancelled(stats) => {
                decimal_char_width(stats.success_count)
            }
            Self::Incomplete | Self::Unknown => 0,
        }
    }
}
impl RecordedRunInfo {
    /// Determines whether this run can be replayed, given the current state of
    /// its on-disk files.
    ///
    /// All blocking reasons are collected (not just the first) so that they
    /// can be reported together; `Incomplete` is returned only when nothing
    /// blocks replay but the run never finished recording.
    pub fn check_replayability(&self, files: &dyn RunFilesExist) -> ReplayabilityStatus {
        let mut reasons = Vec::new();
        if let Err(incompatibility) = self
            .store_format_version
            .check_readable_by(STORE_FORMAT_VERSION)
        {
            reasons.push(NonReplayableReason::StoreVersionIncompatible { incompatibility });
        }
        if !files.store_zip_exists() {
            reasons.push(NonReplayableReason::MissingStoreZip);
        }
        if !files.run_log_exists() {
            reasons.push(NonReplayableReason::MissingRunLog);
        }
        if matches!(self.status, RecordedRunStatus::Unknown) {
            reasons.push(NonReplayableReason::StatusUnknown);
        }
        if !reasons.is_empty() {
            ReplayabilityStatus::NotReplayable(reasons)
        } else if matches!(self.status, RecordedRunStatus::Incomplete) {
            ReplayabilityStatus::Incomplete
        } else {
            ReplayabilityStatus::Replayable
        }
    }

    /// Returns a one-line display adapter for run listings.
    pub fn display<'a>(
        &'a self,
        run_id_index: &'a RunIdIndex,
        replayability: &'a ReplayabilityStatus,
        alignment: RunListAlignment,
        styles: &'a Styles,
        redactor: &'a Redactor,
    ) -> DisplayRecordedRunInfo<'a> {
        DisplayRecordedRunInfo::new(
            self,
            run_id_index,
            replayability,
            alignment,
            styles,
            redactor,
        )
    }

    /// Returns a multi-line display adapter with full run details.
    pub fn display_detailed<'a>(
        &'a self,
        run_id_index: &'a RunIdIndex,
        replayability: &'a ReplayabilityStatus,
        now: DateTime<Utc>,
        styles: &'a Styles,
        theme_characters: &'a ThemeCharacters,
        redactor: &'a Redactor,
    ) -> DisplayRecordedRunInfoDetailed<'a> {
        DisplayRecordedRunInfoDetailed::new(
            self,
            run_id_index,
            replayability,
            now,
            styles,
            theme_characters,
            redactor,
        )
    }
}
/// Data deserialized from `runs.json.zst` (or defaults if the file is absent).
struct ReadRunsJsonResult {
    // Recorded runs, empty when the file does not exist yet.
    runs: Vec<RecordedRunInfo>,
    // When the store was last pruned, if ever.
    last_pruned_at: Option<DateTime<Utc>>,
    // Whether this nextest version may rewrite the file.
    write_permission: RunsJsonWritePermission,
}
/// Loads and deserializes the zstd-compressed run list from `runs_dir`.
///
/// A missing file is not an error: it yields an empty run list with write
/// permission granted.
fn read_runs_json(runs_dir: &Utf8Path) -> Result<ReadRunsJsonResult, RunStoreError> {
    let runs_json_path = runs_dir.join(RUNS_JSON_FILE_NAME);
    let file = match File::open(&runs_json_path) {
        Ok(file) => file,
        Err(error) if error.kind() == io::ErrorKind::NotFound => {
            // No file yet: nothing has been recorded in this store.
            return Ok(ReadRunsJsonResult {
                runs: Vec::new(),
                last_pruned_at: None,
                write_permission: RunsJsonWritePermission::Allowed,
            });
        }
        Err(error) => {
            return Err(RunStoreError::RunListRead {
                path: runs_json_path,
                error,
            });
        }
    };
    let decoder = zstd::stream::Decoder::new(file).map_err(|error| RunStoreError::RunListRead {
        path: runs_json_path.clone(),
        error,
    })?;
    let list: RecordedRunList =
        serde_json::from_reader(decoder).map_err(|error| RunStoreError::RunListDeserialize {
            path: runs_json_path,
            error,
        })?;
    // Capture the permission before consuming the list.
    let write_permission = list.write_permission();
    let data = list.into_data();
    Ok(ReadRunsJsonResult {
        runs: data.runs,
        last_pruned_at: data.last_pruned_at,
        write_permission,
    })
}
/// Serializes the run list to `runs.json.zst`, writing atomically so readers
/// never observe a partially written file.
fn write_runs_json(
    runs_dir: &Utf8Path,
    runs: &[RecordedRunInfo],
    last_pruned_at: Option<DateTime<Utc>>,
) -> Result<(), RunStoreError> {
    // Default zstd compression level, matching what was used before.
    const ZSTD_LEVEL: i32 = 3;
    let runs_json_path = runs_dir.join(RUNS_JSON_FILE_NAME);
    let list = RecordedRunList::from_data(runs, last_pruned_at);
    let atomic_file = atomicwrites::AtomicFile::new(&runs_json_path, atomicwrites::AllowOverwrite);
    atomic_file
        .write(|file| {
            let mut encoder = zstd::stream::Encoder::new(file, ZSTD_LEVEL)?;
            serde_json::to_writer_pretty(&mut encoder, &list)?;
            encoder.finish()?;
            Ok(())
        })
        .map_err(|error| RunStoreError::RunListWrite {
            path: runs_json_path,
            error,
        })
}
/// A run store held under a shared (reader) lock; read-only access to the run
/// list.
#[derive(Debug)]
pub struct SharedLockedRunStore<'store> {
    runs_dir: StoreRunsDir<'store>,
    // Never read; kept alive so the OS file lock is released on drop.
    #[expect(dead_code, reason = "held for lock duration")]
    locked_file: DebugIgnore<File>,
    // In-memory copy of the recorded run list.
    runs: Vec<RecordedRunInfo>,
    // Whether this nextest version may rewrite runs.json.zst.
    write_permission: RunsJsonWritePermission,
    // Prefix index over the run IDs in `runs`.
    run_id_index: RunIdIndex,
}
impl<'store> SharedLockedRunStore<'store> {
pub fn into_snapshot(self) -> RunStoreSnapshot {
RunStoreSnapshot {
runs_dir: self.runs_dir.as_path().to_owned(),
runs: self.runs,
write_permission: self.write_permission,
run_id_index: self.run_id_index,
}
}
}
/// An owned, lock-free snapshot of the run store's state at the moment a
/// shared lock was held.
#[derive(Debug)]
pub struct RunStoreSnapshot {
    // Owned copy of the runs directory path.
    runs_dir: Utf8PathBuf,
    // Recorded runs as of the snapshot.
    runs: Vec<RecordedRunInfo>,
    // Whether this nextest version may rewrite runs.json.zst.
    write_permission: RunsJsonWritePermission,
    // Prefix index over the run IDs in `runs`.
    run_id_index: RunIdIndex,
}
impl RunStoreSnapshot {
    /// Returns the runs directory the snapshot was taken from.
    pub fn runs_dir(&self) -> StoreRunsDir<'_> {
        StoreRunsDir(&self.runs_dir)
    }

    /// Returns whether the run list may be rewritten by this nextest version.
    pub fn write_permission(&self) -> RunsJsonWritePermission {
        self.write_permission
    }

    /// All recorded runs in the snapshot.
    pub fn runs(&self) -> &[RecordedRunInfo] {
        self.runs.as_slice()
    }

    /// Number of recorded runs.
    pub fn run_count(&self) -> usize {
        self.runs.len()
    }

    /// Sum of compressed on-disk sizes over all runs.
    pub fn total_size(&self) -> u64 {
        self.runs
            .iter()
            .map(|run| run.sizes.total_compressed())
            .sum()
    }

    /// Resolves a run selector (latest run, or a run ID prefix) to a concrete
    /// run ID.
    pub fn resolve_run_id(
        &self,
        selector: &RunIdSelector,
    ) -> Result<ResolveRunIdResult, RunIdResolutionError> {
        match selector {
            RunIdSelector::Latest => self.most_recent_run(),
            RunIdSelector::Prefix(prefix) => self
                .resolve_run_id_prefix(prefix)
                .map(|run_id| ResolveRunIdResult { run_id }),
        }
    }

    /// Resolves a textual run ID prefix, converting index-level errors into
    /// user-facing resolution errors.
    fn resolve_run_id_prefix(&self, prefix: &str) -> Result<ReportUuid, RunIdResolutionError> {
        match self.run_id_index.resolve_prefix(prefix) {
            Ok(run_id) => Ok(run_id),
            Err(PrefixResolutionError::NotFound) => Err(RunIdResolutionError::NotFound {
                prefix: prefix.to_string(),
            }),
            Err(PrefixResolutionError::InvalidPrefix) => Err(RunIdResolutionError::InvalidPrefix {
                prefix: prefix.to_string(),
            }),
            Err(PrefixResolutionError::Ambiguous { count, candidates }) => {
                // Present the most recently started candidates first.
                let mut candidates: Vec<_> = candidates
                    .into_iter()
                    .filter_map(|run_id| self.get_run(run_id).cloned())
                    .collect();
                candidates.sort_by_key(|run| std::cmp::Reverse(run.started_at));
                Err(RunIdResolutionError::Ambiguous {
                    prefix: prefix.to_string(),
                    count,
                    candidates,
                    run_id_index: self.run_id_index.clone(),
                })
            }
        }
    }

    /// Returns the prefix index over all run IDs.
    pub fn run_id_index(&self) -> &RunIdIndex {
        &self.run_id_index
    }

    /// Looks up a run by its exact ID.
    pub fn get_run(&self, run_id: ReportUuid) -> Option<&RecordedRunInfo> {
        self.runs.iter().find(|run| run.run_id == run_id)
    }

    /// Returns the run with the most recent start time, or an error if the
    /// store has no runs.
    pub fn most_recent_run(&self) -> Result<ResolveRunIdResult, RunIdResolutionError> {
        match self.runs.iter().max_by_key(|run| run.started_at) {
            Some(run) => Ok(ResolveRunIdResult { run_id: run.run_id }),
            None => Err(RunIdResolutionError::NoRuns),
        }
    }

    /// Computes which runs a retention policy would delete, without deleting
    /// anything.
    pub fn compute_prune_plan(&self, policy: &RecordRetentionPolicy) -> PrunePlan {
        PrunePlan::compute(&self.runs, policy)
    }
}
/// A snapshot paired with a precomputed replayability status for every run.
#[derive(Debug)]
pub struct SnapshotWithReplayability<'a> {
    // The underlying snapshot.
    snapshot: &'a RunStoreSnapshot,
    // Replayability computed once per run at construction time.
    replayability: HashMap<ReportUuid, ReplayabilityStatus>,
    // ID of the most recently started run, if any runs exist.
    latest_run_id: Option<ReportUuid>,
}
impl<'a> SnapshotWithReplayability<'a> {
    /// Computes the replayability of every run in the snapshot up front, along
    /// with the most recently started run.
    pub fn new(snapshot: &'a RunStoreSnapshot) -> Self {
        let runs_dir = snapshot.runs_dir();
        let mut replayability = HashMap::new();
        for run in snapshot.runs() {
            let files = runs_dir.run_files(run.run_id);
            replayability.insert(run.run_id, run.check_replayability(&files));
        }
        let latest_run_id = snapshot.most_recent_run().ok().map(|result| result.run_id);
        Self {
            snapshot,
            replayability,
            latest_run_id,
        }
    }

    /// Returns the underlying snapshot.
    pub fn snapshot(&self) -> &'a RunStoreSnapshot {
        self.snapshot
    }

    /// Returns the full replayability map, keyed by run ID.
    pub fn replayability(&self) -> &HashMap<ReportUuid, ReplayabilityStatus> {
        &self.replayability
    }

    /// Returns the replayability of a specific run.
    ///
    /// # Panics
    ///
    /// Panics if `run_id` is not part of the snapshot; the map is built from
    /// every run in the snapshot, so this indicates a caller bug.
    pub fn get_replayability(&self, run_id: ReportUuid) -> &ReplayabilityStatus {
        self.replayability
            .get(&run_id)
            .expect("run ID should be in replayability map")
    }

    /// Returns the ID of the most recently started run, if any.
    pub fn latest_run_id(&self) -> Option<ReportUuid> {
        self.latest_run_id
    }
}
#[cfg(test)]
impl SnapshotWithReplayability<'_> {
    /// Test constructor that marks every run as replayable, skipping the
    /// filesystem checks done by [`SnapshotWithReplayability::new`].
    pub fn new_for_test(snapshot: &RunStoreSnapshot) -> SnapshotWithReplayability<'_> {
        let mut replayability = HashMap::new();
        for run in snapshot.runs() {
            replayability.insert(run.run_id, ReplayabilityStatus::Replayable);
        }
        let latest_run_id = snapshot
            .runs()
            .iter()
            .max_by_key(|run| run.started_at)
            .map(|run| run.run_id);
        SnapshotWithReplayability {
            snapshot,
            replayability,
            latest_run_id,
        }
    }
}
#[cfg(test)]
impl RunStoreSnapshot {
    /// Test constructor that builds a snapshot directly from a run list,
    /// bypassing the filesystem and locking entirely.
    pub(crate) fn new_for_test(runs: Vec<RecordedRunInfo>) -> Self {
        // `RunIdIndex` is already in scope from the module-level imports; the
        // previous function-local re-import was redundant.
        let run_id_index = RunIdIndex::new(&runs);
        Self {
            runs_dir: Utf8PathBuf::from("/test/runs"),
            runs,
            write_permission: RunsJsonWritePermission::Allowed,
            run_id_index,
        }
    }
}
/// The kind of OS file lock to acquire on the lock file.
#[derive(Clone, Copy)]
enum LockKind {
    /// Shared lock: multiple readers may hold it concurrently.
    Shared,
    /// Exclusive lock: a single writer holds it.
    Exclusive,
}
fn acquire_lock_with_retry(
file: &File,
lock_file_path: &Utf8Path,
kind: LockKind,
) -> Result<(), RunStoreError> {
const LOCK_TIMEOUT: Duration = Duration::from_secs(5);
const LOCK_RETRY_INTERVAL: Duration = Duration::from_millis(100);
let start = Instant::now();
loop {
let result = match kind {
LockKind::Shared => file.try_lock_shared(),
LockKind::Exclusive => file.try_lock(),
};
match result {
Ok(()) => return Ok(()),
Err(TryLockError::WouldBlock) => {
if start.elapsed() >= LOCK_TIMEOUT {
return Err(RunStoreError::FileLockTimeout {
path: lock_file_path.to_owned(),
timeout_secs: LOCK_TIMEOUT.as_secs(),
});
}
thread::sleep(LOCK_RETRY_INTERVAL);
}
Err(TryLockError::Error(error)) => {
return Err(RunStoreError::FileLock {
path: lock_file_path.to_owned(),
error,
});
}
}
}
}