git-lfs 0.7.0

Large file storage for git, implemented in Rust
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
//! Sync→async bridge for on-demand LFS downloads from the smudge filter.
//!
//! `filter/`'s [`smudge_with_fetch`](git_lfs_filter::smudge_with_fetch) and
//! [`filter_process`](git_lfs_filter::filter_process) take a sync closure
//! that's invoked when an object is missing from the local store. This
//! module hosts the closure: it owns the tokio runtime + the
//! [`git_lfs_transfer::Transfer`] instance that actually drives the
//! download.
//!
//! Construction is infallible at the runtime level — if `lfs.url` isn't
//! configured, the [`LfsFetcher`] returns the error lazily, on the first
//! `fetch` call. That means a smudge of a *locally-present* object never
//! has to talk to git config, so a stripped-down repo can still smudge
//! its already-fetched bytes.

use std::collections::HashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;

use git_lfs_api::{
    Auth, BatchRequest, Client as ApiClient, ObjectSpec, Operation, Ref, SharedSshResolver,
    SshAuth as ApiSshAuth, SshOperation as ApiSshOperation, SshResolver,
};
use git_lfs_creds::{
    AskpassHelper, CachingHelper, GitCredentialHelper, Helper, HelperChain, NetrcCredentialHelper,
    SshAuthClient, SshOperation as CredsSshOperation,
};
use git_lfs_filter::FetchError;
use git_lfs_git::ConfigScope;
use git_lfs_git::endpoint::SshInfo;
use git_lfs_pointer::Pointer;
use git_lfs_store::Store;
use git_lfs_transfer::{Report, Transfer, TransferConfig};
use tokio::runtime::Runtime;

/// Owns the tokio runtime and the configured [`Transfer`]. One per CLI
/// invocation; `fetch` is cheap to call repeatedly (e.g. inside the
/// long-running filter-process loop).
pub struct LfsFetcher {
    /// Current-thread tokio runtime (see `from_repo_with_remote`); all
    /// async work in this type is driven via `block_on`.
    runtime: Runtime,
    /// Lazily-surfaced transfer handle: endpoint-resolution failures are
    /// stored as the `Err` message at construction and only reported when
    /// a transfer is actually attempted (see the module docs).
    transfer: Result<Transfer, String>,
    /// API client used directly for `check_server_has` (so push can ask
    /// the server which "missing locally" objects the server already
    /// holds before deciding whether to fail). Same client the
    /// `Transfer` uses for its batch calls.
    api: Result<ApiClient, String>,
    /// Refspec sent on every batch request. Resolved once at
    /// construction (current branch's tracked upstream, falling back
    /// to the current branch's full ref). Required by servers that
    /// scope LFS operations per-branch — without it, branch-required
    /// repos return `403 Expected ref ..., got ""` to push/pull.
    refspec: Option<Ref>,
    /// Repo cwd captured at construction. Used to persist
    /// `lfs.<url>.access` after a successful authenticated request,
    /// so future `git lfs env` runs (and the cred-prompting flow)
    /// know basic auth is in play.
    cwd: PathBuf,
}

impl LfsFetcher {
    /// Build a fetcher rooted at the given repo, defaulting the remote
    /// name to `origin`. Runtime construction is the only thing that can
    /// fail here; an unresolvable LFS endpoint is captured and surfaced
    /// on the first [`fetch`](Self::fetch) call instead.
    pub fn from_repo(cwd: &Path, store: &Store) -> std::io::Result<Self> {
        Self::from_repo_with_remote(cwd, store, None)
    }

    /// Like [`from_repo`](Self::from_repo) but lets the caller specify
    /// which remote to resolve against. Used by `push` / `pre-push` so
    /// the LFS endpoint matches the remote being pushed to (otherwise a
    /// `git push upstream` would still upload via `origin`'s LFS config).
    pub fn from_repo_with_remote(
        cwd: &Path,
        store: &Store,
        remote: Option<&str>,
    ) -> std::io::Result<Self> {
        let runtime = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        let endpoint_url = git_lfs_git::endpoint::endpoint_for_remote(cwd, remote);
        // Even for SSH endpoints we want the http client configured for
        // the *eventual* HTTPS host — proxy/CA settings index by URL.
        // For SSH-shaped endpoints, the resolver will replace the URL
        // per request via `href`; the endpoint string we pass here is
        // just the bootstrap. Pass the raw value — `http_client::build`
        // tolerates non-HTTP schemes.
        let http = endpoint_url
            .as_ref()
            .map(|u| crate::http_client::build(cwd, u))
            .unwrap_or_default();
        let api = build_api_client_with(cwd, remote, http.clone());
        let config = transfer_config_for(cwd, endpoint_url.as_deref().ok());
        let transfer = api
            .clone()
            .map(|c| Transfer::with_http_client(c, store.clone(), config, http));
        let refspec = git_lfs_git::refs::current_refspec(cwd).map(Ref::new);
        Ok(Self {
            runtime,
            transfer,
            api,
            refspec,
            cwd: cwd.to_path_buf(),
        })
    }

    /// If our most recent operation authenticated via HTTP Basic,
    /// persist `lfs.<url>.access = basic` to local git config so
    /// future `git lfs env` runs render `(auth=basic)` and the
    /// cred-prompting flow knows to fill upfront. No-op when the
    /// endpoint isn't resolvable, no auth was used, or the value is
    /// already set. Best-effort — config writes that fail are
    /// swallowed (we'd rather complete the user's operation than
    /// abort it on a bookkeeping write).
    pub fn persist_access_mode(&self) {
        let Ok(api) = self.api.as_ref() else { return };
        if !api.used_basic_auth() {
            return;
        }
        let url = api.endpoint().as_str();
        let key = format!("lfs.{url}.access");
        if let Ok(Some(existing)) = git_lfs_git::config::get_effective(&self.cwd, &key)
            && existing.eq_ignore_ascii_case("basic")
        {
            return;
        }
        let _ = git_lfs_git::config::set(&self.cwd, ConfigScope::Local, &key, "basic");
    }

    /// Override the auto-resolved refspec. Used by `pre-push`, where
    /// the relevant ref is the *remote* ref being pushed (parsed from
    /// the hook's stdin), not the local branch the user happens to be
    /// on.
    #[must_use]
    pub fn with_refspec(mut self, refspec: Option<String>) -> Self {
        self.refspec = refspec.map(Ref::new);
        self
    }

    /// Download the object identified by `pointer` into the local store.
    /// Returns once the bytes are committed (and hash-verified by
    /// [`Store::insert_verified`](git_lfs_store::Store)).
    ///
    /// Single-object convenience over [`download_many`](Self::download_many).
    /// Used by `smudge` / `filter-process`.
    pub fn fetch(&self, pointer: &Pointer) -> Result<(), FetchError> {
        let report = self.download_many(vec![ObjectSpec {
            oid: pointer.oid.to_string(),
            size: pointer.size,
        }])?;
        if let Some((failed_oid, err)) = report.failed.into_iter().next() {
            return Err(format!("download failed for {failed_oid}: {err}").into());
        }
        Ok(())
    }

    /// Download many objects in one batch. Errors eagerly if `lfs.url`
    /// isn't configured (vs. [`fetch`](Self::fetch) which only complains
    /// when actually invoked). Caller inspects the returned [`Report`] for
    /// per-object outcomes.
    pub fn download_many(&self, specs: Vec<ObjectSpec>) -> Result<Report, FetchError> {
        let transfer = self.transfer()?;
        let report = self
            .runtime
            .block_on(transfer.download(specs, self.refspec.clone(), None))?;
        Ok(report)
    }

    /// Upload many objects to the configured LFS endpoint. Server-side
    /// dedup happens at the batch layer: objects the server already has
    /// come back with no `actions`, the transfer queue treats those as
    /// success without any byte transfer.
    pub fn upload_many(&self, specs: Vec<ObjectSpec>) -> Result<Report, FetchError> {
        let transfer = self.transfer()?;
        let report = self
            .runtime
            .block_on(transfer.upload(specs, self.refspec.clone(), None))?;
        Ok(report)
    }

    /// Borrow the inner API client (e.g. for callers that want to
    /// run a one-off batch without going through `download_many`).
    /// Errors lazily — same `lfs.url` resolution as the transfer
    /// path.
    pub fn api_client(&self) -> Result<&ApiClient, FetchError> {
        self.api
            .as_ref()
            .map_err(|m| -> FetchError { m.clone().into() })
    }

    /// Drive an arbitrary future on the fetcher's tokio runtime. Used
    /// by callers (like fetch's --json --dry-run path) that need to
    /// hit the API directly while keeping the fetcher's runtime ownership.
    pub fn runtime_block_on<F: std::future::Future>(&self, fut: F) -> F::Output {
        self.runtime.block_on(fut)
    }

    /// Run the pre-flight `/locks/verify` check before push, returning
    /// an [`Outcome`](crate::locks_verify::Outcome) the caller acts on.
    /// Threads the fetcher's runtime + api client through to
    /// `locks_verify`.
    pub fn preflight_verify_locks(
        &self,
        cwd: &Path,
        remote_label: &str,
        endpoint: &str,
    ) -> Result<crate::locks_verify::Outcome, crate::push::PushCommandError> {
        let api = self
            .api_client()
            .map_err(crate::push::PushCommandError::Fetch)?;
        crate::locks_verify::run(
            &self.runtime,
            api,
            cwd,
            remote_label,
            endpoint,
            self.refspec.as_ref(),
        )
    }

    /// Ask the server which of the given OIDs it already holds. Used by
    /// `push` / `pre-push` to distinguish "missing locally but server
    /// has it" (silent skip — see `lfs.allowincompletepush`) from
    /// "missing both places" (the failure case).
    ///
    /// Sends one upload-direction batch and returns the OIDs that came
    /// back with neither `actions` nor `error` (the spec's no-op
    /// signal — server already has the object).
    pub fn check_server_has(&self, specs: Vec<ObjectSpec>) -> Result<HashSet<String>, FetchError> {
        self.batch_probe(Operation::Upload, specs, false)
    }

    /// Ask the server which of the given OIDs it can serve back. Used
    /// by `prune --verify-remote` to confirm a candidate-for-deletion
    /// is actually safe to delete (the remote still has it). Returns
    /// the subset that the server replied to with usable `actions` —
    /// objects whose response carried an `error`, or no `actions`
    /// (a no-op signal that doesn't make sense in download mode), are
    /// **not** in the returned set.
    ///
    /// Sends one download-direction batch. Pre-signed action URLs are
    /// not consumed — we just look at which objects the server
    /// admits to having.
    pub fn check_server_can_download(
        &self,
        specs: Vec<ObjectSpec>,
    ) -> Result<HashSet<String>, FetchError> {
        self.batch_probe(Operation::Download, specs, true)
    }

    /// Shared single-batch probe behind
    /// [`check_server_has`](Self::check_server_has) and
    /// [`check_server_can_download`](Self::check_server_can_download):
    /// sends one batch in direction `op` (with the fetcher's refspec
    /// attached when present) and returns the OIDs whose response
    /// carried no `error` and whose `actions` presence matches
    /// `want_actions`. Empty input short-circuits without a request.
    fn batch_probe(
        &self,
        op: Operation,
        specs: Vec<ObjectSpec>,
        want_actions: bool,
    ) -> Result<HashSet<String>, FetchError> {
        if specs.is_empty() {
            return Ok(HashSet::new());
        }
        let api = self.api_client()?;
        let mut req = BatchRequest::new(op, specs);
        if let Some(r) = self.refspec.clone() {
            req = req.with_ref(r);
        }
        let resp = self
            .runtime
            .block_on(api.batch(&req))
            .map_err(|e| -> FetchError { e.to_string().into() })?;
        Ok(resp
            .objects
            .into_iter()
            .filter(|o| o.actions.is_some() == want_actions && o.error.is_none())
            .map(|o| o.oid)
            .collect())
    }

    fn transfer(&self) -> Result<&Transfer, FetchError> {
        self.transfer
            .as_ref()
            .map_err(|msg| -> FetchError { msg.clone().into() })
    }
}

/// Build an [`ApiClient`] for `cwd`+`remote` with our standard credential
/// helper chain attached. Used both by [`LfsFetcher`] (for transfers) and
/// by the locking commands (no transfers, just JSON requests).
pub fn build_api_client(cwd: &Path, remote: Option<&str>) -> Result<ApiClient, String> {
    let endpoint = match git_lfs_git::endpoint::endpoint_for_remote(cwd, remote) {
        Ok(url) => url,
        Err(e) => return Err(format!("resolving LFS endpoint: {e}")),
    };
    let http = crate::http_client::build(cwd, &endpoint);
    build_api_client_with(cwd, remote, http)
}

/// Shared constructor behind [`build_api_client`] and
/// [`LfsFetcher::from_repo_with_remote`]: resolves the LFS endpoint for
/// `cwd`+`remote`, normalizes it to an http(s) base URL, extracts any
/// URL-embedded credentials, attaches the default credential-helper
/// chain, and — for SSH-shaped endpoints — installs the SSH auth
/// resolver (honoring the `sshtransfer` setting).
fn build_api_client_with(
    cwd: &Path,
    remote: Option<&str>,
    http: reqwest::Client,
) -> Result<ApiClient, String> {
    let info = git_lfs_git::endpoint::resolve_endpoint(cwd, remote)
        .map_err(|e| format!("resolving LFS endpoint: {e}"))?;
    // For SSH-shaped endpoints, the configured URL might be `ssh://...`
    // (e.g. `lfs.url = ssh://git@host/repo`). reqwest needs an http(s)
    // base URL; the SSH resolver's `href` will replace it on each
    // request, but we still need a parseable bootstrap value here.
    let endpoint =
        http_compatible_endpoint(&info.url).map_err(|e| format!("invalid LFS endpoint: {e}"))?;
    let mut url = url::Url::parse(&endpoint).map_err(|e| format!("invalid LFS endpoint: {e}"))?;
    // Extract `user:pass@` from the endpoint and use it as the initial
    // auth so we don't have to round-trip through 401 → fill on every
    // first request. Mirrors upstream's `setRequestAuthWithCreds` for
    // URL-embedded credentials. Strip the user info from the URL we
    // pass to reqwest so it doesn't double-apply.
    let initial_auth = take_url_basic_auth(&mut url);
    // `credential.useHttpPath` (git-bool, default false) — threaded into
    // the client via `with_use_http_path` below.
    let use_http_path = read_bool_default(cwd, "credential.useHttpPath", false);
    // Resolve the credential URL up front (git remote URL when it
    // shares scheme+host with the LFS endpoint, else the LFS endpoint
    // itself). The helper chain consults it for URL-specific
    // `credential.<url>.helper` lookup, and the API client uses it for
    // prompts and "Git credentials for X not found" wording.
    let resolved_cred_url = remote
        .and_then(|r| git_lfs_git::endpoint::remote_url(cwd, r).ok().flatten())
        .and_then(|raw| url::Url::parse(&raw).ok())
        .filter(|gu| {
            gu.scheme() == url.scheme()
                && gu.host_str() == url.host_str()
                && gu.port() == url.port()
        });
    let cred_url_for_helper = resolved_cred_url.clone().unwrap_or_else(|| url.clone());
    // Snapshot the configured `http.<url>.extraHeader` values so the
    // GIT_CURL_VERBOSE dump can echo them. They're already on the wire
    // via the reqwest client's `default_headers` (set up in
    // `cli/src/http_client.rs`); the snapshot is purely so the
    // verbose dump can name them.
    let extras = git_lfs_git::extra_headers_for(cwd, url.as_str());
    let mut client = ApiClient::with_http_client(url.clone(), initial_auth, http)
        .with_credential_helper(default_helper_chain(cwd, &cred_url_for_helper))
        .with_use_http_path(use_http_path)
        .with_extra_headers_for_verbose(extras);
    if let Some(git_url) = resolved_cred_url {
        client = client.with_cred_url(git_url);
    }
    if let Some(ssh_info) = info.ssh {
        // `lfs.<url>.sshtransfer` (with `lfs.sshtransfer` fallback) gates
        // the pure-SSH transfer protocol (`git-lfs-transfer`). We don't
        // implement that yet, so:
        //   - `always`: the user explicitly asked for the unimplemented
        //     path → fail loudly with upstream's exact wording so
        //     scripts that opted in get a clear signal (and the shell
        //     test grep matches).
        //   - `never`: the user wants pure-SSH off; we honor that
        //     trivially (we already only do `git-lfs-authenticate`),
        //     and emit the trace line upstream prints for parity.
        //   - unset / other: default behavior — just use
        //     `git-lfs-authenticate`.
        let sshtransfer = read_sshtransfer_for(cwd, &info.url);
        match sshtransfer.as_deref() {
            Some("always") => {
                ssh_trace(format_args!(
                    "git-lfs-authenticate has been disabled by request"
                ));
                client = client.with_ssh_resolver(Arc::new(DisabledSshResolver));
            }
            Some("never") => {
                ssh_trace(format_args!("skipping pure SSH protocol"));
                client = client.with_ssh_resolver(build_ssh_resolver(ssh_info));
            }
            _ => {
                client = client.with_ssh_resolver(build_ssh_resolver(ssh_info));
            }
        }
    }
    Ok(client)
}

/// Read `lfs.<endpoint>.sshtransfer` falling back to `lfs.sshtransfer`.
/// Lowercased so callers can pattern-match on `"always" / "never"`.
fn read_sshtransfer_for(cwd: &Path, endpoint: &str) -> Option<String> {
    // URL-scoped key first, global key second; first hit wins.
    let keys = [
        format!("lfs.{endpoint}.sshtransfer"),
        "lfs.sshtransfer".to_string(),
    ];
    keys.iter()
        .find_map(|key| git_lfs_git::config::get_effective(cwd, key).ok().flatten())
        .map(|value| value.trim().to_ascii_lowercase())
}

/// Emit one stderr trace line, gated on `GIT_TRACE` (matches the
/// `creds/src/ssh.rs` trace gate). Used for the `sshtransfer=always` /
/// `sshtransfer=never` notes the upstream test suite greps for.
fn ssh_trace(args: std::fmt::Arguments) {
    use std::io::Write as _;
    if ssh_trace_enabled() {
        // Write failures are deliberately ignored — tracing is best-effort.
        let _ = writeln!(std::io::stderr().lock(), "{args}");
    }
}

/// True when `GIT_TRACE` is set to anything other than the standard
/// "off" spellings (empty, `0`, `false`, `no`, `off`, case-insensitive).
fn ssh_trace_enabled() -> bool {
    std::env::var_os("GIT_TRACE").is_some_and(|raw| {
        let value = raw.to_string_lossy().trim().to_lowercase();
        !matches!(value.as_str(), "" | "0" | "false" | "no" | "off")
    })
}

/// Resolver installed when `lfs.<url>.sshtransfer=always` and we don't
/// implement pure-SSH transfer: refuses every request with upstream's
/// "git-lfs-authenticate has been disabled by request" wording so the
/// failure surfaces consistently regardless of which API method the
/// caller hit.
struct DisabledSshResolver;

impl SshResolver for DisabledSshResolver {
    fn resolve(&self, _op: ApiSshOperation) -> Result<ApiSshAuth, git_lfs_api::ApiError> {
        // Wording must match upstream exactly — test suites grep for it.
        let message = "git-lfs-authenticate has been disabled by request";
        Err(git_lfs_api::ApiError::Decode(message.into()))
    }
}

/// Convert an LFS endpoint string into something reqwest can use as a
/// base URL. SSH-style schemes (`ssh://...`, bare `git@host:repo`, …)
/// are run through [`derive_lfs_url`] to get the matching HTTPS form;
/// HTTP/HTTPS pass through verbatim. The SSH resolver overrides this
/// per-request when it returns a non-empty `href`.
fn http_compatible_endpoint(url_str: &str) -> Result<String, git_lfs_git::endpoint::EndpointError> {
    let already_http = url_str.starts_with("http://") || url_str.starts_with("https://");
    if already_http {
        Ok(url_str.to_owned())
    } else {
        git_lfs_git::endpoint::derive_lfs_url(url_str)
    }
}

/// Construct an [`SshResolver`] for the given SSH endpoint metadata,
/// using `GIT_SSH_COMMAND` / `GIT_SSH` / `ssh` for the executable in
/// upstream's selection order.
fn build_ssh_resolver(info: SshInfo) -> SharedSshResolver {
    let client = Arc::new(SshAuthClient::new(resolve_ssh_program()));
    Arc::new(SshAuthAdapter { client, ssh: info })
}

/// Pick the SSH executable per upstream's order:
/// `GIT_SSH_COMMAND` (full command line, parsed by SshAuthClient) >
/// `GIT_SSH` (single program path) > literal `ssh` on `$PATH`.
/// `core.sshCommand` is upstream's fallback before `GIT_SSH`; we don't
/// honor it yet — see NOTES.md.
fn resolve_ssh_program() -> String {
    for name in ["GIT_SSH_COMMAND", "GIT_SSH"] {
        match std::env::var_os(name) {
            // Empty values are treated the same as unset.
            Some(value) if !value.is_empty() => return value.to_string_lossy().into_owned(),
            _ => {}
        }
    }
    "ssh".to_owned()
}

/// Bridge between `git_lfs_creds::SshAuthClient` (which knows how to
/// spawn ssh and parse responses) and `git_lfs_api::SshResolver` (the
/// trait the API client calls into). Holds the per-endpoint metadata
/// the SSH command needs (`user@host`, port, path) and the operation
/// translation between the two crates' enums.
struct SshAuthAdapter {
    /// Shared SSH client that spawns the process and parses its output.
    client: Arc<SshAuthClient>,
    /// Per-endpoint metadata: `user_and_host`, optional `port`, `path`.
    ssh: SshInfo,
}

impl SshResolver for SshAuthAdapter {
    /// Translate the API-side operation into the creds-side enum, run
    /// `git-lfs-authenticate` over SSH, and repackage the result into
    /// the API crate's auth struct.
    fn resolve(&self, op: ApiSshOperation) -> Result<ApiSshAuth, git_lfs_api::ApiError> {
        let creds_op = match op {
            ApiSshOperation::Download => CredsSshOperation::Download,
            ApiSshOperation::Upload => CredsSshOperation::Upload,
        };
        let outcome = self.client.resolve(
            &self.ssh.user_and_host,
            self.ssh.port.as_deref(),
            &self.ssh.path,
            creds_op,
        );
        match outcome {
            Ok(resolved) => Ok(ApiSshAuth {
                href: resolved.href,
                headers: resolved.header,
            }),
            Err(e) => Err(git_lfs_api::ApiError::Decode(format!(
                "ssh git-lfs-authenticate: {e}"
            ))),
        }
    }
}

/// Pull `user:pass@` (or `user@`, with empty password) out of `url` and
/// turn it into an [`Auth::Basic`]. Strips both fields from the URL so
/// downstream callers see the bare endpoint. Returns [`Auth::None`]
/// when there's no user info to extract.
fn take_url_basic_auth(url: &mut url::Url) -> Auth {
    let username = url.username().to_owned();
    let password = url.password().unwrap_or("").to_owned();
    if username.is_empty() && password.is_empty() {
        Auth::None
    } else {
        // Clear the userinfo so reqwest doesn't apply it a second time.
        let _ = url.set_username("");
        let _ = url.set_password(None);
        Auth::Basic { username, password }
    }
}

/// Build a [`TransferConfig`] for `cwd`, plumbing
/// `lfs.transfer.enablehrefrewrite` + `url.<base>.insteadOf` into
/// `url_rewriter` when the flag is enabled. The rewrite map is captured
/// at construction so the queue's hot path doesn't shell out to git for
/// every action URL. `endpoint_url` (when known) is also used to
/// resolve URL-scoped `lfs.<url>.contenttype` — passed in rather than
/// re-resolved because the caller already paid for endpoint resolution.
fn transfer_config_for(cwd: &Path, endpoint_url: Option<&str>) -> TransferConfig {
    let mut config = TransferConfig::default();
    if href_rewrite_enabled(cwd) {
        let aliases = git_lfs_git::aliases::load_aliases(cwd).unwrap_or_default();
        let push_aliases = git_lfs_git::aliases::load_push_aliases(cwd).unwrap_or_default();
        if !aliases.is_empty() {
            let download_aliases = aliases.clone();
            config.url_rewriter = Some(Arc::new(move |url: &str| {
                git_lfs_git::aliases::apply(&download_aliases, url)
            }));
        }
        if !push_aliases.is_empty() {
            // pushInsteadOf wins for upload + verify; insteadOf is the
            // fallback when a URL doesn't match any push alias.
            let upload_push = push_aliases;
            let upload_fallback = aliases;
            config.upload_url_rewriter = Some(Arc::new(move |url: &str| {
                let rewritten = git_lfs_git::aliases::apply(&upload_push, url);
                if rewritten != url {
                    rewritten
                } else {
                    git_lfs_git::aliases::apply(&upload_fallback, url)
                }
            }));
        }
    }
    if let Ok(Some(raw)) = git_lfs_git::config::get_effective(cwd, "lfs.transfer.batchSize")
        && let Ok(n) = raw.trim().parse::<usize>()
        && n > 0
    {
        config.batch_size = n;
    }
    // `lfs.<url>.contenttype` falls back to global `lfs.contenttype`,
    // default `true`. Without an endpoint we can't apply URL scoping
    // — fall back to the global key directly.
    config.detect_content_type = match endpoint_url {
        Some(u) => git_lfs_git::lfs_url_bool(cwd, u, "contenttype", true),
        None => match git_lfs_git::config::get_effective(cwd, "lfs.contenttype") {
            Ok(Some(v)) => !matches!(
                v.trim().to_ascii_lowercase().as_str(),
                "false" | "0" | "no" | "off"
            ),
            _ => true,
        },
    };
    config
}

/// Read `lfs.transfer.enablehrefrewrite` from the effective config
/// (default false). Accepts the standard git-bool spellings.
fn href_rewrite_enabled(cwd: &Path) -> bool {
    let raw = git_lfs_git::config::get_effective(cwd, "lfs.transfer.enablehrefrewrite")
        .ok()
        .flatten()
        .unwrap_or_default();
    matches!(
        raw.to_ascii_lowercase().as_str(),
        "true" | "1" | "yes" | "on"
    )
}

/// Default credential resolution chain: netrc (when `~/.netrc` /
/// `_netrc` exists) → in-process cache → optional askpass →
/// `git credential`. Reads `credential.protectProtocol` (default
/// `true`) and threads it into the git-credential helper so CR-bearing
/// URLs can be opted into when the user explicitly does so.
///
/// Netrc sits ahead of the cache (matching upstream's
/// `creds.NewCredentialHelperContext` order) so its `netrc: git
/// credential fill` trace fires on every authenticated request, not
/// just the first; its lookups are in-memory after the one-time file
/// read, so cached hosts pay nothing. Hosts not covered by netrc fall
/// through to the rest of the chain.
///
/// Askpass is only inserted when:
/// - `GIT_ASKPASS` / `core.askpass` / `SSH_ASKPASS` resolves to a
///   non-empty program (priority in that order, matching upstream's
///   `creds.NewCredentialHelperContext`), AND
/// - `credential.helper` is unset — a configured helper takes
///   precedence over interactive prompting.
fn default_helper_chain(cwd: &Path, cred_url: &url::Url) -> Arc<dyn Helper> {
    let protect_protocol = read_bool_default(cwd, "credential.protectProtocol", true);
    let mut links: Vec<Box<dyn Helper>> = Vec::with_capacity(4);
    if let Some(netrc) = NetrcCredentialHelper::from_default_location() {
        links.push(Box::new(netrc));
    }
    links.push(Box::new(CachingHelper::new()));
    // Resolve the askpass program first; only when one exists do we pay
    // for the `credential.helper` config lookup (preserves the original
    // short-circuit order).
    if let Some(program) = resolve_askpass_program(cwd) {
        if !has_credential_helper(cwd, cred_url) {
            links.push(Box::new(AskpassHelper::new(program)));
        }
    }
    links.push(Box::new(
        GitCredentialHelper::new().with_protect_protocol(protect_protocol),
    ));
    Arc::new(HelperChain::new(links))
}

/// Resolve the askpass program per upstream's selection order:
/// `GIT_ASKPASS` env > `core.askpass` config > `SSH_ASKPASS` env. First
/// non-empty value wins. Returns `None` when none is configured (which
/// is the typical headless / CI case).
fn resolve_askpass_program(cwd: &Path) -> Option<String> {
    // Empty environment values count as unset.
    let nonempty_env = |name: &str| {
        std::env::var_os(name)
            .filter(|v| !v.is_empty())
            .map(|v| v.to_string_lossy().into_owned())
    };
    nonempty_env("GIT_ASKPASS")
        .or_else(|| {
            git_lfs_git::config::get_effective(cwd, "core.askpass")
                .ok()
                .flatten()
                .map(|v| v.trim().to_owned())
                .filter(|v| !v.is_empty())
        })
        .or_else(|| nonempty_env("SSH_ASKPASS"))
}

/// True if a non-empty `credential.helper` is configured for `cred_url`.
/// Checks the URL-prefix variants in upstream's precedence order
/// (`credential.<scheme>://<host>[:port]/<path>.helper` →
/// `credential.<scheme>://<host>[:port].helper` →
/// `credential.helper`) and returns true on the first non-empty match.
/// When `true`, we skip askpass so a configured helper isn't shadowed
/// by a pop-up prompt. Mirrors upstream's
/// `urlConfig.Get("credential", rawurl, "helper")` lookup.
fn has_credential_helper(cwd: &Path, cred_url: &url::Url) -> bool {
    let mut candidates: Vec<String> = Vec::with_capacity(3);
    if let Some(host) = cred_url.host_str() {
        let authority = match cred_url.port() {
            Some(port) => format!("{}://{host}:{port}", cred_url.scheme()),
            None => format!("{}://{host}", cred_url.scheme()),
        };
        // Most-specific first: include the path component if present.
        let path = cred_url.path();
        if !path.is_empty() && path != "/" {
            candidates.push(format!(
                "credential.{authority}{}.helper",
                path.trim_end_matches('/')
            ));
        }
        candidates.push(format!("credential.{authority}.helper"));
    }
    candidates.push("credential.helper".to_owned());
    // `any` short-circuits on the first non-empty configured value,
    // like the original early-return loop.
    candidates.iter().any(|key| {
        matches!(
            git_lfs_git::config::get_effective(cwd, key),
            Ok(Some(v)) if !v.trim().is_empty()
        )
    })
}

/// Read a git-bool config value, returning `default` when the key is
/// unset, unreadable, or unparseable. Matches git's accepted spellings
/// (true/false, yes/no, on/off, 1/0), case-insensitively and ignoring
/// surrounding whitespace.
fn read_bool_default(cwd: &Path, key: &str, default: bool) -> bool {
    match git_lfs_git::config::get_effective(cwd, key) {
        Ok(Some(raw)) => match raw.trim().to_ascii_lowercase().as_str() {
            "true" | "1" | "yes" | "on" => true,
            "false" | "0" | "no" | "off" => false,
            _ => default,
        },
        _ => default,
    }
}