posthog_cli/commands/sourcemap/upload.rs

1use core::str;
2use std::path::PathBuf;
3
4use anyhow::{anyhow, Context, Ok, Result};
5use sha2::Digest;
6use tracing::info;
7
8use crate::utils::auth::load_token;
9use crate::utils::posthog::capture_command_invoked;
10use crate::utils::release::{create_release, CreateReleaseResponse};
11use crate::utils::sourcemaps::{read_pairs, ChunkUpload, SourcePair};
12
13pub fn upload(
14    host: Option<String>,
15    directory: &PathBuf,
16    project: Option<String>,
17    version: Option<String>,
18) -> Result<()> {
19    let token = load_token().context("While starting upload command")?;
20    let host = token.get_host(host.as_deref());
21
22    let capture_handle = capture_command_invoked("sourcemap_upload", Some(&token.env_id));
23
24    let url = format!(
25        "{}/api/environments/{}/error_tracking/symbol_sets",
26        host, token.env_id
27    );
28
29    let pairs = read_pairs(directory)?;
30
31    let uploads = collect_uploads(pairs).context("While preparing files for upload")?;
32    info!("Found {} chunks to upload", uploads.len());
33
34    // See if we have enough information to create a release object
35    // TODO - The use of a hash_id here means repeated attempts to upload the same data will fail.
36    //        We could relax this, such that we instead replace the existing release with the new one,
37    //        or we could even just allow adding new chunks to an existing release, but for now I'm
38    //        leaving it like this... Reviewers, lets chat about the right approach here
39    let release = create_release(
40        &host,
41        &token,
42        Some(directory.clone()),
43        Some(content_hash(&uploads)),
44        project,
45        version,
46    )
47    .context("While creating release")?;
48
49    upload_chunks(&url, &token.token, uploads, release.as_ref())?;
50
51    let _ = capture_handle.join();
52
53    Ok(())
54}
55
56fn collect_uploads(pairs: Vec<SourcePair>) -> Result<Vec<ChunkUpload>> {
57    let uploads: Vec<ChunkUpload> = pairs
58        .into_iter()
59        .map(|pair| pair.into_chunk_upload())
60        .collect::<Result<Vec<ChunkUpload>>>()?;
61    Ok(uploads)
62}
63
64fn upload_chunks(
65    url: &str,
66    token: &str,
67    uploads: Vec<ChunkUpload>,
68    release: Option<&CreateReleaseResponse>,
69) -> Result<()> {
70    let client = reqwest::blocking::Client::new();
71    let release_id = release.map(|r| r.id.to_string());
72    for upload in uploads {
73        info!("Uploading chunk {}", upload.chunk_id);
74
75        let mut params: Vec<(&'static str, &str)> =
76            vec![("chunk_id", &upload.chunk_id), ("multipart", "true")];
77        if let Some(id) = &release_id {
78            params.push(("release_id", id));
79        }
80
81        let part = reqwest::blocking::multipart::Part::bytes(upload.data).file_name("file");
82        let form = reqwest::blocking::multipart::Form::new().part("file", part);
83
84        let res = client
85            .post(url)
86            .multipart(form)
87            .header("Authorization", format!("Bearer {}", token))
88            .query(&params)
89            .send()
90            .context(format!("While uploading chunk to {}", url))?;
91
92        if !res.status().is_success() {
93            return Err(anyhow!("Failed to upload chunk: {:?}", res)
94                .context(format!("Chunk id: {}", upload.chunk_id)));
95        }
96    }
97
98    Ok(())
99}
100
/// Returns the hex-encoded SHA-512 digest of all chunk payloads, fed to the
/// hasher in slice order. Identical chunk data in the same order always
/// produces the same hash, which the caller uses as a content id when
/// creating a release.
fn content_hash(uploads: &[ChunkUpload]) -> String {
    let mut hasher = sha2::Sha512::new();
    for upload in uploads {
        // Fold each chunk's raw bytes into the running digest.
        hasher.update(&upload.data);
    }
    // `{:x}` renders the digest as lowercase hex.
    format!("{:x}", hasher.finalize())
107}