// git_cliff_core/remote/mod.rs

1/// GitHub client.
2#[cfg(feature = "github")]
3pub mod github;
4
5/// GitLab client.
6#[cfg(feature = "gitlab")]
7pub mod gitlab;
8
9/// Bitbucket client.
10#[cfg(feature = "bitbucket")]
11pub mod bitbucket;
12
13/// Gitea client.
14#[cfg(feature = "gitea")]
15pub mod gitea;
16
17use crate::config::Remote;
18use crate::contributor::RemoteContributor;
19use crate::error::{
20	Error,
21	Result,
22};
23use dyn_clone::DynClone;
24use futures::{
25	future,
26	stream,
27	StreamExt,
28};
29use http_cache_reqwest::{
30	CACacheManager,
31	Cache,
32	CacheMode,
33	HttpCache,
34	HttpCacheOptions,
35};
36use reqwest::header::{
37	HeaderMap,
38	HeaderValue,
39};
40use reqwest::Client;
41use reqwest_middleware::{
42	ClientBuilder,
43	ClientWithMiddleware,
44};
45use secrecy::ExposeSecret;
46use serde::de::DeserializeOwned;
47use serde::{
48	Deserialize,
49	Serialize,
50};
51use std::env;
52use std::fmt::Debug;
53use std::time::Duration;
54use time::{
55	format_description::well_known::Rfc3339,
56	OffsetDateTime,
57};
58
59/// User agent for interacting with the GitHub API.
60///
61/// This is needed since GitHub API does not accept empty user agent.
62pub(crate) const USER_AGENT: &str =
63	concat!(env!("CARGO_PKG_NAME"), env!("CARGO_PKG_VERSION"));
64
65/// Request timeout value in seconds.
66pub(crate) const REQUEST_TIMEOUT: u64 = 30;
67
68/// TCP keepalive value in seconds.
69pub(crate) const REQUEST_KEEP_ALIVE: u64 = 60;
70
71/// Maximum number of entries to fetch in a single page.
72pub(crate) const MAX_PAGE_SIZE: usize = 100;
73
/// Trait for handling the different entries returned from the remote.
pub trait RemoteEntry {
	/// Returns the API URL for fetching the entries at the specified page.
	fn url(project_id: i64, api_url: &str, remote: &Remote, page: i32) -> String;
	/// Returns the request buffer size, i.e. how many page requests may be
	/// in flight concurrently when fetching.
	fn buffer_size() -> usize;
	/// Returns true if the client should stop fetching further pages once
	/// this entry has been received.
	fn early_exit(&self) -> bool;
}
83
84/// Trait for handling remote commits.
85pub trait RemoteCommit: DynClone {
86	/// Commit SHA.
87	fn id(&self) -> String;
88	/// Commit author.
89	fn username(&self) -> Option<String>;
90	/// Timestamp.
91	fn timestamp(&self) -> Option<i64>;
92	/// Convert date in RFC3339 format to unix timestamp
93	fn convert_to_unix_timestamp(&self, date: &str) -> i64 {
94		OffsetDateTime::parse(date, &Rfc3339)
95			.expect("failed to parse date")
96			.unix_timestamp()
97	}
98}
99
100dyn_clone::clone_trait_object!(RemoteCommit);
101
/// Trait for handling remote pull requests.
pub trait RemotePullRequest: DynClone {
	/// Pull request number.
	fn number(&self) -> i64;
	/// Title of the pull request.
	fn title(&self) -> Option<String>;
	/// Labels of the pull request.
	fn labels(&self) -> Vec<String>;
	/// SHA of the merge commit, if any.
	fn merge_commit(&self) -> Option<String>;
}

dyn_clone::clone_trait_object!(RemotePullRequest);
115
/// Result of a remote metadata fetch: the commits and the pull requests
/// retrieved from the remote.
pub type RemoteMetadata =
	(Vec<Box<dyn RemoteCommit>>, Vec<Box<dyn RemotePullRequest>>);
119
/// Metadata of a remote release.
#[derive(Debug, Default, Clone, Eq, PartialEq, Deserialize, Serialize)]
pub struct RemoteReleaseMetadata {
	/// Contributors to the release.
	pub contributors: Vec<RemoteContributor>,
}
126
127impl Remote {
128	/// Creates a HTTP client for the remote.
129	fn create_client(&self, accept_header: &str) -> Result<ClientWithMiddleware> {
130		if !self.is_set() {
131			return Err(Error::RemoteNotSetError);
132		}
133		let mut headers = HeaderMap::new();
134		headers.insert(
135			reqwest::header::ACCEPT,
136			HeaderValue::from_str(accept_header)?,
137		);
138		if let Some(token) = &self.token {
139			headers.insert(
140				reqwest::header::AUTHORIZATION,
141				format!("Bearer {}", token.expose_secret()).parse()?,
142			);
143		}
144		headers.insert(reqwest::header::USER_AGENT, USER_AGENT.parse()?);
145		let client_builder = Client::builder()
146			.timeout(Duration::from_secs(REQUEST_TIMEOUT))
147			.tcp_keepalive(Duration::from_secs(REQUEST_KEEP_ALIVE))
148			.default_headers(headers)
149			.tls_built_in_root_certs(false);
150		let client_builder = if self.native_tls.unwrap_or(false) {
151			client_builder.tls_built_in_native_certs(true)
152		} else {
153			client_builder.tls_built_in_webpki_certs(true)
154		};
155		let client = client_builder.build()?;
156		let client = ClientBuilder::new(client)
157			.with(Cache(HttpCache {
158				mode:    CacheMode::Default,
159				manager: CACacheManager {
160					path: dirs::cache_dir()
161						.ok_or_else(|| {
162							Error::DirsError(String::from(
163								"failed to find the user's cache directory",
164							))
165						})?
166						.join(env!("CARGO_PKG_NAME")),
167				},
168				options: HttpCacheOptions::default(),
169			}))
170			.build();
171		Ok(client)
172	}
173}
174
/// Trait for handling the API connection and fetching.
pub trait RemoteClient {
	/// API URL for a particular client
	const API_URL: &'static str;

	/// Name of the environment variable used to set the API URL to a
	/// self-hosted instance (if applicable).
	const API_URL_ENV: &'static str;

	/// Returns the API url.
	///
	/// Resolution order: the `API_URL_ENV` environment variable, then the
	/// remote's configured `api_url`, then the built-in `API_URL` constant.
	fn api_url(&self) -> String {
		env::var(Self::API_URL_ENV)
			.ok()
			.or(self.remote().api_url)
			.unwrap_or_else(|| Self::API_URL.to_string())
	}

	/// Returns the remote repository information.
	fn remote(&self) -> Remote;

	/// Returns the HTTP client for making requests.
	fn client(&self) -> ClientWithMiddleware;

	/// Returns true if the client should early exit.
	fn early_exit<T: DeserializeOwned + RemoteEntry>(&self, page: &T) -> bool {
		page.early_exit()
	}

	/// Retrieves a single object.
	///
	/// Note that an HTTP error status is only logged here; the body is
	/// still handed to the JSON parser, so such a failure typically
	/// surfaces as a deserialization error.
	async fn get_entry<T: DeserializeOwned + RemoteEntry>(
		&self,
		project_id: i64,
		page: i32,
	) -> Result<T> {
		let url = T::url(project_id, &self.api_url(), &self.remote(), page);
		debug!("Sending request to: {url}");
		let response = self.client().get(&url).send().await?;
		let response_text = if response.status().is_success() {
			let text = response.text().await?;
			trace!("Response: {:?}", text);
			text
		} else {
			// Non-success: log the body, then still attempt to parse it.
			let text = response.text().await?;
			error!("Request error: {}", text);
			text
		};
		Ok(serde_json::from_str::<T>(&response_text)?)
	}

	/// Retrieves a single page of entries.
	///
	/// An empty page is reported as a pagination error, which serves as
	/// the stop signal for `fetch`.
	async fn get_entries_with_page<T: DeserializeOwned + RemoteEntry>(
		&self,
		project_id: i64,
		page: i32,
	) -> Result<Vec<T>> {
		let url = T::url(project_id, &self.api_url(), &self.remote(), page);
		debug!("Sending request to: {url}");
		let response = self.client().get(&url).send().await?;
		let response_text = if response.status().is_success() {
			let text = response.text().await?;
			trace!("Response: {:?}", text);
			text
		} else {
			// Non-success: log the body, then still attempt to parse it.
			let text = response.text().await?;
			error!("Request error: {}", text);
			text
		};
		let response = serde_json::from_str::<Vec<T>>(&response_text)?;
		if response.is_empty() {
			// An empty page means all entries have been fetched.
			Err(Error::PaginationError(String::from("end of entries")))
		} else {
			Ok(response)
		}
	}

	/// Fetches the remote API and returns the given entry.
	///
	/// Pages are requested concurrently (up to `T::buffer_size()` in
	/// flight) starting from page 0; collection stops at the first failed
	/// page — normally the "end of entries" pagination error.
	///
	/// See `fetch_with_early_exit` for the early exit version of this method.
	async fn fetch<T: DeserializeOwned + RemoteEntry>(
		&self,
		project_id: i64,
	) -> Result<Vec<T>> {
		let entries: Vec<Vec<T>> = stream::iter(0..)
			.map(|i| self.get_entries_with_page(project_id, i))
			.buffered(T::buffer_size())
			.take_while(|page| {
				if let Err(e) = page {
					debug!("Error while fetching page: {:?}", e);
				}
				future::ready(page.is_ok())
			})
			// `take_while` above drops the first error, so the `Err` arm is
			// not expected to run; it is kept as a loud failure just in case.
			.map(|page| match page {
				Ok(v) => v,
				Err(ref e) => {
					log::error!("{:#?}", e);
					page.expect("failed to fetch page: {}")
				}
			})
			.collect()
			.await;
		Ok(entries.into_iter().flatten().collect())
	}

	/// Fetches the remote API and returns the given entry.
	///
	/// Early exits based on the response.
	///
	/// The page that triggers the early exit is itself excluded from the
	/// returned entries.
	async fn fetch_with_early_exit<T: DeserializeOwned + RemoteEntry>(
		&self,
		project_id: i64,
	) -> Result<Vec<T>> {
		let entries: Vec<T> = stream::iter(0..)
			.map(|i| self.get_entry::<T>(project_id, i))
			.buffered(T::buffer_size())
			.take_while(|page| {
				let status = match page {
					Ok(v) => !self.early_exit(v),
					Err(e) => {
						debug!("Error while fetching page: {:?}", e);
						true
					}
				};
				// Stop on early exit or on the first error.
				future::ready(status && page.is_ok())
			})
			// `take_while` above drops the first error, so the `Err` arm is
			// not expected to run; it is kept as a loud failure just in case.
			.map(|page| match page {
				Ok(v) => v,
				Err(ref e) => {
					log::error!("{:#?}", e);
					page.expect("failed to fetch page: {}")
				}
			})
			.collect()
			.await;
		Ok(entries)
	}
}
310
/// Generates a function for updating the release metadata for a remote.
///
/// `$remote` is the name of the per-commit remote metadata field (e.g. the
/// GitHub/GitLab field on `Commit`), and `$fn` is the name of the generated
/// method on `Release`.
#[doc(hidden)]
#[macro_export]
macro_rules! update_release_metadata {
	($remote: ident, $fn: ident) => {
		impl<'a> Release<'a> {
			/// Updates the remote metadata that is contained in the release.
			///
			/// This function takes two arguments:
			///
			/// - Commits: needed for associating the Git user with the GitHub
			///   username.
			/// - Pull requests: needed for generating the contributor list for the
			///   release.
			#[allow(deprecated)]
			pub fn $fn(
				&mut self,
				mut commits: Vec<Box<dyn RemoteCommit>>,
				pull_requests: Vec<Box<dyn RemotePullRequest>>,
			) -> Result<()> {
				let mut contributors: Vec<RemoteContributor> = Vec::new();
				let mut release_commit_timestamp: Option<i64> = None;
				// retain the commits that are not a part of this release for later
				// on checking the first contributors.
				commits.retain(|v| {
					if let Some(commit) =
						self.commits.iter_mut().find(|commit| commit.id == v.id())
					{
						// Remotes may report the merge commit as a short SHA,
						// so also match on the first 12 characters.
						let sha_short =
							Some(v.id().clone().chars().take(12).collect());
						let pull_request = pull_requests.iter().find(|pr| {
							pr.merge_commit() == Some(v.id().clone()) ||
								pr.merge_commit() == sha_short
						});
						commit.$remote.username = v.username();
						commit.$remote.pr_number = pull_request.map(|v| v.number());
						commit.$remote.pr_title =
							pull_request.and_then(|v| v.title().clone());
						commit.$remote.pr_labels = pull_request
							.map(|v| v.labels().clone())
							.unwrap_or_default();
						// Record each username only once; `is_first_time` is
						// computed in the pass below.
						if !contributors
							.iter()
							.any(|v| commit.$remote.username == v.username)
						{
							contributors.push(RemoteContributor {
								username:      commit.$remote.username.clone(),
								pr_title:      commit.$remote.pr_title.clone(),
								pr_number:     commit.$remote.pr_number,
								pr_labels:     commit.$remote.pr_labels.clone(),
								is_first_time: false,
							});
						}
						commit.remote = Some(commit.$remote.clone());
						// if remote commit is the release commit store timestamp for
						// use in calculation of first time
						if Some(v.id().clone()) == self.commit_id {
							release_commit_timestamp = v.timestamp().clone();
						}
						false
					} else {
						true
					}
				});
				// mark contributors as first-time
				self.$remote.contributors = contributors
					.into_iter()
					.map(|mut v| {
						// A contributor is first-time when no retained commit
						// (i.e. one outside this release) that passes the
						// timestamp filter carries the same username.
						v.is_first_time = !commits
							.iter()
							.filter(|commit| {
								// if current release is unreleased no need to filter
								// commits or filter commits that are from
								// newer releases
								// (`Option` ordering: `None` < `Some(_)`, so
								// commits without a timestamp count as older.)
								self.timestamp == 0 ||
									commit.timestamp() < release_commit_timestamp
							})
							.map(|v| v.username())
							.any(|login| login == v.username);
						v
					})
					.collect();
				Ok(())
			}
		}
	};
}