aliyun_log_rust_sdk/client/get_logs.rs

use super::*;
use crate::{compress::CompressType, error::Result};
use crate::{RequestError, RequestErrorKind, ResponseResult};
use getset::Getters;
use http::header::ACCEPT_ENCODING;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

impl crate::client::Client {
    /// Get logs from a logstore using the given query.
    ///
    /// This method allows you to query logs from a specific logstore within a project.
    /// It supports various query parameters including time range, filtering, and pagination.
    /// The query syntax follows the Aliyun Log Service query language.
    ///
    /// # Arguments
    ///
    /// * `project` - The name of the project containing the logstore
    /// * `logstore` - The name of the logstore to query logs from
    ///
    /// # Examples
    ///
    /// Basic query with time range, offset, limit, and filter:
    ///
    /// ```
    /// # async fn example(client: aliyun_log_rust_sdk::Client) -> Result<(), aliyun_log_rust_sdk::Error> {
    /// use aliyun_log_rust_sdk::GetLogsRequest;
    /// use chrono::Utc;
    ///
    /// let now = Utc::now().timestamp();
    /// let one_hour_ago = now - 3600;
    /// let resp = client.get_logs("my-project", "my-logstore")
    ///     .from(one_hour_ago)         // Start time (required)
    ///     .to(now)                    // End time (required)
    ///     .query("level:ERROR")       // Filter for error logs only
    ///     .offset(0)                  // Start from the first log
    ///     .lines(100)                 // Return up to 100 logs
    ///     .send()
    ///     .await?;
    ///
    /// // Check if the query completed successfully
    /// if resp.get_body().is_complete() {
    ///     println!("Query completed successfully");
    /// } else {
    ///     println!("Query is incomplete, you may need to retry later");
    /// }
    ///
    /// // Process the returned logs
    /// println!("Retrieved {} logs", resp.get_body().logs_count());
    ///
    /// for log in resp.get_body().logs() {
    ///     // Each log is a HashMap<String, String>; print all fields in the log
    ///     for (key, value) in log {
    ///         println!("  {}: {}", key, value);
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
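    ///
    /// A sketch of an analytic (SQL-style) query. The `* | SELECT ...` statement and the
    /// `level` field are illustrative; whether such a query succeeds depends on the
    /// logstore's index configuration:
    ///
    /// ```
    /// # async fn example_sql(client: aliyun_log_rust_sdk::Client) -> Result<(), aliyun_log_rust_sdk::Error> {
    /// use chrono::Utc;
    ///
    /// let now = Utc::now().timestamp();
    /// let one_hour_ago = now - 3600;
    /// let resp = client.get_logs("my-project", "my-logstore")
    ///     .from(one_hour_ago)
    ///     .to(now)
    ///     // Count logs per level; `offset`/`lines` are not needed in SQL mode
    ///     .query("* | SELECT \"level\", count(*) AS cnt GROUP BY \"level\"")
    ///     .power_sql(true) // opt in to power SQL, if it is enabled for the project
    ///     .send()
    ///     .await?;
    ///
    /// for row in resp.get_body().logs() {
    ///     // Each aggregated row is returned as a HashMap<String, String>
    ///     println!("{:?}", row);
    /// }
    /// # Ok(())
    /// # }
    /// ```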
    pub fn get_logs(
        &self,
        project: impl AsRef<str>,
        logstore: impl AsRef<str>,
    ) -> GetLogsRequestBuilder {
        GetLogsRequestBuilder {
            project: project.as_ref().to_string(),
            path: format!("/logstores/{}/logs", logstore.as_ref()),
            handle: self.handle.clone(),
            from: None,
            to: None,
            topic: None,
            lines: None,
            offset: None,
            reverse: None,
            query: None,
            power_sql: None,
            need_highlight: None,
            from_ns_part: None,
            to_ns_part: None,
        }
    }
}

/// Parameters of a GetLogs call; all fields except `project` and `path` are serialized
/// into the JSON request body.
#[derive(Serialize)]
pub struct GetLogsRequest {
    #[serde(skip_serializing)]
    project: String,
    #[serde(skip_serializing)]
    path: String,

    from: i64,
    to: i64,

    #[serde(rename = "fromNs")]
    #[serde(skip_serializing_if = "Option::is_none")]
    from_ns_part: Option<u32>,

    #[serde(rename = "toNs")]
    #[serde(skip_serializing_if = "Option::is_none")]
    to_ns_part: Option<u32>,

    #[serde(skip_serializing_if = "Option::is_none")]
    topic: Option<String>,

    #[serde(skip_serializing_if = "Option::is_none")]
    lines: Option<u32>,

    #[serde(skip_serializing_if = "Option::is_none")]
    offset: Option<u32>,

    #[serde(skip_serializing_if = "Option::is_none")]
    reverse: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    query: Option<String>,

    #[serde(rename = "powerSql")]
    #[serde(skip_serializing_if = "Option::is_none")]
    power_sql: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    need_highlight: Option<bool>,
}

impl Request for GetLogsRequest {
    const HTTP_METHOD: http::Method = http::Method::POST;
    const CONTENT_TYPE: Option<http::HeaderValue> = Some(LOG_JSON);
    type ResponseBody = GetLogsResponse;
    fn project(&self) -> Option<&str> {
        Some(self.project.as_str())
    }
    fn path(&self) -> &str {
        &self.path
    }
    fn body(&self) -> Result<Option<bytes::Bytes>, RequestError> {
        let body = serde_json::to_string(self)
            .map(|s| s.into_bytes())
            .map_err(RequestErrorKind::from)
            .map_err(RequestError::from)?;
        Ok(Some(body.into()))
    }
    fn headers(&self) -> http::HeaderMap {
        let mut headers = http::HeaderMap::new();
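        // Advertise LZ4 as an accepted encoding for the response body.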
        headers.insert(
            ACCEPT_ENCODING,
            CompressType::Lz4
                .to_string()
                .parse()
                .expect("failed to parse CompressType into a header value"),
        );
        headers
    }
}

/// Builder for a GetLogs request, returned by `Client::get_logs`.
pub struct GetLogsRequestBuilder {
    project: String,
    path: String,
    handle: HandleRef,

    from: Option<i64>,
    to: Option<i64>,
    topic: Option<String>,
    lines: Option<u32>,
    offset: Option<u32>,
    reverse: Option<bool>,
    query: Option<String>,
    power_sql: Option<bool>,
    from_ns_part: Option<u32>,
    to_ns_part: Option<u32>,
    need_highlight: Option<bool>,
}

impl GetLogsRequestBuilder {
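    /// Builds the request from the parameters set so far and sends it asynchronously.
    /// The returned future resolves to an error if a required parameter (`from` or `to`)
    /// is missing.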
    #[must_use = "the result future must be awaited"]
    pub fn send(self) -> ResponseResultBoxFuture<GetLogsResponse> {
        Box::pin(async move {
            let (handle, request) = self.build()?;
            handle.send(request).await
        })
    }
    /// Required, the start time of the query, as a Unix timestamp in seconds, e.g. 1609459200.
    pub fn from(mut self, from: i64) -> Self {
        self.from = Some(from);
        self
    }

    /// Required, the end time of the query, as a Unix timestamp in seconds, e.g. 1609459200.
    pub fn to(mut self, to: i64) -> Self {
        self.to = Some(to);
        self
    }

    /// Optional, the topic of the logs to query.
    pub fn topic<T: Into<String>>(mut self, topic: T) -> Self {
        self.topic = Some(topic.into());
        self
    }

    /// The maximum number of logs to return; required if the query is not in SQL mode.
    pub fn lines(mut self, lines: u32) -> Self {
        self.lines = Some(lines);
        self
    }

    /// The offset of the first log to return; required if the query is not in SQL mode.
    pub fn offset(mut self, offset: u32) -> Self {
        self.offset = Some(offset);
        self
    }

    /// Optional, whether to return the logs in reverse order; defaults to false.
    pub fn reverse(mut self, reverse: bool) -> Self {
        self.reverse = Some(reverse);
        self
    }

    /// Required, the query string to use.
    pub fn query<T: Into<String>>(mut self, query: T) -> Self {
        self.query = Some(query.into());
        self
    }

    /// Optional, whether to use power SQL for the query.
    pub fn power_sql(mut self, power_sql: bool) -> Self {
        self.power_sql = Some(power_sql);
        self
    }

    /// Optional, the nanosecond part of the start time of the query; ranges from 0 to 999,999,999.
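    /// Combined with [`from`](Self::from), this gives the start time sub-second precision;
    /// for example, `from(1609459200)` together with `from_ns_part(500_000_000)` denotes
    /// 1609459200.5 seconds.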
    pub fn from_ns_part(mut self, from_ns_part: u32) -> Self {
        self.from_ns_part = Some(from_ns_part);
        self
    }

    /// Optional, the nanosecond part of the end time of the query; ranges from 0 to 999,999,999.
    pub fn to_ns_part(mut self, to_ns_part: u32) -> Self {
        self.to_ns_part = Some(to_ns_part);
        self
    }

    /// Optional, whether to return highlight information for the query results.
    pub fn need_highlight(mut self, need_highlight: bool) -> Self {
        self.need_highlight = Some(need_highlight);
        self
    }

    fn build(self) -> BuildResult<GetLogsRequest> {
        check_required!(("from", self.from), ("to", self.to));

        Ok((
            self.handle,
            GetLogsRequest {
                from: self.from.unwrap(),
                to: self.to.unwrap(),
                topic: self.topic,
                lines: self.lines,
                offset: self.offset,
                reverse: self.reverse,
                query: self.query,
                power_sql: self.power_sql,
                from_ns_part: self.from_ns_part,
                to_ns_part: self.to_ns_part,
                need_highlight: self.need_highlight,
                project: self.project,
                path: self.path,
            },
        ))
    }
}

/// Response of a GetLogs query: query metadata plus the matched logs.
#[derive(Debug, Deserialize)]
pub struct GetLogsResponse {
    meta: get_logs_models::GetLogsMeta,
    #[serde(rename = "data")]
    logs: Vec<HashMap<String, String>>,
}

impl GetLogsResponse {
    /// Returns true if the query has completed.
    pub fn is_complete(&self) -> bool {
        self.meta().progress().eq_ignore_ascii_case("complete")
    }
    /// Returns the number of logs returned.
    pub fn logs_count(&self) -> usize {
        self.logs.len()
    }
    /// Consumes the response and returns the logs.
    pub fn take_logs(self) -> Vec<HashMap<String, String>> {
        self.logs
    }
    /// Returns the queried logs.
    pub fn logs(&self) -> &Vec<HashMap<String, String>> {
        &self.logs
    }
    /// Returns a mutable reference to the queried logs.
    pub fn logs_mut(&mut self) -> &mut Vec<HashMap<String, String>> {
        &mut self.logs
    }
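    /// Returns the query metadata (progress, statistics, and SQL-related information).
    ///
    /// A minimal sketch of reading a few statistics through the generated getters; the
    /// project and logstore names are placeholders, and the statistics may be absent,
    /// hence the `Option`s:
    ///
    /// ```
    /// # async fn example(client: aliyun_log_rust_sdk::Client) -> Result<(), aliyun_log_rust_sdk::Error> {
    /// let resp = client.get_logs("my-project", "my-logstore")
    ///     .from(1609459200)
    ///     .to(1609462800)
    ///     .query("*")
    ///     .offset(0)
    ///     .lines(10)
    ///     .send()
    ///     .await?;
    ///
    /// println!("progress: {}", resp.get_body().meta().progress());
    /// if let Some(rows) = resp.get_body().meta().processed_rows() {
    ///     println!("processed {} rows", rows);
    /// }
    /// if let Some(ms) = resp.get_body().meta().elapsed_millisecond() {
    ///     println!("elapsed {} ms", ms);
    /// }
    /// # Ok(())
    /// # }
    /// ```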
    pub fn meta(&self) -> &get_logs_models::GetLogsMeta {
        &self.meta
    }
}

impl FromHttpResponse for GetLogsResponse {
    fn try_from(body: bytes::Bytes, http_headers: &http::HeaderMap) -> ResponseResult<Self> {
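        // The GetLogs response body is JSON; deserialize it into a GetLogsResponse.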
        parse_json_response(body.as_ref(), http_headers)
    }
}

pub mod get_logs_models {
    use super::*;

    /// Metadata of a GetLogs query: progress, statistics, and SQL-related information.
    #[derive(Debug, Deserialize, Default, Getters)]
    #[serde(rename_all = "snake_case", default = "GetLogsMeta::default")]
    #[allow(dead_code)]
    #[getset(get = "pub")]
    pub struct GetLogsMeta {
        progress: String,
        agg_query: Option<String>,
        where_query: Option<String>,
        #[serde(rename = "hasSQL")]
        has_sql: Option<bool>,
        processed_rows: Option<i64>,
        elapsed_millisecond: Option<i64>,
        cpu_sec: Option<f64>,
        cpu_cores: Option<f64>,
        limited: Option<i64>,
        count: Option<i64>,
        processed_bytes: Option<i64>,
        telementry_type: Option<String>,
        power_sql: Option<bool>,
        #[serde(rename = "insertedSQL")]
        inserted_sql: Option<String>,
        keys: Option<Vec<String>>,
        terms: Option<Vec<MetaTerm>>,
        marker: Option<String>,
        mode: Option<i32>,
        phrase_query_info: Option<PhraseQueryInfoV3>,
        shard: Option<i32>,
        scan_bytes: Option<i64>,
        is_accurate: Option<bool>,
        column_types: Option<Vec<String>>,
        highlights: Option<Vec<HashMap<String, String>>>,
    }

    #[derive(Debug, Deserialize, Getters)]
    #[allow(dead_code)]
    #[getset(get = "pub")]
    pub struct MetaTerm {
        key: String,
        term: String,
    }

    #[derive(Debug, Deserialize, Getters)]
    #[allow(dead_code)]
    #[getset(get = "pub")]
    pub struct PhraseQueryInfoV3 {
        scan_all: Option<bool>,
        begin_offset: Option<i64>,
        end_offset: Option<i64>,
        end_time: Option<i64>,
    }
}