terracotta 0.4.2

Boilerplate webserver application based on Axum
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
//! Statistics middleware.



//		Packages																										

use super::state::StateProvider;
use axum::http::{Method, StatusCode};
use chrono::{TimeDelta, NaiveDateTime, SubsecRound as _, Utc};
use core::time::Duration;
use serde::{Serialize, Serializer};
use smart_default::SmartDefault;
use std::{
	collections::VecDeque,
	sync::Arc,
};
use tokio::{
	select,
	spawn,
	sync::broadcast,
	time::{interval, sleep},
};
use tracing::error;

#[cfg(feature = "utoipa")]
use utoipa::ToSchema;



//		Structs																											

//		Endpoint																
/// A formalised definition of an endpoint for identification.
/// 
/// Combines the request path and HTTP verb. Derives [`Eq`] and [`Hash`] so
/// that it can be used as a key when aggregating per-endpoint statistics.
#[derive(Clone, Debug, Eq, Hash, PartialEq, SmartDefault)]
#[expect(clippy::exhaustive_structs, reason = "Exhaustive")]
pub struct Endpoint {
	//		Public properties													
	/// The path of the endpoint, minus any query parameters. As this is just
	/// the path, it does not contain scheme or authority (host), and hence is
	/// not a full URI.
	pub path:   String,
	
	/// The HTTP verb of the endpoint.
	pub method: Method,
}

//󰭅		Serialize																
impl Serialize for Endpoint {
	//		serialize															
	/// Serialises the endpoint as a single string of the form
	/// `"METHOD path"`, e.g. `"GET /health"`, rather than as a struct.
	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
	where
		S: Serializer,
	{
		//	collect_str() writes the Display output straight into the serialiser,
		//	avoiding the intermediate String allocation that format!() incurs.
		serializer.collect_str(&format_args!("{} {}", self.method, self.path))
	}
}

//		StatsForPeriod															
/// Average, maximum, minimum, and count of values for a period of time.
/// 
/// A freshly-defaulted instance has a `started_at` of the current UTC time
/// (via the `SmartDefault` attribute below) and zeroes elsewhere.
#[derive(Clone, Copy, Debug, PartialEq, Serialize, SmartDefault)]
#[cfg_attr(feature = "utoipa", derive(ToSchema))]
#[non_exhaustive]
pub struct StatsForPeriod {
	//		Public properties													
	/// The date and time the period started. Defaults to the moment the
	/// struct is created.
	#[default(Utc::now().naive_utc())]
	pub started_at: NaiveDateTime,
	
	/// Average value.
	pub average:    f64,
	
	/// Maximum value.
	pub maximum:    u64,
	
	/// Minimum value.
	pub minimum:    u64,
	
	/// The total number of values.
	pub count:      u64,
}

//󰭅		StatsForPeriod															
impl StatsForPeriod {
	//		initialize															
	/// Creates a new set of stats seeded from a single sample.
	/// 
	/// # Parameters
	/// 
	/// * `value` - The single value to start with. It becomes the average,
	///             maximum, and minimum, and the count starts at 1.
	/// 
	#[must_use]
	pub fn initialize(value: u64) -> Self {
		#[expect(clippy::cast_precision_loss, reason = "Not expected to get anywhere near 52 bits")]
		Self {
			average: value as f64,
			maximum: value,
			minimum: value,
			count:   1,
			..Default::default()
		}
	}
	
	//		update																
	/// Merges another set of stats into this one.
	/// 
	/// The minimum is adopted outright if no samples have been recorded here
	/// yet, or lowered when the incoming stats carry at least one sample and a
	/// lower minimum. The maximum is simply the larger of the two. The count
	/// becomes the combined (saturating) total of both sets.
	/// 
	/// The average is recalculated as a weighted average of the two averages,
	/// with each side weighted by its share of the combined sample count, so
	/// the result leans towards whichever set contributed more samples.
	/// 
	/// The start time is left untouched.
	/// 
	/// # Parameters
	/// 
	/// * `stats` - The stats to merge in.
	/// 
	pub fn update(&mut self, stats: &Self) {
		let adopt_minimum = self.count == 0 || (stats.count > 0 && stats.minimum < self.minimum);
		if adopt_minimum {
			self.minimum = stats.minimum;
		}
		self.maximum = self.maximum.max(stats.maximum);
		self.count   = self.count.saturating_add(stats.count);
		if stats.count > 0 && self.count > 0 {
			#[expect(clippy::cast_precision_loss, reason = "Not expected to get anywhere near 52 bits")]
			let share    = stats.count as f64 / self.count as f64;
			self.average = self.average.mul_add(1.0 - share, stats.average * share);
		}
	}
}

//		AllStatsForPeriod														
/// Average, maximum, minimum, and count of values for a period of time, for all
/// areas being measured.
/// 
/// This is the message type broadcast to listeners each time a completed
/// period is wrapped up by the statistics processor.
#[derive(Clone, Debug, Default, PartialEq, Serialize)]
#[cfg_attr(feature = "utoipa", derive(ToSchema))]
#[non_exhaustive]
pub struct AllStatsForPeriod {
	//		Public properties													
	/// The average, maximum, and minimum response times in microseconds, plus
	/// sample count, for the most recent second.
	pub times:       StatsForPeriod,
	
	/// The average, maximum, and minimum open connections, plus sample count,
	/// for the most recent second.
	pub connections: StatsForPeriod,
	
	/// The average, maximum, and minimum memory usage in bytes, plus sample
	/// count, for the most recent second.
	pub memory:      StatsForPeriod,
}

//		ResponseMetrics															
/// Metrics for a single response.
/// 
/// This is used by the statistics queue in [`AppState.stats.Queue`]: one
/// instance is sent per completed response, and consumed by the statistics
/// processor.
/// 
#[derive(Clone, Debug, Eq, PartialEq, SmartDefault)]
#[non_exhaustive]
pub struct ResponseMetrics {
	//		Public properties													
	/// The endpoint that was requested.
	pub endpoint:    Endpoint,
	
	/// The date and time the request started. Defaults to the moment the
	/// struct is created.
	#[default(Utc::now().naive_utc())]
	pub started_at:  NaiveDateTime,
	
	/// The time the response took to be generated, in microseconds.
	pub time_taken:  u64,
	
	/// The status code of the response.
	pub status_code: StatusCode,
	
	/// The number of open connections at the time the response was generated.
	pub connections: u64,
	
	/// The amount of memory allocated at the time the response was generated,
	/// in bytes.
	pub memory:      u64,
}



//		Functions																										

//		start																	
/// Starts the statistics processor.
/// 
/// This function creates the statistics queue and broadcast channels, stores
/// them on the application state, pre-allocates the circular statistics
/// buffers, and then spawns an asynchronous task that will process the
/// statistics queue in [`AppState.stats.Queue`]. The task will run until the
/// channel is disconnected. If statistics are disabled in configuration, it
/// does nothing.
/// 
/// The processing of the statistics is done in a separate task so that the
/// request-handling threads can continue to handle requests without being
/// blocked by the statistics processing. This way, none of them are ever
/// affected more than others. The stats-handling task blocks on the queue, so
/// it will only process a response time when one is available.
/// 
/// The task will also wake up every second to ensure that the period that has 
/// just ended gets wrapped up. This is necessary because the task otherwise
/// only wakes up when the queue has data in it, and if there is a period of
/// inactivity then the current period will not be completed until the next
/// request comes in. This can lead to a long delay until the statistics are
/// updated, which is undesirable because the buffer will be stuck at the
/// position of the last period to be completed.
/// 
/// Although this periodic wake-up does incur a very slight overhead, it is
/// extremely small, and ensures that the statistics are always up-to-date.
/// 
/// # Parameters
/// 
/// * `state` - The application state. The receiving end of the queue is
///             created here and moved into the spawned task; the sending end
///             is stored on the state for the middleware to use.
/// 
pub async fn start<SP: StateProvider>(state: &Arc<SP>) {
	//	Statistics gathering can be disabled entirely via configuration
	if !state.config().enabled {
		return;
	}
	let appstate            = Arc::clone(state);
	let (sender, receiver)  = flume::unbounded();
	let (tx, rx)            = broadcast::channel(10);
	let mut stats_state     = appstate.state().write().await;
	stats_state.queue       = Some(sender);
	stats_state.broadcaster = Some(tx);
	stats_state.listener    = Some(rx);
	//	Fixed time period of the current second
	let mut current_second  = Utc::now().naive_utc().trunc_subsecs(0);
	//	Cumulative stats for the current second
	let mut timing_stats    = StatsForPeriod::default();
	let mut conn_stats      = StatsForPeriod::default();
	let mut memory_stats    = StatsForPeriod::default();
	
	//	Initialise circular buffers. We reserve the capacities here right at the
	//	start so that the application always uses exactly the same amount of
	//	memory for the buffers, so that any memory-usage issues will be spotted
	//	immediately. For instance, if someone set the config value high enough
	//	to store a year's worth of data (around 1.8GB) and the system didn't
	//	have enough memory it would fail right away, instead of gradually
	//	building up to that point which would make it harder to diagnose.
	{
		let mut buffers = stats_state.data.buffers.write();
		buffers.responses  .reserve(appstate.config().timing_buffer_size);
		buffers.connections.reserve(appstate.config().connection_buffer_size);
		buffers.memory     .reserve(appstate.config().memory_buffer_size);
	}
	drop(stats_state);
	
	drop(spawn(async move {
		//	Wait until the start of the next second, to align with it so that the
		//	tick interval change happens right after the second change, to wrap up
		//	the data for the period that has just ended.
		sleep(
			current_second
				.checked_add_signed(TimeDelta::seconds(1))
				.unwrap_or(current_second)
				.signed_duration_since(Utc::now().naive_utc())
				.to_std()
				.unwrap_or(Duration::from_secs(0))
		).await;
		
		//	Queue processing loop
		let mut timer = interval(Duration::from_secs(1));
		loop { select!{
			_ = timer.tick() => {
				//	Ensure last period is wrapped up
				stats_processor(
					&*appstate,
					None,
					&mut timing_stats,
					&mut conn_stats,
					&mut memory_stats,
					&mut current_second,
				).await;
			}
			//	Wait for message - this is a blocking call
			message = receiver.recv_async() => {
				if let Ok(response_metrics) = message {
					//	Process response time
					stats_processor(
						&*appstate,
						Some(response_metrics),
						&mut timing_stats,
						&mut conn_stats,
						&mut memory_stats,
						&mut current_second,
					).await;
				} else {
					error!("Channel has been disconnected, exiting thread.");
					break;
				}
			}
		}}
	}));
}

//		stats_processor															
/// Processes a single response time.
/// 
/// This function processes a single response metrics sample, updating the
/// calculated statistics accordingly, and wraps up any time period that has
/// just ended, pushing its stats onto the circular buffers and broadcasting
/// them to any listeners.
/// 
/// # Parameters
/// 
/// * `state`          - The application state.
/// * `metrics`        - The response metrics to process, received from the
///                      statistics queue in [`AppState.stats.Queue`]. If
///                      [`None`], then no stats will be added or altered, and
///                      no counters will be incremented, but the most-recent
///                      period will be checked and wrapped up if not already
///                      done.
/// * `timing_stats`   - The cumulative timing stats for the current second.
/// * `conn_stats`     - The cumulative connection stats for the current second.
/// * `memory_stats`   - The cumulative memory stats for the current second.
/// * `current_second` - The current second.
/// 
async fn stats_processor<SP: StateProvider>(
	state:          &SP,
	metrics:        Option<ResponseMetrics>,
	timing_stats:   &mut StatsForPeriod,
	conn_stats:     &mut StatsForPeriod,
	memory_stats:   &mut StatsForPeriod,
	current_second: &mut NaiveDateTime
) {
	//		Helper functions													
	/// Updates a circular buffer with new data, creating one entry per elapsed
	/// second: the first carries the accumulated stats for the period just
	/// ended, and any further entries are empty defaults for idle seconds.
	/// 
	/// # Parameters
	/// 
	/// * `buffer`         - The circular buffer to push wrapped-up periods onto
	///                      (newest at the front).
	/// * `buffer_size`    - Maximum buffer length; the oldest entry is evicted
	///                      once this size is reached.
	/// * `stats`          - The cumulative stats being wrapped up. Reset to a
	///                      fresh default after each push.
	/// * `current_second` - The start of the first period being wrapped up.
	/// * `elapsed`        - The number of whole seconds elapsed since
	///                      `current_second`.
	/// * `message`        - The broadcast message being built up.
	/// * `update_message` - Closure that copies the wrapped-up stats into the
	///                      relevant field of the broadcast message.
	fn update_buffer(
		buffer:             &mut VecDeque<StatsForPeriod>,
		buffer_size:        usize,
		stats:              &mut StatsForPeriod,
		current_second:     &NaiveDateTime,
		elapsed:            i64,
		message:            &mut AllStatsForPeriod,
		mut update_message: impl FnMut(&mut StatsForPeriod, &mut AllStatsForPeriod),
	) {
		for i in 0..elapsed {
			//	Evict the oldest entry to keep the buffer at its fixed size
			if buffer.len() == buffer_size {
				_ = buffer.pop_back();
			}
			stats.started_at = current_second.checked_add_signed(TimeDelta::seconds(i)).unwrap_or(*current_second);
			buffer.push_front(*stats);
			update_message(stats, message);
			//	Start a fresh (zeroed-count) period for the next second
			*stats = StatsForPeriod::default();
		}
	}
	
	//		Preparation															
	let new_second: NaiveDateTime;
	#[expect(clippy::shadow_reuse, reason = "Clear purpose")]
	if let Some(metrics) = metrics {
		//	Prepare new stats
		let new_timing_stats = StatsForPeriod::initialize(metrics.time_taken);
		let new_conn_stats   = StatsForPeriod::initialize(metrics.connections);
		let new_memory_stats = StatsForPeriod::initialize(metrics.memory);
		
		//	Increment cumulative stats
		timing_stats.update(&new_timing_stats);
		conn_stats  .update(&new_conn_stats);
		memory_stats.update(&new_memory_stats);
		
	//		Update statistics													
		//	Lock source data
		let stats_state = state.state().read().await;
		let mut totals = stats_state.data.totals.lock();
		
		//	Update responses counter
		_ = totals.codes.entry(metrics.status_code).and_modify(|e| *e = e.saturating_add(1)).or_insert(1);
		
		//	Update response time stats
		totals.times.update(&new_timing_stats);
		
		//	Update endpoint response time stats
		_ = totals.endpoints
			.entry(metrics.endpoint)
			.and_modify(|ep_stats| ep_stats.update(&new_timing_stats))
			.or_insert(new_timing_stats)
		;
		
		//	Update connections usage stats
		totals.connections.update(&new_conn_stats);
		
		//	Update memory usage stats
		totals.memory.update(&new_memory_stats);
		
		//	Unlock source data
		drop(totals);
		drop(stats_state);
		
	//		Check time period													
		new_second = metrics.started_at.trunc_subsecs(0);
	} else {
		new_second = Utc::now().naive_utc().trunc_subsecs(0);
	}
	
	//	Check to see if we've moved into a new time period. We want to increment
	//	the request count and total response time until it "ticks" over into
	//	another second. At this point it will calculate an average and add this
	//	data (average, min, max) to a fixed-length circular buffer of seconds.
	//	This way, the last period's data can be calculated by looking through
	//	the circular buffer of seconds.
	if new_second > *current_second {
		#[expect(clippy::arithmetic_side_effects, reason = "Nothing interesting can happen here")]
		let elapsed     = (new_second - *current_second).num_seconds();
		let stats_state = state.state().read().await;
		let mut buffers = stats_state.data.buffers.write();
		let mut message = AllStatsForPeriod::default();
		//	Timing stats buffer
		update_buffer(
			&mut buffers.responses,
			state.config().timing_buffer_size,
			timing_stats,
			current_second,
			elapsed,
			&mut message,
			|stats, msg| { msg.times = *stats; },
		);
		//	Connections stats buffer
		update_buffer(
			&mut buffers.connections,
			state.config().connection_buffer_size,
			conn_stats,
			current_second,
			elapsed,
			&mut message,
			|stats, msg| { msg.connections = *stats; },
		);
		//	Memory stats buffer
		update_buffer(
			&mut buffers.memory,
			state.config().memory_buffer_size,
			memory_stats,
			current_second,
			elapsed,
			&mut message,
			|stats, msg| { msg.memory = *stats; },
		);
		drop(buffers);
		//	NOTE(review): when elapsed > 1, the most recently wrapped-up period
		//	actually started at new_second - 1s, but last_second is set to the
		//	old current_second here — confirm this is intended.
		*stats_state.data.last_second.write() = *current_second;
		*current_second = new_second;
		if let Some(ref broadcaster) = stats_state.broadcaster {
			drop(broadcaster.send(message).inspect_err(|err| error!("Failed to broadcast stats: {err}")));
		}
		drop(stats_state);
	}
}