// Core modules — always compiled.
pub mod auth;
pub mod base;
pub mod config;
pub mod constants;
pub mod entity;
pub mod error;
pub mod http;
pub mod logging;
pub mod server;
pub mod state;
pub mod utils;

// Database layer — enabled when any backend feature is selected.
#[cfg(any(feature = "mysql", feature = "postgres", feature = "sqlite"))]
pub mod database;

pub mod cache;

// Client-side load balancing (circuit breaker, retry, health checks — see
// the re-exports at the bottom of this file).
#[cfg(feature = "balance")]
pub mod balance;

// Nacos integration (service registration/discovery and config subscription).
#[cfg(feature = "nacos")]
pub mod nacos;

// Kafka-backed messaging abstractions.
#[cfg(feature = "kafka")]
pub mod messaging;

// Flat re-exports so downstream crates can reference these items directly
// from the crate root instead of through submodule paths.
pub use base::{CursorPageBaseResp, R};
pub use config::Config;
// Renamed on re-export to avoid clashing with the messaging-layer
// consumer/producer types exported further below.
#[cfg(feature = "kafka")]
pub use config::{
    KafkaConfig, KafkaConsumerConfig as ConfigKafkaConsumerConfig,
    KafkaProducerConfig as ConfigKafkaProducerConfig,
};
// Shared entity field-name constants (audit columns, tree fields, tenancy).
pub use constants::{
    CREATE_BY, CREATE_BY_FIELD, CREATE_ORG_ID_FIELD, CREATE_TIME, CREATE_TIME_FIELD, DELETE_FIELD,
    ID_FIELD, LABEL, PARENT_ID, PARENT_ID_FIELD, SORT_VALUE, SORT_VALUE_FIELD, TENANT_ID,
    UPDATE_BY, UPDATE_BY_FIELD, UPDATE_TIME, UPDATE_TIME_FIELD,
};

pub use auth::{RequestContext, user_context_middleware, Claims, JwtService};
pub use entity::*;
pub use error::{AppError, AppResult};
pub use http::{
    create_cors_layer, request_logging_middleware, grpc_log_request, grpc_log_response,
    health_check, root,
};
#[cfg(feature = "kafka")]
pub use messaging::{
    kafka::{KafkaConsumer, KafkaProducer},
    Message, MessageConsumer, MessageConsumerType, MessageProducer, MessageProducerType,
};
// NOTE(review): gated on "consumer" while the items come from the
// kafka-gated `messaging` module — presumably "consumer" implies "kafka";
// confirm the feature dependency is declared in Cargo.toml.
#[cfg(feature = "consumer")]
pub use messaging::{KafkaMessageHandler, KafkaMessageRouter};
#[cfg(feature = "nacos")]
pub use nacos::{
    deregister_service, get_config, get_config_client, get_naming_client, get_service_instances,
    get_subscribed_configs, get_subscribed_services, init_nacos, register_service,
    subscribe_configs, subscribe_services,
};
pub use server::{Server, ServerBuilder};
pub use state::AppState;
pub use utils::get_uid_from_headers;
63
64pub(crate) fn init_logging(config: &Config) -> Result<(), anyhow::Error> {
71 use time::UtcOffset;
72 use tracing_subscriber::{
73 fmt::time::OffsetTime, layer::SubscriberExt, util::SubscriberInitExt,
74 };
75
76 let tz_hour = config.log.timezone;
78 let offset = UtcOffset::from_hms(tz_hour as i8, 0, 0)
79 .map_err(|e| {
80 if tz_hour >= 0 {
81 anyhow::anyhow!("无效的时区偏移 UTC+{}: {}", tz_hour, e)
82 } else {
83 anyhow::anyhow!("无效的时区偏移 UTC{}: {}", tz_hour, e)
84 }
85 })?;
86
87 eprintln!(
88 "ℹ 日志时区设置: UTC{}",
89 if tz_hour >= 0 {
90 format!("+{}", tz_hour)
91 } else {
92 tz_hour.to_string()
93 }
94 );
95
96 let timer = OffsetTime::new(
97 offset,
98 time::format_description::parse(
99 "[year]-[month]-[day] [hour]:[minute]:[second].[subsecond digits:3]",
100 )
101 .map_err(|e| anyhow::anyhow!("时间格式解析失败: {}", e))?,
102 );
103
104 let env_filter = tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| {
106 config.log.level.as_str().into()
107 });
108
109 let stdout_json = if config.log.json {
111 Some(
112 tracing_subscriber::fmt::layer()
113 .json()
114 .with_timer(timer.clone())
115 .with_line_number(true),
116 )
117 } else {
118 None
119 };
120 let stdout_plain = if !config.log.json {
121 Some(
122 tracing_subscriber::fmt::layer()
123 .with_timer(timer.clone())
124 .with_line_number(true),
125 )
126 } else {
127 None
128 };
129
130 let (file_json, file_plain) = if let Some(ref file_config) = config.log.file {
132 std::fs::create_dir_all(&file_config.directory)
134 .map_err(|e| anyhow::anyhow!("无法创建日志目录 {}: {}", file_config.directory, e))?;
135
136 if file_config.count_limit > 0 {
138 crate::logging::cleanup_old_logs(
139 &file_config.directory,
140 &file_config.filename,
141 file_config.count_limit,
142 ).ok();
143 }
144
145 let file_appender = match file_config.rotation.as_str() {
147 "size" => {
148 eprintln!(
149 "ℹ 日志配置: 按大小滚动 (限制: {}MB, 保留: {} 个文件)",
150 if file_config.size_limit_mb == 0 {
151 "无限制".to_string()
152 } else {
153 file_config.size_limit_mb.to_string()
154 },
155 if file_config.count_limit == 0 {
156 "无限制".to_string()
157 } else {
158 file_config.count_limit.to_string()
159 }
160 );
161 tracing_appender::rolling::daily(
162 &file_config.directory,
163 &file_config.filename,
164 )
165 }
166 _ => {
167 tracing_appender::rolling::daily(
168 &file_config.directory,
169 &file_config.filename,
170 )
171 }
172 };
173
174 let is_file_json = file_config.format.as_str() == "json";
175 let fj = if is_file_json {
176 Some(
177 tracing_subscriber::fmt::layer()
178 .json()
179 .with_writer(file_appender)
180 .with_timer(timer.clone())
181 .with_line_number(true)
182 .with_ansi(false),
183 )
184 } else {
185 None
186 };
187 let fp = if !is_file_json {
189 let file_appender2 = tracing_appender::rolling::daily(
190 &file_config.directory,
191 &file_config.filename,
192 );
193 Some(
194 tracing_subscriber::fmt::layer()
195 .with_writer(file_appender2)
196 .with_timer(timer)
197 .with_line_number(true)
198 .with_ansi(false),
199 )
200 } else {
201 None
202 };
203 (fj, fp)
204 } else {
205 (None, None)
206 };
207
208 tracing_subscriber::registry()
210 .with(env_filter)
211 .with(stdout_json)
212 .with(stdout_plain)
213 .with(file_json)
214 .with(file_plain)
215 .try_init()
216 .map_err(|e| anyhow::anyhow!("日志系统初始化失败: {}", e))?;
217
218 Ok(())
219}
220
221
// Load-balancing / resilient-gRPC re-exports (circuit breaker, health
// checking, round-robin selection, retrying client builder).
// NOTE(review): this block sits below `init_logging` while every other
// feature-gated re-export lives near the top of the file — consider
// regrouping for consistency.
#[cfg(feature = "balance")]
pub use balance::{
    create_grpc_channel, create_grpc_client, get_load_balancer, get_service_endpoints,
    grpc_call, get_instance_circuit_breaker, get_existing_instance_circuit_breaker,
    CircuitBreaker, CircuitBreakerConfig, CircuitState, GrpcClientBuilder,
    HealthCheckConfig, HealthStatus, LoadBalancer, ResilientGrpcClient, RetryConfig,
    RoundRobinLoadBalancer, ServiceEndpoint, start_health_checker,
};
230