// AirLibrary/Logging/mod.rs
1//! # Structured Logging Module
2//!
3//! Provides comprehensive structured logging with JSON output, request ID
4//! propagation, context-aware logging, log rotation, sensitive data filtering,
5//! and validation.
6//!
7//! ## Responsibilities
8//!
9//! ### Structured Logging
10//! - JSON output format for machine parsing and analysis
11//! - Request ID and trace ID propagation across log entries
12//! - Context-aware logging with operation tracking
13//! - Log level filtering (TRACE, DEBUG, INFO, WARN, ERROR)
14//!
15//! ### Log Rotation
16//! - Size-based log rotation to prevent disk exhaustion
17//! - Time-based rotation (daily) for archival
18//! - Automatic cleanup of old log files
19//! - Compressed log file storage for space efficiency
20//!
21//! ### Context Management
22//! - Thread-local context storage for async operations
23//! - Automatic context propagation across await points
24//! - Correlation ID linking distributed requests
25//! - User and session tracking
26//!
27//! ### Sensitive Data Handling
28//! - Automatic redaction of sensitive fields
29//! - Configurable sensitive patterns
30//! - Sanitization of error messages
31//! - Audit logging for security events
32//!
33//! ### Log Validation
34//! - Structured log data validation before output
35//! - Schema enforcement for consistent format
36//! - Size limits on log messages
37//! - Malformed log rejection
38//!
39//! ## Integration with Mountain
40//!
41//! Logs flow to Mountain's debugging infrastructure:
42//! - Real-time log streaming to debug console
43//! - Historical log search and filtering
44//! - Error aggregation and alerting
45//! - Performance profiling logs
46//!
47//! ## VSCode Debugging References
48//!
49//! Similar logging patterns used in VSCode for:
50//! - Exception and error tracking
51//! - Debug output for extension development
52//! - Performance profiling traces
53//! - Cross-process communication logging
54//!
55//! Reference:
56//! vs/base/common/errors
57//!
58//! # FUTURE Enhancements
59//!
60//! - [DISTRIBUTED TRACING] Tighter integration with Tracing module
61//! - `ELASTICSEARCH`: Direct log export to Elasticsearch/Logstash
62//! - [LOG ANALYSIS] Automatic anomaly detection in logs
63//! - `KIBANA`: Pre-built Kibana dashboards
//! - [LOG PARSING] Support for custom log formats
//!
//! ## Sensitive Data Handling
66//!
67//! All logs are automatically sanitized:
68//! - Passwords, tokens, and secrets are redacted
69//! - User-identifiable information is masked
70//! - API keys and secrets are removed
71//! - Error messages are parsed for sensitive patterns
72
73use std::{
74	collections::HashMap,
75	path::{Path, PathBuf},
76	sync::{Arc, Mutex},
77	time::{SystemTime, UNIX_EPOCH},
78};
79
80use serde::{Deserialize, Serialize};
81use tracing_subscriber::{fmt::format::FmtSpan, prelude::*};
82use tracing_appender::rolling::Rotation;
83
84use crate::{Result, dev_log};
85
/// Configuration for log rotation and management.
///
/// Defaults (see the `Default` impl): 100 MB per file, 30 retained files,
/// daily rotation, compression enabled, logs under `./Log` with the `Air`
/// file-name prefix. Bounds below are enforced by `Validate`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogRotationConfig {
	/// Maximum size of a single log file in bytes before rotation
	/// (must be non-zero and at most 10 GB).
	pub MaxFileSizeBytes:u64,
	/// Maximum number of rotated log files to retain (1..=365).
	pub MaxFiles:usize,
	/// Rotation strategy (daily, hourly, never)
	pub Rotation:LogRotation,
	/// Whether to compress rotated log files
	pub Compress:bool,
	/// Log directory path
	pub LogDirectory:String,
	/// Log file name prefix
	pub LogFilePrefix:String,
}
102
/// Log rotation strategies.
///
/// The default strategy is `Daily` (see the `Default` impl).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq)]
pub enum LogRotation {
	/// Rotate daily
	Daily,
	/// Rotate every hour
	Hourly,
	/// Rotate every minute (for debugging)
	Minutely,
	/// Never rotate automatically
	Never,
}
115
116impl Default for LogRotation {
117	fn default() -> Self { Self::Daily }
118}
119
120impl Default for LogRotationConfig {
121	fn default() -> Self {
122		Self {
123			MaxFileSizeBytes:100 * 1024 * 1024, // 100 MB
124			MaxFiles:30,                        // Keep 30 days of logs
125			Rotation:LogRotation::Daily,
126			Compress:true,
127			LogDirectory:"./Log".to_string(),
128			LogFilePrefix:"Air".to_string(),
129		}
130	}
131}
132
133impl LogRotationConfig {
134	/// Validate log rotation configuration
135	pub fn Validate(&self) -> Result<()> {
136		if self.MaxFileSizeBytes == 0 {
137			return Err("MaxFileSizeBytes must be greater than 0".into());
138		}
139		if self.MaxFileSizeBytes > 10 * 1024 * 1024 * 1024 {
140			// Max 10 GB
141			return Err("MaxFileSizeBytes cannot exceed 10 GB".into());
142		}
143		if self.MaxFiles == 0 {
144			return Err("MaxFiles must be greater than 0".into());
145		}
146		if self.MaxFiles > 365 {
147			// Max 1 year retention
148			return Err("MaxFiles cannot exceed 365".into());
149		}
150		Ok(())
151	}
152
153	/// Convert to tracing_appender Rotation
154	pub fn ToTracingRotation(&self) -> Rotation {
155		match self.Rotation {
156			LogRotation::Daily => Rotation::DAILY,
157			LogRotation::Hourly => Rotation::HOURLY,
158			LogRotation::Minutely => Rotation::NEVER, // No minutely support
159			LogRotation::Never => Rotation::NEVER,
160		}
161	}
162}
163
/// Sensitive data patterns for redaction.
///
/// Defaults (see the `Default` impl): redaction enabled, standard patterns
/// included, no custom patterns.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SensitiveDataConfig {
	/// Enable automatic sensitive data redaction
	pub Enabled:bool,
	/// Custom patterns to redact (regex); invalid patterns are skipped
	/// with a warning rather than treated as a hard error.
	pub CustomPatterns:Vec<String>,
	/// Standard patterns to include (password, token, secret, etc.)
	pub IncludeStandardPatterns:bool,
}
174
175impl Default for SensitiveDataConfig {
176	fn default() -> Self { Self { Enabled:true, CustomPatterns:Vec::new(), IncludeStandardPatterns:true } }
177}
178
/// Context for structured logging with request IDs and metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogContext {
	/// Unique ID for the current request (required; checked by `Validate`).
	pub RequestId:String,
	/// Trace ID correlating related log entries (required; checked by `Validate`).
	pub TraceId:String,
	/// Span ID for the current unit of work (a UUID v4 when built via `New`).
	pub SpanId:String,
	/// Optional user identifier.
	pub UserId:Option<String>,
	/// Optional session identifier.
	pub SessionId:Option<String>,
	/// Name of the operation being performed (required; checked by `Validate`).
	pub Operation:String,
	/// Arbitrary key/value metadata carried with the context.
	pub Metadata:HashMap<String, String>,
}
190
191impl LogContext {
192	/// Create a new log context
193	pub fn New(Operation:impl Into<String>) -> Self {
194		let RequestId = crate::Utility::GenerateRequestId();
195		let TraceId = crate::Utility::GenerateRequestId();
196		let SpanId = uuid::Uuid::new_v4().to_string();
197
198		Self {
199			RequestId,
200			TraceId,
201			SpanId,
202			UserId:None,
203			SessionId:None,
204			Operation:Operation.into(),
205			Metadata:HashMap::new(),
206		}
207	}
208
209	/// Validate log context for required fields
210	pub fn Validate(&self) -> Result<()> {
211		if self.RequestId.is_empty() {
212			return Err("RequestId cannot be empty".into());
213		}
214		if self.TraceId.is_empty() {
215			return Err("TraceId cannot be empty".into());
216		}
217		if self.Operation.is_empty() {
218			return Err("Operation cannot be empty".into());
219		}
220		Ok(())
221	}
222
223	/// Set user ID in context
224	pub fn WithUserId(mut self, UserId:String) -> Self {
225		self.UserId = Some(UserId);
226		self
227	}
228
229	/// Set session ID in context
230	pub fn WithSessionId(mut self, SessionId:String) -> Self {
231		self.SessionId = Some(SessionId);
232		self
233	}
234
235	/// Add metadata to context
236	pub fn WithMetadata(mut self, Key:String, Value:String) -> Self {
237		self.Metadata.insert(Key, Value);
238		self
239	}
240
241	/// Add multiple metadata entries
242	pub fn WithMetadataMap(mut self, Metadata:HashMap<String, String>) -> Self {
243		self.Metadata.extend(Metadata);
244		self
245	}
246}
247
thread_local! {
	// Per-thread storage for the active LogContext; manipulated only via
	// SetLogContext / GetLogContext / ClearLogContext below.
	static LOG_CONTEXT: std::cell::RefCell<Option<LogContext>> = std::cell::RefCell::new(None);
}
251
252/// Set the log context for the current thread
253pub fn SetLogContext(Context:LogContext) {
254	if let Err(e) = Context.Validate() {
255		dev_log!("air", "error: [Logging] Invalid log context provided: {:?}", e);
256		return;
257	}
258	LOG_CONTEXT.with(|ctx| {
259		*ctx.borrow_mut() = Some(Context);
260	});
261}
262
263/// Get the current log context
264pub fn GetLogContext() -> Option<LogContext> { LOG_CONTEXT.with(|Context| Context.borrow().clone()) }
265
266/// Clear the log context for the current thread
267pub fn ClearLogContext() {
268	LOG_CONTEXT.with(|Context| {
269		*Context.borrow_mut() = None;
270	});
271}
272
/// Log file manager for rotation and cleanup.
#[allow(dead_code)]
pub struct LogManager {
	// Validated rotation settings (size cap, retention count, directory).
	Config:LogRotationConfig,
	// Path of the active log file, if one has been opened.
	CurrentFile:Arc<Mutex<Option<PathBuf>>>,
	// Running byte count of the active file, compared against MaxFileSizeBytes.
	CurrentSize:Arc<Mutex<u64>>,
}
280
281impl LogManager {
282	#[allow(dead_code)]
283	fn new(Config:LogRotationConfig) -> Result<Self> {
284		Config.Validate()?;
285
286		// Ensure log directory exists
287		std::fs::create_dir_all(&Config.LogDirectory)?;
288
289		Ok(Self {
290			Config,
291			CurrentFile:Arc::new(Mutex::new(None)),
292			CurrentSize:Arc::new(Mutex::new(0)),
293		})
294	}
295
296	/// Check if log rotation is needed
297	#[allow(dead_code)]
298	fn ShouldRotate(&self) -> bool {
299		let size = *self.CurrentSize.lock().unwrap();
300		size >= self.Config.MaxFileSizeBytes
301	}
302
303	/// Perform log rotation
304	#[allow(dead_code)]
305	fn Rotate(&self) -> Result<()> {
306		let CurrentFile = self.CurrentFile.lock().unwrap();
307
308		if let Some(ref FilePath) = *CurrentFile {
309			// Rename current file with timestamp
310			let Timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
311
312			let RotatedPath = format!("{}.{}.log", FilePath.display(), Timestamp);
313
314			std::fs::rename(FilePath, &RotatedPath)?;
315
316			// Compress if enabled
317			if self.Config.Compress {
318				self.CompressFile(&RotatedPath)?;
319			}
320
321			// Cleanup old log files
322			self.CleanupOldLogs()?;
323		}
324
325		*self.CurrentSize.lock().unwrap() = 0;
326
327		Ok(())
328	}
329
330	/// Compress a log file
331	#[allow(dead_code)]
332	fn CompressFile(&self, path:&str) -> crate::Result<()> {
333		// Basic compression - in production would use actual compression
334		let _ = path;
335		Ok(())
336	}
337
338	/// Cleanup old log files
339	#[allow(dead_code)]
340	fn CleanupOldLogs(&self) -> Result<()> {
341		let log_dir = Path::new(&self.Config.LogDirectory);
342
343		if !log_dir.exists() {
344			return Ok(());
345		}
346
347		let mut log_files:Vec<_> = std::fs::read_dir(log_dir)?
348			.filter_map(|e| e.ok())
349			.filter(|e| {
350				e.path()
351					.extension()
352					.and_then(|s| s.to_str())
353					.map(|ext| ext == "log")
354					.unwrap_or(false)
355			})
356			.collect();
357
358		// Sort by modification time (newest first)
359		log_files.sort_by(|a, b| {
360			let a_time = a.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
361			let b_time = b.metadata().and_then(|m| m.modified()).unwrap_or(UNIX_EPOCH);
362			b_time.cmp(&a_time)
363		});
364
365		// Keep only max_files
366		for file in log_files.into_iter().skip(self.Config.MaxFiles) {
367			let _ = std::fs::remove_file(file.path());
368		}
369
370		Ok(())
371	}
372}
373
/// Sensitive data filter for log sanitization.
#[derive(Debug, Clone)]
pub struct SensitiveDataFilter {
	// When false, Filter returns its input unchanged.
	enabled:bool,
	// Compiled regexes; every match is replaced with "[REDACTED]".
	patterns:Vec<regex::Regex>,
}
380
381impl Default for SensitiveDataFilter {
382	fn default() -> Self {
383		let mut patterns = Vec::new();
384
385		// Standard sensitive patterns - simplified to avoid escaping issues
386		patterns.push(regex::Regex::new(r"(?i)password[=[:space:]]+\S+").unwrap());
387		patterns.push(regex::Regex::new(r"(?i)token[=[:space:]]+\S+").unwrap());
388		patterns.push(regex::Regex::new(r"(?i)secret[=[:space:]]+\S+").unwrap());
389		patterns.push(regex::Regex::new(r"(?i)(api|private)[_-]?key[=[:space:]]+\S+").unwrap());
390		patterns.push(regex::Regex::new(r"(?i)authorization[=[:space:]]+Bearer[[:space:]]+\S+").unwrap());
391		patterns.push(regex::Regex::new(r"(?i)credential[=[:space:]]+\S+").unwrap());
392
393		Self { enabled:true, patterns }
394	}
395}
396
397impl SensitiveDataFilter {
398	fn new(Config:SensitiveDataConfig) -> Result<Self> {
399		let mut filter = Self::default();
400		filter.enabled = Config.Enabled;
401
402		if !Config.IncludeStandardPatterns {
403			filter.patterns.clear();
404		}
405
406		// Add custom patterns
407		for pattern in &Config.CustomPatterns {
408			match regex::Regex::new(pattern) {
409				Ok(re) => filter.patterns.push(re),
410				Err(e) => dev_log!("air", "warn: [Logging] Failed to compile custom regex '{}': {}", pattern, e),
411			}
412		}
413
414		Ok(filter)
415	}
416
417	/// Filter sensitive data from a string
418	fn Filter(&self, input:&str) -> String {
419		if !self.enabled {
420			return input.to_string();
421		}
422
423		let mut filtered = input.to_string();
424
425		for pattern in &self.patterns {
426			filtered = pattern.replace_all(&filtered, "[REDACTED]").to_string();
427		}
428
429		filtered
430	}
431}
432
/// Structured log entry for validation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StructuredLogEntry {
	/// Entry timestamp (numeric; unit not enforced here — presumably Unix
	/// seconds, matching `Rotate`'s timestamps — TODO confirm with producers).
	pub Timestamp:u64,
	/// Log level name; must be one of TRACE/DEBUG/INFO/WARN/ERROR (see `Validate`).
	pub Level:String,
	/// Log message; must be non-empty and at most 10 KB (see `Validate`).
	pub Message:String,
	/// Optional request ID from the originating LogContext.
	pub RequestId:Option<String>,
	/// Optional trace ID from the originating LogContext.
	pub TraceId:Option<String>,
	/// Optional span ID from the originating LogContext.
	pub SpanId:Option<String>,
	/// Optional operation name.
	pub Operation:Option<String>,
	/// Optional user identifier.
	pub UserId:Option<String>,
	/// Arbitrary key/value metadata.
	pub Metadata:HashMap<String, String>,
}
446
447impl StructuredLogEntry {
448	/// Validate log entry structure
449	pub fn Validate(&self) -> Result<()> {
450		if self.Level.is_empty() {
451			return Err("log level cannot be empty".into());
452		}
453		if self.Message.is_empty() {
454			return Err("log message cannot be empty".into());
455		}
456		if !["TRACE", "DEBUG", "INFO", "WARN", "ERROR"].contains(&self.Level.as_str()) {
457			return Err(format!("invalid log level: {}", self.Level).into());
458		}
459		if self.Message.len() > 10000 {
460			// Max 10KB message
461			return Err("log message too large".into());
462		}
463		Ok(())
464	}
465}
466
/// Context-aware logger for structured logging.
#[derive(Debug, Clone)]
pub struct ContextLogger {
	// Emit JSON (true) or human-readable text (false) to stderr.
	json_output:bool,
	// Optional path for a daily-rotated log file mirror.
	log_file_path:Option<String>,
	// Stored rotation settings; NOTE(review): not consulted by Initialize,
	// which always builds a daily file appender.
	#[allow(dead_code)]
	rotation_config:Option<LogRotationConfig>,
	// Redacts sensitive data from every message before it is logged.
	sensitive_filter:Arc<SensitiveDataFilter>,
	// Ensures tracing initialization runs at most once per instance.
	initialized:Arc<Mutex<bool>>,
}
477
478impl ContextLogger {
479	/// Create a new context logger
480	pub fn New(json_output:bool, log_file_path:Option<String>) -> Self {
481		Self {
482			json_output,
483			log_file_path,
484			rotation_config:None,
485			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
486			initialized:Arc::new(Mutex::new(false)),
487		}
488	}
489
490	/// Create with log rotation configuration
491	pub fn WithRotation(
492		json_output:bool,
493		log_file_path:Option<String>,
494		rotation_config:LogRotationConfig,
495	) -> Result<Self> {
496		rotation_config.Validate()?;
497
498		Ok(Self {
499			json_output,
500			log_file_path,
501			rotation_config:Some(rotation_config),
502			sensitive_filter:Arc::new(SensitiveDataFilter::default()),
503			initialized:Arc::new(Mutex::new(false)),
504		})
505	}
506
507	/// Set sensitive data filter configuration
508	pub fn WithSensitiveFilter(mut self, Config:SensitiveDataConfig) -> Result<Self> {
509		self.sensitive_filter = Arc::new(SensitiveDataFilter::new(Config)?);
510		Ok(self)
511	}
512
513	/// Initialize the logging system with tracing
514	pub fn Initialize(&self) -> Result<()> {
515		// Check if already initialized
516		let mut initialized = self.initialized.lock().unwrap();
517		if *initialized {
518			return Ok(());
519		}
520
521		let filter = tracing_subscriber::EnvFilter::from_default_env()
522			.add_directive(tracing_subscriber::filter::LevelFilter::INFO.into());
523
524		if self.json_output {
525			// JSON output format
526			let fmt_layer = tracing_subscriber::fmt::layer()
527				.json()
528				.with_current_span(true)
529				.with_span_list(false)
530				.with_target(true)
531				.with_file(true)
532				.with_line_number(true)
533				.with_writer(std::io::stderr)
534				.with_ansi(false)
535				.with_span_events(FmtSpan::FULL);
536
537			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);
538
539			// Set up log file if specified
540			if let Some(ref log_path) = self.log_file_path {
541				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
542				let log_file = std::path::Path::new(log_path)
543					.file_name()
544					.unwrap_or(std::ffi::OsStr::new("Air.log"));
545
546				let file_appender = tracing_appender::rolling::daily(log_dir, log_file);
547				let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
548
549				let file_layer = tracing_subscriber::fmt::layer()
550					.json()
551					.with_current_span(true)
552					.with_span_list(false)
553					.with_target(true)
554					.with_file(true)
555					.with_line_number(true)
556					.with_writer(non_blocking)
557					.with_ansi(false)
558					.with_span_events(FmtSpan::FULL);
559
560				registry.with(file_layer).init();
561			} else {
562				registry.init();
563			}
564		} else {
565			// Standard text output format
566			let fmt_layer = tracing_subscriber::fmt::layer()
567				.with_target(true)
568				.with_file(true)
569				.with_line_number(true)
570				.with_writer(std::io::stderr)
571				.with_ansi(true)
572				.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);
573
574			let registry = tracing_subscriber::registry().with(filter).with(fmt_layer);
575
576			// Set up log file if specified
577			if let Some(ref log_path) = self.log_file_path {
578				let log_dir = std::path::Path::new(log_path).parent().unwrap_or(std::path::Path::new("."));
579				let log_file = std::path::Path::new(log_path)
580					.file_name()
581					.unwrap_or(std::ffi::OsStr::new("Air.log"));
582
583				let file_appender = tracing_appender::rolling::daily(log_dir, log_file);
584				let (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);
585
586				let file_layer = tracing_subscriber::fmt::layer()
587					.with_target(true)
588					.with_file(true)
589					.with_line_number(true)
590					.with_writer(non_blocking)
591					.with_ansi(false)
592					.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE);
593
594				registry.with(file_layer).init();
595			} else {
596				registry.init();
597			}
598		}
599
600		*initialized = true;
601		dev_log!("air", "[Logging] ContextLogger initialized - JSON output: {}", self.json_output);
602		Ok(())
603	}
604
605	/// Log with context at info level
606	pub fn Info(&self, message:impl Into<String>) {
607		let msg = self.sensitive_filter.Filter(&message.into());
608		if let Some(Context) = GetLogContext() {
609			dev_log!(
610				"air",
611				"[{}] req={} trace={} span={} {}",
612				Context.Operation,
613				Context.RequestId,
614				Context.TraceId,
615				Context.SpanId,
616				msg
617			);
618		} else {
619			dev_log!("air", "{}", msg);
620		}
621	}
622
623	/// Log with context at debug level
624	pub fn Debug(&self, message:impl Into<String>) {
625		let msg = self.sensitive_filter.Filter(&message.into());
626		if let Some(Context) = GetLogContext() {
627			dev_log!(
628				"air",
629				"[{}] req={} trace={} span={} {}",
630				Context.Operation,
631				Context.RequestId,
632				Context.TraceId,
633				Context.SpanId,
634				msg
635			);
636		} else {
637			dev_log!("air", "{}", msg);
638		}
639	}
640
641	/// Log with context at warn level
642	pub fn Warn(&self, message:impl Into<String>) {
643		let msg = self.sensitive_filter.Filter(&message.into());
644		if let Some(Context) = GetLogContext() {
645			dev_log!(
646				"air",
647				"warn: [{}] req={} trace={} span={} {}",
648				Context.Operation,
649				Context.RequestId,
650				Context.TraceId,
651				Context.SpanId,
652				msg
653			);
654		} else {
655			dev_log!("air", "warn: {}", msg);
656		}
657	}
658
659	/// Log with context at error level
660	pub fn Error(&self, message:impl Into<String>) {
661		let msg = self.sensitive_filter.Filter(&message.into());
662		if let Some(Context) = GetLogContext() {
663			dev_log!(
664				"air",
665				"error: [{}] req={} trace={} span={} {}",
666				Context.Operation,
667				Context.RequestId,
668				Context.TraceId,
669				Context.SpanId,
670				msg
671			);
672		} else {
673			dev_log!("air", "error: {}", msg);
674		}
675	}
676}
677
/// Global context logger instance.
///
/// Set once by the `InitializeLogger*` functions; read via `GetLogger`,
/// which falls back to a plain text logger if no initialization happened.
static LOGGER_INSTANCE:std::sync::OnceLock<ContextLogger> = std::sync::OnceLock::new();
680
681/// Get the global context logger
682pub fn GetLogger() -> &'static ContextLogger { LOGGER_INSTANCE.get_or_init(|| ContextLogger::New(false, None)) }
683
684/// Initialize the global context logger
685pub fn InitializeLogger(json_output:bool, log_file_path:Option<String>) -> Result<()> {
686	let logger = ContextLogger::New(json_output, log_file_path);
687	logger.Initialize()?;
688	let _old = LOGGER_INSTANCE.set(logger);
689	Ok(())
690}
691
692/// Initialize the global context logger with rotation
693pub fn InitializeLoggerWithRotation(
694	json_output:bool,
695	log_file_path:Option<String>,
696	rotation_config:LogRotationConfig,
697	sensitive_config:Option<SensitiveDataConfig>,
698) -> Result<()> {
699	let mut logger = ContextLogger::WithRotation(json_output, log_file_path, rotation_config)?;
700
701	if let Some(sensitive_config) = sensitive_config {
702		logger = logger.WithSensitiveFilter(sensitive_config)?;
703	}
704
705	logger.Initialize()?;
706	let _old = LOGGER_INSTANCE.set(logger);
707	Ok(())
708}