// Mountain/Binary/Main/Entry.rs
1#![allow(unused_imports)]
2
3//! # Entry (Binary/Main)
4//!
5//! ## RESPONSIBILITIES
6//!
7//! Main application entry point that orchestrates the complete application
8//! lifecycle. This function coordinates:
9//! - Tokio runtime creation and management
10//! - CLI argument parsing
11//! - Application state initialization
12//! - Tauri application builder setup
13//! - Service initialization (Vine, Cocoon, Configuration)
14//! - Graceful shutdown handling
15//!
16//! ## ARCHITECTURAL ROLE
17//!
18//! The Entry module is the **primary entry point** in Mountain's architecture:
19//!
20//! ```text
21//! main.rs ──► Binary::Main::Entry::Fn()
22//!                                    │
23//!                                    ▼
24//! AppLifecycle ──► Service Initialization ──► Tauri App Run
25//!                                           │
26//!                                           ▼
27//!                                   Graceful Shutdown
28//! ```
29//!
30//! ## KEY COMPONENTS
31//!
32//! - **Fn()**: Main entry point exported as `Binary::Main::Fn()`
33//! - Tokio runtime management
34//! - Application state initialization via StateBuild
35//! - Tauri builder configuration via TauriBuild
36//! - Service orchestration (Vine, Cocoon, Configuration)
37//! - Event-driven lifecycle management
38//!
39//! ## ERROR HANDLING
40//!
41//! - Panics on fatal errors (Tokio runtime failure, Tauri build failure)
42//! - Logs errors for service initialization failures
43//! - Graceful degradation for non-critical service failures
44//!
45//! ## LOGGING
46//!
47//! Uses the TraceStep! macro for checkpoint logging at TRACE level.
48//! Additional logging at DEBUG, INFO, WARN, and ERROR levels throughout.
49//!
50//! ## PERFORMANCE CONSIDERATIONS
51//!
52//! - Tokio multi-threaded runtime for optimal performance
53//! - Asynchronous service initialization
54//! - Lazy initialization where possible
55//!
56//! ## TODO
57//! - [ ] Add crash recovery mechanism
58//! - [ ] Implement proper error dialog for startup failures
59//! - [ ] Add startup performance metrics
60
61use std::sync::Arc;
62
63use tauri::{App, Manager, RunEvent, Wry};
64use Echo::Scheduler::{Scheduler::Scheduler, SchedulerBuilder::SchedulerBuilder};
65
66use crate::dev_log;
67use crate::{
68	// Crate root imports
69	ApplicationState::ApplicationState,
70	Binary::Build::DnsCommands::init_dns_startup_time,
71	Binary::Build::DnsCommands::{
72		self,
73		dns_get_forward_allowlist,
74		dns_get_health_status,
75		dns_get_server_info,
76		dns_get_zone_info,
77		dns_health_check,
78		dns_resolve,
79		dns_test_resolution,
80	},
81	// Binary submodule imports
82	Binary::Build::LocalhostPlugin::LocalhostPlugin as LocalhostPluginFn,
83	Binary::Build::LoggingPlugin::LoggingPlugin as LoggingPluginFn,
84	Binary::Build::Scheme::{self, DnsPort, init_service_registry, land_scheme_handler, register_land_service},
85	Binary::Build::ServiceRegistry::ServiceRegistry as ServiceRegistryFn,
86	Binary::Build::TauriBuild::TauriBuild as TauriBuildFn,
87	Binary::Build::WindowBuild::WindowBuild as WindowBuildFn,
88	Binary::Extension::ExtensionPopulate::ExtensionPopulate as ExtensionPopulateFn,
89	Binary::Extension::ScanPathConfigure::ScanPathConfigure as ScanPathConfigureFn,
90	Binary::Initialize::CliParse::Parse as CliParseFn,
91	Binary::Initialize::LogLevel::Resolve as ResolveLogLevel,
92	Binary::Initialize::PortSelector::BuildUrl as BuildPortUrl,
93	Binary::Initialize::PortSelector::Select as SelectPort,
94	Binary::Initialize::StateBuild::Build as BuildStateFn,
95	Binary::Register::AdvancedFeaturesRegister::AdvancedFeaturesRegister as AdvancedFeaturesRegisterFn,
96	Binary::Register::CommandRegister::CommandRegister as CommandRegisterFn,
97	Binary::Register::IPCServerRegister::IPCServerRegister as IPCServerRegisterFn,
98	Binary::Register::StatusReporterRegister::StatusReporterRegister as StatusReporterRegisterFn,
99	Binary::Register::WindSyncRegister::WindSyncRegister as WindSyncRegisterFn,
100	Binary::Service::CocoonStart::CocoonStart as CocoonStartFn,
101	Binary::Service::ConfigurationInitialize::ConfigurationInitialize as ConfigurationInitializeFn,
102	Binary::Service::VineStart::VineStart as VineStartFn,
103	Binary::Shutdown::RuntimeShutdown::RuntimeShutdown as RuntimeShutdownFn,
104	Binary::Shutdown::SchedulerShutdown::SchedulerShutdown as SchedulerShutdownFn,
105	Command,
106	Environment::MountainEnvironment::MountainEnvironment,
107	ProcessManagement::InitializationData,
108	RunTime::ApplicationRunTime::ApplicationRunTime,
109	Track,
110};
111use super::AppLifecycle::AppLifecycleSetup;
112
113// Note: Tauri commands are used with fully qualified paths in generate_handler
114// because the __cmd_* macros generated by #[tauri::command] are module-local.
115
/// Logs a checkpoint message at TRACE level.
///
/// Thin forwarding macro: every invocation is routed to `dev_log!` under the
/// fixed "lifecycle" channel, so all boot/shutdown checkpoints share one tag.
macro_rules! TraceStep {
	($($Token:tt)*) => {
		dev_log!("lifecycle", $($Token)*);
	};
}
122
/// The main function that orchestrates the application lifecycle.
///
/// This function:
/// 1. Creates a Tokio runtime
/// 2. Parses CLI arguments
/// 3. Builds application state
/// 4. Creates a scheduler
/// 5. Selects a port for the local server
/// 6. Resolves the log level
/// 7. Sets up the Tauri builder
/// 8. Configures the application lifecycle
/// 9. Runs the Tauri application
/// 10. Handles graceful shutdown
///
/// # Panics
///
/// Panics if the Tokio runtime cannot be built or if the Tauri application
/// fails to build (`generate_context!` / `.build()`); both are treated as
/// fatal startup errors. Service-level failures (DNS, lifecycle setup) are
/// logged and degrade gracefully instead of panicking.
pub fn Fn() {
	// -------------------------------------------------------------------------
	// [Boot] [Runtime] Tokio runtime creation
	// -------------------------------------------------------------------------
	TraceStep!("[Boot] [Runtime] Building Tokio runtime...");

	// Multi-threaded runtime with all drivers (I/O + time) enabled. A failure
	// here means nothing else can run, hence the fatal expect.
	let Runtime = tokio::runtime::Builder::new_multi_thread()
		.enable_all()
		.build()
		.expect("FATAL: Cannot build Tokio runtime.");

	TraceStep!("[Boot] [Runtime] Tokio runtime built.");

	// The entire application lifecycle runs inside this block_on; it returns
	// only after the Tauri event loop has fully exited.
	Runtime.block_on(async {
		// ---------------------------------------------------------------------
		// [Boot] [Args] CLI parsing (using CliParse module)
		// ---------------------------------------------------------------------
		// Parsed but currently unused (leading underscore); kept so CLI
		// validation side effects of CliParseFn still happen at boot.
		let _WorkspaceConfigurationPath = CliParseFn();
		let _InitialFolders:Vec<String> = vec![];

		// ---------------------------------------------------------------------
		// [Boot] [State] ApplicationState (using StateBuild module)
		// ---------------------------------------------------------------------
		dev_log!("lifecycle", "[Boot] [State] Building ApplicationState...");

		// Create application state directly (StateBuild::Build with default config)
		let AppState = ApplicationState::default();

		dev_log!(
			"lifecycle",
			"[Boot] [State] ApplicationState created with {} workspace folders.",
			// A poisoned lock degrades to reporting 0 folders rather than panicking.
			AppState.Workspace.WorkspaceFolders.lock().map(|f| f.len()).unwrap_or(0)
		);

		// Create Arc for application state to be managed by Tauri.
		// The same Arc is cloned into `.manage(...)` and into the setup closure.
		let AppStateArcForClosure = Arc::new(AppState.clone());

		// ---------------------------------------------------------------------
		// [Boot] [Runtime] Scheduler handles (using RuntimeBuild module)
		// ---------------------------------------------------------------------
		// One clone is moved into the setup closure; the original is moved into
		// the `.run(...)` closure below for shutdown.
		let Scheduler = Arc::new(SchedulerBuilder::Create().Build());
		let SchedulerForClosure = Scheduler.clone();
		TraceStep!("[Boot] [Echo] Scheduler handles prepared.");

		// ---------------------------------------------------------------------
		// [Boot] [Localhost] Port selection (using PortSelector module)
		// ---------------------------------------------------------------------
		let ServerPort = SelectPort();
		let LocalhostUrl = BuildPortUrl(ServerPort);

		// ---------------------------------------------------------------------
		// [Boot] [Logging] Log level resolution (using LogLevel module)
		// ---------------------------------------------------------------------
		let log_level = ResolveLogLevel();

		// ---------------------------------------------------------------------
		// [Boot] [Tauri] Builder setup (using TauriBuild module)
		// ---------------------------------------------------------------------
		let Builder = TauriBuildFn();

		Builder
			.plugin(LoggingPluginFn(log_level))
			.plugin(LocalhostPluginFn(ServerPort))
			.manage(AppStateArcForClosure.clone())
			.setup({
				// Shadowed captures: the closure takes owned copies so the
				// outer values remain usable after the builder chain.
				let LocalhostUrl = LocalhostUrl.clone();
				let ServerPortForClosure = ServerPort;
				move |app:&mut App| {
					dev_log!("lifecycle", "[Lifecycle] [Setup] Setup hook started.");
					dev_log!("lifecycle", "[Lifecycle] [Setup] LocalhostUrl={}", LocalhostUrl);

					// ---------------------------------------------------------
					// [Service Registry] Initialize service registry for land:// routing
					// ---------------------------------------------------------
					dev_log!(
						"lifecycle",
						"[Lifecycle] [Setup] Initializing ServiceRegistry for land:// scheme..."
					);
					let service_registry = ServiceRegistryFn::new();
					init_service_registry(service_registry.clone());

					// ---------------------------------------------------------
					// [Service Registry] Register local HTTP services
					// ---------------------------------------------------------
					// Register the main code editor service
					dev_log!(
						"lifecycle",
						"[Lifecycle] [Setup] Registering code.editor.land service on port {}",
						ServerPortForClosure
					);
					register_land_service("code.editor.land", ServerPortForClosure);

					// Register API editor service (same port for now, can be separate later)
					register_land_service("api.editor.land", ServerPortForClosure);

					// Register assets editor service (same port for now, can be separate later)
					register_land_service("assets.editor.land", ServerPortForClosure);

					// Make the registry available as managed state for Tauri commands
					app.manage(service_registry);
					dev_log!(
						"lifecycle",
						"[Lifecycle] [Setup] ServiceRegistry initialized and services registered."
					);

					// ---------------------------------------------------------
					// [DNS Server] Start the Hickory DNS server
					// ---------------------------------------------------------
					// The DNS server must start BEFORE any webview loads to ensure
					// that land:// protocol_resolution is available.
					// NOTE(review): `Mist` is not imported in this file — presumably an
					// extern-prelude crate whose `start(port)` returns the bound port;
					// confirm against the Mist crate's API.
					dev_log!("lifecycle", "[Lifecycle] [Setup] Starting DNS server on preferred port 5380...");
					let dns_port = Mist::start(5380).unwrap_or_else(|e| {
						dev_log!(
							"lifecycle",
							"warn: [Lifecycle] [Setup] Failed to start DNS server on port 5380: {}",
							e
						);
						// Fallback to random port if preferred port fails
						Mist::start(0).unwrap_or_else(|e| {
							dev_log!(
								"lifecycle",
								"error: [Lifecycle] [Setup] Completely failed to start DNS server: {}",
								e
							);
							0 // Return 0 as error indicator
						})
					});

					// Port 0 is the sentinel for "DNS completely failed"; the app
					// continues without land:// rather than aborting startup.
					if dns_port == 0 {
						dev_log!(
							"lifecycle",
							"warn: [Lifecycle] [Setup] DNS server failed to start, land:// protocol will not be \
							 available"
						);
					} else {
						dev_log!(
							"lifecycle",
							"[Lifecycle] [Setup] DNS server started successfully on port {}",
							dns_port
						);
						// Initialize DNS startup time for tracking
						crate::Binary::Build::DnsCommands::init_dns_startup_time();
					}

					// Register DnsPort as managed state for Tauri commands
					// (registered even on failure so commands can observe the 0 sentinel).
					app.manage(DnsPort(dns_port));

					let AppHandle = app.handle().clone();
					TraceStep!("[Lifecycle] [Setup] AppHandle acquired.");

					// ---------------------------------------------------------
					// Setup application lifecycle through AppLifecycle module
					// ---------------------------------------------------------
					let AppStateArcFromClosure = AppStateArcForClosure.clone();

					// Lifecycle setup failure is logged but non-fatal: the setup
					// hook still returns Ok so the window can come up.
					if let Err(e) = AppLifecycleSetup(
						app,
						AppHandle.clone(),
						LocalhostUrl.clone(),
						SchedulerForClosure.clone(),
						AppStateArcFromClosure,
					) {
						dev_log!("lifecycle", "error: [Lifecycle] [Setup] Failed to setup lifecycle: {}", e);
					}

					Ok(())
				}
			})
			// Registered as asynchronous, but the handler resolves synchronously
			// and responds inline.
			.register_asynchronous_uri_scheme_protocol("land", |_ctx, request, responder| {
				// Implemented: delegate to synchronous scheme handler
				let response = crate::Binary::Build::Scheme::land_scheme_handler(&request);
				responder.respond(response);
			})
			.register_asynchronous_uri_scheme_protocol("vscode-file", |ctx, request, responder| {
				// VS Code Electron workbench uses vscode-file:// to load assets.
				// Maps to embedded frontend assets from Sky/Target.
				// Handled on a dedicated thread so asset loading never blocks
				// the protocol callback.
				let AppHandle = ctx.app_handle().clone();
				std::thread::spawn(move || {
					let response = crate::Binary::Build::Scheme::VscodeFileSchemeHandler(&AppHandle, &request);
					responder.respond(response);
				});
			})
			.plugin(tauri_plugin_dialog::init())
			.plugin(tauri_plugin_fs::init())
			// Fully qualified paths are required here: the __cmd_* items emitted
			// by #[tauri::command] are module-local (see note at top of file).
			.invoke_handler(tauri::generate_handler![
				crate::Binary::Tray::SwitchTrayIcon::SwitchTrayIcon,
				crate::Binary::IPC::WorkbenchConfigurationCommand::MountainGetWorkbenchConfiguration,
				Command::TreeView::GetTreeViewChildren,
				Command::LanguageFeature::MountainProvideHover,
				Command::LanguageFeature::MountainProvideCompletions,
				Command::LanguageFeature::MountainProvideDefinition,
				Command::LanguageFeature::MountainProvideReferences,
				Command::SourceControlManagement::GetAllSourceControlManagementState,
				Command::Keybinding::GetResolvedKeybinding,
				Track::FrontendCommand::DispatchFrontendCommand,
				Track::UIRequest::ResolveUIRequest,
				Track::Webview::MountainWebviewPostMessageFromGuest,
				crate::Binary::IPC::MessageReceiveCommand::MountainIPCReceiveMessage,
				crate::Binary::IPC::StatusGetCommand::MountainIPCGetStatus,
				crate::Binary::IPC::InvokeCommand::MountainIPCInvoke,
				crate::Binary::IPC::WindConfigurationCommand::MountainGetWindDesktopConfiguration,
				crate::Binary::IPC::ConfigurationUpdateCommand::MountainUpdateConfigurationFromWind,
				crate::Binary::IPC::ConfigurationSyncCommand::MountainSynchronizeConfiguration,
				crate::Binary::IPC::ConfigurationStatusCommand::MountainGetConfigurationStatus,
				crate::Binary::IPC::IPCStatusCommand::MountainGetIPCStatus,
				crate::Binary::IPC::IPCStatusHistoryCommand::MountainGetIPCStatusHistory,
				crate::Binary::IPC::IPCStatusReportingStartCommand::MountainStartIPCStatusReporting,
				crate::Binary::IPC::PerformanceStatsCommand::MountainGetPerformanceStats,
				crate::Binary::IPC::CacheStatsCommand::MountainGetCacheStats,
				crate::Binary::IPC::CollaborationSessionCommand::MountainCreateCollaborationSession,
				crate::Binary::IPC::CollaborationSessionCommand::MountainGetCollaborationSessions,
				crate::Binary::IPC::DocumentSyncCommand::MountainAddDocumentForSync,
				crate::Binary::IPC::DocumentSyncCommand::MountainGetSyncStatus,
				crate::Binary::IPC::UpdateSubscriptionCommand::MountainSubscribeToUpdates,
				crate::Binary::IPC::ConfigurationDataCommand::GetConfigurationData,
				crate::Binary::IPC::ConfigurationDataCommand::SaveConfigurationData,
				crate::Binary::Build::DnsCommands::dns_get_server_info,
				crate::Binary::Build::DnsCommands::dns_get_zone_info,
				crate::Binary::Build::DnsCommands::dns_get_forward_allowlist,
				crate::Binary::Build::DnsCommands::dns_get_health_status,
				crate::Binary::Build::DnsCommands::dns_resolve,
				crate::Binary::Build::DnsCommands::dns_test_resolution,
				crate::Binary::Build::DnsCommands::dns_health_check,
				// Process commands (direct Tauri invoke from ProcessPolyfill)
				crate::Binary::IPC::ProcessCommand::process_get_exec_path,
				crate::Binary::IPC::ProcessCommand::process_get_platform,
				crate::Binary::IPC::ProcessCommand::process_get_arch,
				crate::Binary::IPC::ProcessCommand::process_get_pid,
				crate::Binary::IPC::ProcessCommand::process_get_shell_env,
				crate::Binary::IPC::ProcessCommand::process_get_memory_info,
				// Health check commands (direct Tauri invoke from SharedProcessProxy)
				crate::Binary::IPC::HealthCommand::cocoon_extension_host_health,
				crate::Binary::IPC::HealthCommand::cocoon_search_service_health,
				crate::Binary::IPC::HealthCommand::cocoon_debug_service_health,
				crate::Binary::IPC::HealthCommand::shared_process_service_health,
			])
			.build(tauri::generate_context!())
			.expect("FATAL: Error while building Mountain Tauri application")
			.run(move |app_handle:&tauri::AppHandle, event:tauri::RunEvent| {
				// Debug-only: log selected lifecycle events
				// (high-frequency events are filtered out to keep logs readable).
				if cfg!(debug_assertions) {
					match &event {
						RunEvent::MainEventsCleared => {},
						RunEvent::WindowEvent { .. } => {},
						_ => dev_log!("lifecycle", "[Lifecycle] [RunEvent] {:?}", event),
					}
				}

				// Graceful-shutdown pattern: veto the default exit, run async
				// teardown, then terminate explicitly with exit(0).
				if let RunEvent::ExitRequested { api, .. } = event {
					dev_log!(
						"lifecycle",
						"warn: [Lifecycle] [Shutdown] Exit requested. Starting graceful shutdown..."
					);
					api.prevent_exit();

					let SchedulerHandle = Scheduler.clone();
					let app_handle_clone = app_handle.clone();

					// Spawned on the Tokio runtime created above (run() is
					// executing inside its block_on, so a reactor is active).
					tokio::spawn(async move {
						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Shutting down ApplicationRunTime...");
						let _ = RuntimeShutdownFn(&app_handle_clone).await;

						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Stopping Echo scheduler...");
						let _ = SchedulerShutdownFn(SchedulerHandle).await;

						dev_log!("lifecycle", "[Lifecycle] [Shutdown] Done. Exiting process.");
						app_handle_clone.exit(0);
					});
				}
			});

		dev_log!("lifecycle", "[Lifecycle] [Exit] Mountain application has shut down.");
	});
}