//! Test ONNX model loading with NPU/DirectML.

use activity_tracker::ai::NpuDevice;
use std::path::PathBuf;

#[test]
fn test_onnx_model_exists() {
    let model_path = PathBuf::from("models/distilbert-base.onnx");

    if model_path.exists() {
        println!("✅ Model file found: {}", model_path.display());
        let metadata = std::fs::metadata(&model_path).unwrap();
        println!("   Size: {} MB", metadata.len() / 1_000_000);
    } else {
        println!("⚠️  Model not found. Download it first:");
        println!("   cargo run --release -- models download distilbert");
    }
}

#[test]
fn test_npu_session_with_onnx() {
    let npu = NpuDevice::detect();
    let model_path = PathBuf::from("models/distilbert-base.onnx");

    println!("\n=== ONNX Model Loading Test ===");
    println!("NPU Device: {}", npu.device_name());
    println!("NPU Available: {}", npu.is_available());

    #[cfg(windows)]
    {
        assert!(
            npu.is_available(),
            "NPU should be available on Intel Core Ultra"
        );

        if model_path.exists() {
            println!("\n📦 Model: {}", model_path.display());

            match npu.create_session(model_path.to_str().unwrap()) {
                Ok(session) => {
                    println!("✅ ONNX session created successfully with DirectML!");
                    println!("   Inputs: {}", session.inputs.len());
                    println!("   Outputs: {}", session.outputs.len());

                    // Print input details
                    for (i, input) in session.inputs.iter().enumerate() {
                        println!("   Input {}: {}", i, input.name);
                    }

                    // Print output details
                    for (i, output) in session.outputs.iter().enumerate() {
                        println!("   Output {}: {}", i, output.name);
                    }
                }
                Err(e) => {
                    println!("❌ Failed to create session: {}", e);
                    panic!("Session creation failed");
                }
            }
        } else {
            println!("⚠️  Skipping test - model not downloaded");
            println!("   Run: cargo run --release -- models download distilbert");
        }
    }

    #[cfg(not(windows))]
    {
        // model_path is only consumed in the Windows branch; touch it here
        // to avoid an unused-variable warning on other targets.
        let _ = model_path;
        println!("⚠️  NPU/DirectML only available on Windows");
    }
}

#[test]
fn test_npu_performance_info() {
    let npu = NpuDevice::detect();

    println!("\n=== NPU Performance Information ===");
    println!("Device: {}", npu.device_name());
    println!(
        "Status: {}",
        if npu.is_available() { "Ready" } else { "Not Available" }
    );

    #[cfg(windows)]
    {
        println!("\nDirectML Configuration:");
        println!("  • Execution Provider: DirectML");
        println!("  • Hardware: Intel AI Boost NPU");
        println!("  • API: Windows Machine Learning");
        println!("  • Quantization: INT4/FP16 support");
        println!("  • Expected speedup: 10-30x vs CPU");
    }

    println!("\n✅ NPU info test complete");
}