diff --git a/.gitignore b/.gitignore index a2f0e43..55a621f 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,4 @@ .env apikey.txt .noentropy_cache.json +.HACKATHON_REVIEW.md diff --git a/src/cache_tests.rs b/src/cache_tests.rs deleted file mode 100644 index f9a8673..0000000 --- a/src/cache_tests.rs +++ /dev/null @@ -1,202 +0,0 @@ -use crate::cache::*; -use crate::files::FileCategory; -use std::fs::File; -use std::io::Write; - -#[test] -fn test_cache_new() { - let cache = Cache::new(); - assert_eq!(cache.max_entries, 1000); - assert_eq!(cache.entries.len(), 0); -} - -#[test] -fn test_cache_with_max_entries() { - let cache = Cache::with_max_entries(5); - assert_eq!(cache.max_entries, 5); -} - -#[test] -fn test_cache_default() { - let cache = Cache::default(); - assert_eq!(cache.max_entries, 1000); -} - -#[test] -fn test_cache_response_and_retrieve() { - let temp_dir = tempfile::tempdir().unwrap(); - let base_path = temp_dir.path(); - - let mut cache = Cache::new(); - let filenames = vec!["file1.txt".to_string(), "file2.txt".to_string()]; - - for filename in &filenames { - let file_path = base_path.join(filename); - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"test content").unwrap(); - } - - let response = OrganizationPlan { - files: vec![FileCategory { - filename: "file1.txt".to_string(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - cache.cache_response(&filenames, response.clone(), base_path); - - let cached = cache.get_cached_response(&filenames, base_path); - assert!(cached.is_some()); - assert_eq!(cached.unwrap().files[0].category, "Documents"); -} - -#[test] -fn test_cache_response_file_changed() { - let temp_dir = tempfile::tempdir().unwrap(); - let base_path = temp_dir.path(); - - let mut cache = Cache::new(); - let filenames = vec!["file1.txt".to_string()]; - - let file_path = base_path.join("file1.txt"); - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"original content").unwrap(); - - let response = OrganizationPlan { - files: vec![FileCategory { - filename: "file1.txt".to_string(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - cache.cache_response(&filenames, response.clone(), base_path); - - std::thread::sleep(std::time::Duration::from_millis(100)); - - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"modified content longer than original") - .unwrap(); - - let cached = cache.get_cached_response(&filenames, base_path); - assert!(cached.is_none()); -} - -#[test] -fn test_cache_save_and_load() { - let temp_dir = tempfile::tempdir().unwrap(); - let cache_path = temp_dir.path().join("cache.json"); - let base_path = temp_dir.path(); - - let mut cache = Cache::new(); - let filenames = vec!["file1.txt".to_string()]; - - let file_path = base_path.join("file1.txt"); - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"test").unwrap(); - - let response = OrganizationPlan { - files: vec![FileCategory { - filename: "file1.txt".to_string(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - cache.cache_response(&filenames, response, base_path); - cache.save(&cache_path).unwrap(); - - let loaded_cache = Cache::load_or_create(&cache_path); - assert_eq!(loaded_cache.entries.len(), 1); -} - -#[test] -fn test_cache_cleanup_old_entries() { - let temp_dir = tempfile::tempdir().unwrap(); - let base_path = temp_dir.path(); - - let mut cache = Cache::new(); - let filenames = vec!["file1.txt".to_string()]; - - let file_path = base_path.join("file1.txt"); - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"test").unwrap(); - - let response = OrganizationPlan { - files: vec![FileCategory { - filename: "file1.txt".to_string(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - cache.cache_response(&filenames, response, base_path); - - cache.cleanup_old_entries(0); - assert_eq!(cache.entries.len(), 0); -} - -#[test] -fn test_cache_max_entries_eviction() { - let temp_dir = tempfile::tempdir().unwrap(); - let base_path = temp_dir.path(); - - let mut cache = Cache::with_max_entries(2); - - for i in 1..=3 { - let filename = format!("file{}.txt", i); - let file_path = base_path.join(&filename); - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"test").unwrap(); - - let response = OrganizationPlan { - files: vec![FileCategory { - filename: filename.clone(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - cache.cache_response(&vec![filename], response, base_path); - } - - assert_eq!(cache.entries.len(), 2); -} - -#[test] -fn test_cache_serialization() { - let cache = Cache::new(); - let json = serde_json::to_string(&cache).unwrap(); - let deserialized: Cache = serde_json::from_str(&json).unwrap(); - assert_eq!(cache.max_entries, deserialized.max_entries); -} - -#[test] -fn test_file_metadata_equality() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("test.txt"); - - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"test content").unwrap(); - - let metadata1 = Cache::get_file_metadata(&file_path).unwrap(); - let metadata2 = Cache::get_file_metadata(&file_path).unwrap(); - - assert_eq!(metadata1, metadata2); -} - -#[test] -fn test_cache_key_generation() { - let cache = Cache::new(); - let filenames1 = vec!["a.txt".to_string(), "b.txt".to_string()]; - let filenames2 = vec!["b.txt".to_string(), "a.txt".to_string()]; - let filenames3 = vec!["c.txt".to_string()]; - - let key1 = cache.generate_cache_key(&filenames1); - let key2 = cache.generate_cache_key(&filenames2); - let key3 = cache.generate_cache_key(&filenames3); - - assert_eq!(key1, key2); - assert_ne!(key1, key3); -} diff --git a/src/cli/args.rs b/src/cli/args.rs new file mode 100644 index 0000000..c5ad0e2 --- /dev/null +++ b/src/cli/args.rs @@ -0,0 +1,22 @@ +use clap::Parser; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct Args { + #[arg(short, long, help = "Preview changes without moving files")] + pub dry_run: bool, + + #[arg( + short, + long, + default_value_t = 5, + help = "Maximum concurrent API requests" + )] + pub max_concurrent: usize, + + #[arg(long, help = "Recursively searches files in subdirectory")] + pub recursive: bool, + + #[arg(long, help = "Undo the last file organization")] + pub undo: bool, +} diff --git a/src/cli/mod.rs b/src/cli/mod.rs new file mode 100644 index 0000000..07d9f6b --- /dev/null +++ b/src/cli/mod.rs @@ -0,0 +1,5 @@ +pub mod args; +pub mod orchestrator; + +pub use args::Args; +pub use orchestrator::{handle_gemini_error, handle_organization, handle_undo}; diff --git a/src/cli/orchestrator.rs b/src/cli/orchestrator.rs new file mode 100644 index 0000000..adb5b8e --- /dev/null +++ b/src/cli/orchestrator.rs @@ -0,0 +1,222 @@ +use colored::*; +use futures::future::join_all; +use crate::cli::Args; +use crate::settings::Config; +use crate::files::{execute_move, FileBatch, is_text_file, read_file_sample}; +use crate::gemini::GeminiClient; +use crate::models::OrganizationPlan; +use crate::storage::{Cache, UndoLog}; +use std::path::PathBuf; +use std::sync::Arc; + +pub fn handle_gemini_error(error: crate::gemini::GeminiError) { + use colored::*; + + match error { + crate::gemini::GeminiError::RateLimitExceeded { retry_after } => { + println!( + "{} API rate limit exceeded. Please wait {} seconds before trying again.", + "ERROR:".red(), + retry_after + ); + } + crate::gemini::GeminiError::QuotaExceeded { limit } => { + println!( + "{} Quota exceeded: {}. Please check your Gemini API usage.", + "ERROR:".red(), + limit + ); + } + crate::gemini::GeminiError::ModelNotFound { model } => { + println!( + "{} Model '{}' not found. Please check the model name in the configuration.", + "ERROR:".red(), + model + ); + } + crate::gemini::GeminiError::InvalidApiKey => { + println!( + "{} Invalid API key. Please check your GEMINI_API_KEY environment variable.", + "ERROR:".red() + ); + } + crate::gemini::GeminiError::ContentPolicyViolation { reason } => { + println!("{} Content policy violation: {}", "ERROR:".red(), reason); + } + crate::gemini::GeminiError::ServiceUnavailable { reason } => { + println!( + "{} Gemini service is temporarily unavailable: {}", + "ERROR:".red(), + reason + ); + } + crate::gemini::GeminiError::NetworkError(e) => { + println!("{} Network error: {}", "ERROR:".red(), e); + } + crate::gemini::GeminiError::Timeout { seconds } => { + println!( + "{} Request timed out after {} seconds.", + "ERROR:".red(), + seconds + ); + } + crate::gemini::GeminiError::InvalidRequest { details } => { + println!("{} Invalid request: {}", "ERROR:".red(), details); + } + crate::gemini::GeminiError::ApiError { status, message } => { + println!( + "{} API error (HTTP {}): {}", + "ERROR:".red(), + status, + message + ); + } + crate::gemini::GeminiError::InvalidResponse(msg) => { + println!("{} Invalid response from Gemini: {}", "ERROR:".red(), msg); + } + crate::gemini::GeminiError::InternalError { details } => { + println!("{} Internal server error: {}", "ERROR:".red(), details); + } + crate::gemini::GeminiError::SerializationError(e) => { + println!("{} JSON serialization error: {}", "ERROR:".red(), e); + } + } + + println!("\n{} Check the following:", "HINT:".yellow()); + println!(" • Your GEMINI_API_KEY is correctly set"); + println!(" • Your internet connection is working"); + println!(" • Gemini API service is available"); + println!(" • You haven't exceeded your API quota"); +} + +pub async fn handle_organization( + args: Args, + api_key: String, + download_path: PathBuf, +) -> Result<(), Box> { + let client: GeminiClient = GeminiClient::new(api_key); + + let mut cache_path = std::env::var("HOME") + .map(PathBuf::from) + .expect("No Home found"); + cache_path.push(".config/noentropy/data/.noentropy_cache.json"); + let mut cache = Cache::load_or_create(cache_path.as_path()); + + cache.cleanup_old_entries(7 * 24 * 60 * 60); + + let undo_log_path = Config::get_undo_log_path()?; + let mut undo_log = UndoLog::load_or_create(&undo_log_path); + undo_log.cleanup_old_entries(30 * 24 * 60 * 60); + + let batch = FileBatch::from_path(download_path.clone(), args.recursive); + + if batch.filenames.is_empty() { + println!("{}", "No files found to organize!".yellow()); + return Ok(()); + } + + println!( + "Found {} files. Asking Gemini to organize...", + batch.count() + ); + + let mut plan: OrganizationPlan = match client + .organize_files_with_cache(batch.filenames, Some(&mut cache), Some(&download_path)) + .await + { + Ok(plan) => plan, + Err(e) => { + handle_gemini_error(e); + return Ok(()); + } + }; + + println!( + "{}", + "Gemini Plan received! Performing deep inspection...".green() + ); + + let client_arc: Arc = Arc::new(client); + let semaphore: Arc = Arc::new(tokio::sync::Semaphore::new(args.max_concurrent)); + + let tasks: Vec<_> = plan + .files + .iter_mut() + .zip(batch.paths.iter()) + .map(|(file_category, path): (&mut crate::models::FileCategory, &PathBuf)| { + let client: Arc = Arc::clone(&client_arc); + let filename: String = file_category.filename.clone(); + let category: String = file_category.category.clone(); + let path: PathBuf = path.clone(); + let semaphore: Arc = Arc::clone(&semaphore); + + async move { + if is_text_file(&path) { + let _permit = semaphore.acquire().await.unwrap(); + if let Some(content) = read_file_sample(&path, 5000) { + println!("Reading content of {}...", filename.green()); + client + .get_ai_sub_category(&filename, &category, &content) + .await + } else { + String::new() + } + } else { + String::new() + } + } + }) + .collect(); + + let sub_categories: Vec = join_all(tasks).await; + + for (file_category, sub_category) in plan.files.iter_mut().zip(sub_categories) { + file_category.sub_category = sub_category; + } + + println!("{}", "Deep inspection complete! Moving Files.....".green()); + + if args.dry_run { + println!("{} Dry run mode - skipping file moves.", "INFO:".cyan()); + } else { + execute_move(&download_path, plan, Some(&mut undo_log)); + } + println!("{}", "Done!".green().bold()); + + if let Err(e) = cache.save(cache_path.as_path()) { + eprintln!("Warning: Failed to save cache: {}", e); + } + + if let Err(e) = undo_log.save(&undo_log_path) { + eprintln!("Warning: Failed to save undo log: {}", e); + } + + Ok(()) +} + +pub async fn handle_undo( + args: Args, + download_path: PathBuf, +) -> Result<(), Box> { + let undo_log_path = Config::get_undo_log_path()?; + + if !undo_log_path.exists() { + println!("{}", "No undo log found. Nothing to undo.".yellow()); + return Ok(()); + } + + let mut undo_log = UndoLog::load_or_create(&undo_log_path); + + if !undo_log.has_completed_moves() { + println!("{}", "No completed moves to undo.".yellow()); + return Ok(()); + } + + crate::files::undo_moves(&download_path, &mut undo_log, args.dry_run)?; + + if let Err(e) = undo_log.save(&undo_log_path) { + eprintln!("Warning: Failed to save undo log: {}", e); + } + + Ok(()) +} diff --git a/src/files.rs b/src/files.rs deleted file mode 100644 index 266f1d3..0000000 --- a/src/files.rs +++ /dev/null @@ -1,385 +0,0 @@ -use colored::*; -use serde::{Deserialize, Serialize}; -use std::io; -use std::{ffi::OsStr, fs, path::Path, path::PathBuf}; -use walkdir::WalkDir; - -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FileCategory { - pub filename: String, - pub category: String, - pub sub_category: String, -} -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct OrganizationPlan { - pub files: Vec, -} -#[derive(Debug)] -pub struct FileBatch { - pub filenames: Vec, - pub paths: Vec, -} -impl FileBatch { - /// Reads a directory path and populates lists of all files inside it. - /// It skips sub-directories (does not read recursively). - pub fn from_path(root_path: PathBuf, recursive: bool) -> Self { - let mut filenames = Vec::new(); - let mut paths = Vec::new(); - let walker = if recursive { - WalkDir::new(&root_path).min_depth(1).follow_links(false) - } else { - WalkDir::new(&root_path) - .min_depth(1) - .max_depth(1) - .follow_links(false) - }; - for entry in walker.into_iter().filter_map(|e| e.ok()) { - let path = entry.path(); - if path.is_file() { - match path.strip_prefix(&root_path) { - Ok(relative_path) => { - filenames.push(relative_path.to_string_lossy().into_owned()); - paths.push(path.to_path_buf()); - } - Err(e) => { - eprintln!("Error getting relative path for {:?}: {}", path, e); - } - } - } - } - FileBatch { filenames, paths } - } - - /// Helper to get the number of files found - pub fn count(&self) -> usize { - self.filenames.len() - } -} - -/// Move a file with cross-platform compatibility -/// Tries rename first (fastest), falls back to copy+delete if needed (e.g., cross-filesystem on Windows) -fn move_file_cross_platform(source: &Path, target: &Path) -> io::Result<()> { - match fs::rename(source, target) { - Ok(()) => Ok(()), - Err(e) => { - if cfg!(windows) || e.kind() == io::ErrorKind::CrossesDevices { - fs::copy(source, target)?; - fs::remove_file(source)?; - Ok(()) - } else { - Err(e) - } - } - } -} - -pub fn execute_move( - base_path: &Path, - plan: OrganizationPlan, - mut undo_log: Option<&mut crate::undo::UndoLog>, -) { - println!("\n{}", "--- EXECUTION PLAN ---".bold().underline()); - - if plan.files.is_empty() { - println!("{}", "No files to organize.".yellow()); - return; - } - - for item in &plan.files { - let mut target_display = format!("{}", item.category.green()); - if !item.sub_category.is_empty() { - target_display = format!("{}/{}", target_display, item.sub_category.blue()); - } - - println!("Plan: {} -> {}/", item.filename, target_display); - } - - eprint!("\nDo you want to apply these changes? [y/N]: "); - - let mut input = String::new(); - if io::stdin().read_line(&mut input).is_err() { - eprintln!("\n{}", "Failed to read input. Operation cancelled.".red()); - return; - } - - let input = input.trim().to_lowercase(); - - if input != "y" && input != "yes" { - println!("\n{}", "Operation cancelled.".red()); - return; - } - - println!("\n{}", "--- MOVING FILES ---".bold().underline()); - - let mut moved_count = 0; - let mut error_count = 0; - - for item in plan.files { - let source = base_path.join(&item.filename); - - let mut final_path = base_path.join(&item.category); - - if !item.sub_category.is_empty() { - final_path = final_path.join(&item.sub_category); - } - - let file_name = Path::new(&item.filename) - .file_name() - .unwrap_or_else(|| OsStr::new(&item.filename)) - .to_string_lossy() - .into_owned(); - - let target = final_path.join(&file_name); - - if let Err(e) = fs::create_dir_all(&final_path) { - eprintln!( - "{} Failed to create dir {:?}: {}", - "ERROR:".red(), - final_path, - e - ); - error_count += 1; - continue; - } - - if let Ok(metadata) = fs::metadata(&source) { - if metadata.is_file() { - match move_file_cross_platform(&source, &target) { - Ok(_) => { - if item.sub_category.is_empty() { - println!("Moved: {} -> {}/", item.filename, item.category.green()); - } else { - println!( - "Moved: {} -> {}/{}", - item.filename, - item.category.green(), - item.sub_category.blue() - ); - } - moved_count += 1; - - if let Some(ref mut log) = undo_log { - log.record_move(source, target); - } - } - Err(e) => { - eprintln!("{} Failed to move {}: {}", "ERROR:".red(), item.filename, e); - error_count += 1; - - if let Some(ref mut log) = undo_log { - log.record_failed_move(source, target); - } - } - } - } else { - eprintln!( - "{} Skipping {}: Not a file", - "WARN:".yellow(), - item.filename - ); - } - } else { - eprintln!( - "{} Skipping {}: File not found", - "WARN:".yellow(), - item.filename - ); - error_count += 1; - } - } - - println!("\n{}", "Organization Complete!".bold().green()); - println!( - "Files moved: {}, Errors: {}", - moved_count.to_string().green(), - error_count.to_string().red() - ); -} - -pub fn undo_moves( - base_path: &Path, - undo_log: &mut crate::undo::UndoLog, - dry_run: bool, -) -> Result<(usize, usize, usize), Box> { - let completed_moves: Vec<_> = undo_log - .get_completed_moves() - .into_iter() - .cloned() - .collect(); - - if completed_moves.is_empty() { - println!("{}", "No completed moves to undo.".yellow()); - return Ok((0, 0, 0)); - } - - println!("\n{}", "--- UNDO PREVIEW ---".bold().underline()); - println!( - "{} will restore {} files:", - "INFO:".cyan(), - completed_moves.len() - ); - - for record in &completed_moves { - if let Ok(rel_dest) = record.destination_path.strip_prefix(base_path) { - if let Ok(rel_source) = record.source_path.strip_prefix(base_path) { - println!( - " {} -> {}", - rel_dest.display().to_string().red(), - rel_source.display().to_string().green() - ); - } else { - println!( - " {} -> {}", - record.destination_path.display(), - record.source_path.display() - ); - } - } - } - - if dry_run { - println!("\n{}", "Dry run mode - skipping undo operation.".cyan()); - return Ok((completed_moves.len(), 0, 0)); - } - - eprint!("\nDo you want to undo these changes? [y/N]: "); - - let mut input = String::new(); - if io::stdin().read_line(&mut input).is_err() { - eprintln!("\n{}", "Failed to read input. Undo cancelled.".red()); - return Ok((0, 0, 0)); - } - - let input = input.trim().to_lowercase(); - - if input != "y" && input != "yes" { - println!("\n{}", "Undo cancelled.".red()); - return Ok((0, 0, 0)); - } - - println!("\n{}", "--- UNDOING MOVES ---".bold().underline()); - - let mut restored_count = 0; - let mut skipped_count = 0; - let mut failed_count = 0; - - for record in completed_moves { - let source = &record.source_path; - let destination = &record.destination_path; - - if !destination.exists() { - eprintln!( - "{} File not found at destination: {}", - "WARN:".yellow(), - destination.display() - ); - failed_count += 1; - continue; - } - - if source.exists() { - eprintln!( - "{} Skipping {} - source already exists", - "WARN:".yellow(), - source.display() - ); - skipped_count += 1; - continue; - } - - match move_file_cross_platform(destination, source) { - Ok(_) => { - println!( - "Restored: {} -> {}", - destination.display().to_string().red(), - source.display().to_string().green() - ); - restored_count += 1; - undo_log.mark_as_undone(destination); - } - Err(e) => { - eprintln!( - "{} Failed to restore {}: {}", - "ERROR:".red(), - source.display(), - e - ); - failed_count += 1; - } - } - } - - cleanup_empty_directories(base_path, undo_log)?; - - println!("\n{}", "UNDO COMPLETE!".bold().green()); - println!( - "Files restored: {}, Skipped: {}, Failed: {}", - restored_count.to_string().green(), - skipped_count.to_string().yellow(), - failed_count.to_string().red() - ); - - Ok((restored_count, skipped_count, failed_count)) -} - -fn cleanup_empty_directories( - base_path: &Path, - undo_log: &mut crate::undo::UndoLog, -) -> Result<(), Box> { - let directory_usage = undo_log.get_directory_usage(base_path); - - for dir_path in directory_usage.keys() { - let full_path = base_path.join(dir_path); - if full_path.is_dir() - && let Ok(mut entries) = fs::read_dir(&full_path) - && entries.next().is_none() - && fs::remove_dir(&full_path).is_ok() - { - println!("{} Removed empty directory: {}", "INFO:".cyan(), dir_path); - } - } - - Ok(()) -} - -pub fn is_text_file(path: &Path) -> bool { - let text_extensions = [ - "txt", "md", "rs", "py", "js", "ts", "jsx", "tsx", "html", "css", "json", "xml", "csv", - "yaml", "yml", "toml", "ini", "cfg", "conf", "log", "sh", "bat", "ps1", "sql", "c", "cpp", - "h", "hpp", "java", "go", "rb", "php", "swift", "kt", "scala", "lua", "r", "m", - ]; - - if let Some(ext) = path.extension() - && let Some(ext_str) = ext.to_str() - { - return text_extensions.contains(&ext_str.to_lowercase().as_str()); - } - false -} - -// --- 2. Helper to safely read content (with limit) --- -pub fn read_file_sample(path: &Path, max_chars: usize) -> Option { - use std::io::Read; - // Attempt to open the file - let file = match fs::File::open(path) { - Ok(f) => f, - Err(_) => return None, - }; - - // Buffer to hold file contents - let mut buffer = Vec::new(); - - // Read the whole file (or you could use take() to limit bytes read for speed) - // For safety, let's limit the read to avoidance huge memory spikes on massive logs - let mut handle = file.take(max_chars as u64); - if handle.read_to_end(&mut buffer).is_err() { - return None; - } - - // Try to convert to UTF-8. If it fails (binary data), return None. - String::from_utf8(buffer).ok() -} - -#[cfg(test)] -#[path = "files_tests.rs"] -mod tests; diff --git a/src/files/batch.rs b/src/files/batch.rs new file mode 100644 index 0000000..232fd22 --- /dev/null +++ b/src/files/batch.rs @@ -0,0 +1,108 @@ +use std::path::PathBuf; +use walkdir::WalkDir; + +#[derive(Debug)] +pub struct FileBatch { + pub filenames: Vec, + pub paths: Vec, +} + +impl FileBatch { + pub fn from_path(root_path: PathBuf, recursive: bool) -> Self { + let mut filenames = Vec::new(); + let mut paths = Vec::new(); + let walker = if recursive { + WalkDir::new(&root_path).min_depth(1).follow_links(false) + } else { + WalkDir::new(&root_path) + .min_depth(1) + .max_depth(1) + .follow_links(false) + }; + for entry in walker.into_iter().filter_map(|e| e.ok()) { + let path = entry.path(); + if path.is_file() { + match path.strip_prefix(&root_path) { + Ok(relative_path) => { + filenames.push(relative_path.to_string_lossy().into_owned()); + paths.push(path.to_path_buf()); + } + Err(e) => { + eprintln!("Error getting relative path for {:?}: {}", path, e); + } + } + } + } + FileBatch { filenames, paths } + } + + pub fn count(&self) -> usize { + self.filenames.len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs::{self, File}; + use std::path::Path; + + #[test] + fn test_file_batch_from_path() { + let temp_dir = tempfile::tempdir().unwrap(); + let dir_path = temp_dir.path(); + + File::create(dir_path.join("file1.txt")).unwrap(); + File::create(dir_path.join("file2.rs")).unwrap(); + fs::create_dir(dir_path.join("subdir")).unwrap(); + + let batch = FileBatch::from_path(dir_path.to_path_buf(), false); + assert_eq!(batch.count(), 2); + assert!(batch.filenames.contains(&"file1.txt".to_string())); + assert!(batch.filenames.contains(&"file2.rs".to_string())); + } + + #[test] + fn test_file_batch_from_path_nonexistent() { + let batch = FileBatch::from_path(PathBuf::from("/nonexistent/path"), false); + assert_eq!(batch.count(), 0); + } + + #[test] + fn test_file_batch_from_path_non_recursive() { + let temp_dir = tempfile::tempdir().unwrap(); + let dir_path = temp_dir.path(); + File::create(dir_path.join("file1.txt")).unwrap(); + File::create(dir_path.join("file2.rs")).unwrap(); + fs::create_dir(dir_path.join("subdir")).unwrap(); + File::create(dir_path.join("subdir").join("file3.txt")).unwrap(); + let batch = FileBatch::from_path(dir_path.to_path_buf(), false); + assert_eq!(batch.count(), 2); + assert!(batch.filenames.contains(&"file1.txt".to_string())); + assert!(batch.filenames.contains(&"file2.rs".to_string())); + assert!(!batch.filenames.contains(&"subdir/file3.txt".to_string())); + } + + #[test] + fn test_file_batch_from_path_recursive() { + let temp_dir = tempfile::tempdir().unwrap(); + let dir_path = temp_dir.path(); + File::create(dir_path.join("file1.txt")).unwrap(); + fs::create_dir(dir_path.join("subdir1")).unwrap(); + File::create(dir_path.join("subdir1").join("file2.rs")).unwrap(); + fs::create_dir(dir_path.join("subdir1").join("nested")).unwrap(); + File::create(dir_path.join("subdir1").join("nested").join("file3.md")).unwrap(); + fs::create_dir(dir_path.join("subdir2")).unwrap(); + File::create(dir_path.join("subdir2").join("file4.py")).unwrap(); + let batch = FileBatch::from_path(dir_path.to_path_buf(), true); + assert_eq!(batch.count(), 4); + assert!(batch.filenames.contains(&"file1.txt".to_string())); + assert!(batch.filenames.contains(&"subdir1/file2.rs".to_string())); + assert!( + batch + .filenames + .contains(&"subdir1/nested/file3.md".to_string()) + ); + assert!(batch.filenames.contains(&"subdir2/file4.py".to_string())); + } +} diff --git a/src/files/detector.rs b/src/files/detector.rs new file mode 100644 index 0000000..13e88c5 --- /dev/null +++ b/src/files/detector.rs @@ -0,0 +1,107 @@ +use std::{fs, path::Path}; + +pub fn is_text_file(path: &Path) -> bool { + let text_extensions = [ + "txt", "md", "rs", "py", "js", "ts", "jsx", "tsx", "html", "css", "json", "xml", "csv", + "yaml", "yml", "toml", "ini", "cfg", "conf", "log", "sh", "bat", "ps1", "sql", "c", "cpp", + "h", "hpp", "java", "go", "rb", "php", "swift", "kt", "scala", "lua", "r", "m", + ]; + + if let Some(ext) = path.extension() + && let Some(ext_str) = ext.to_str() + { + return text_extensions.contains(&ext_str.to_lowercase().as_str()); + } + false +} + +pub fn read_file_sample(path: &Path, max_chars: usize) -> Option { + use std::io::Read; + let file = match fs::File::open(path) { + Ok(f) => f, + Err(_) => return None, + }; + + let mut buffer = Vec::new(); + let mut handle = file.take(max_chars as u64); + if handle.read_to_end(&mut buffer).is_err() { + return None; + } + + String::from_utf8(buffer).ok() +} + +#[cfg(test)] +mod tests { + use super::*; + use std::fs::File; + use std::io::Write; + use std::path::Path; + + #[test] + fn test_is_text_file_with_text_extensions() { + assert!(is_text_file(Path::new("test.txt"))); + assert!(is_text_file(Path::new("test.rs"))); + assert!(is_text_file(Path::new("test.py"))); + assert!(is_text_file(Path::new("test.md"))); + assert!(is_text_file(Path::new("test.json"))); + } + + #[test] + fn test_is_text_file_with_binary_extensions() { + assert!(!is_text_file(Path::new("test.exe"))); + assert!(!is_text_file(Path::new("test.bin"))); + assert!(!is_text_file(Path::new("test.jpg"))); + assert!(!is_text_file(Path::new("test.pdf"))); + } + + #[test] + fn test_is_text_file_case_insensitive() { + assert!(is_text_file(Path::new("test.TXT"))); + assert!(is_text_file(Path::new("test.RS"))); + assert!(is_text_file(Path::new("test.Py"))); + } + + #[test] + fn test_read_file_sample() { + let temp_dir = tempfile::tempdir().unwrap(); + let file_path = temp_dir.path().join("test.txt"); + + let mut file = File::create(&file_path).unwrap(); + file.write_all(b"Hello, World!").unwrap(); + + let content = read_file_sample(&file_path, 1000); + assert_eq!(content, Some("Hello, World!".to_string())); + } + + #[test] + fn test_read_file_sample_with_limit() { + let temp_dir = tempfile::tempdir().unwrap(); + let file_path = temp_dir.path().join("test.txt"); + + let mut file = File::create(&file_path).unwrap(); + file.write_all(b"Hello, World! This is a long text.") + .unwrap(); + + let content = read_file_sample(&file_path, 5); + assert_eq!(content, Some("Hello".to_string())); + } + + #[test] + fn test_read_file_sample_binary_file() { + let temp_dir = tempfile::tempdir().unwrap(); + let file_path = temp_dir.path().join("test.bin"); + + let mut file = File::create(&file_path).unwrap(); + file.write_all(&[0x00, 0xFF, 0x80, 0x90]).unwrap(); + + let content = read_file_sample(&file_path, 1000); + assert_eq!(content, None); + } + + #[test] + fn test_read_file_sample_nonexistent() { + let content = read_file_sample(Path::new("/nonexistent/file.txt"), 1000); + assert_eq!(content, None); + } +} diff --git a/src/files/mod.rs b/src/files/mod.rs new file mode 100644 index 0000000..c7e78fc --- /dev/null +++ b/src/files/mod.rs @@ -0,0 +1,49 @@ +pub mod batch; +pub mod detector; +pub mod mover; +pub mod undo; + +pub use batch::FileBatch; +pub use detector::{is_text_file, read_file_sample}; +pub use mover::execute_move; +pub use undo::undo_moves; + +#[cfg(test)] +mod tests { + use crate::models::{FileCategory, OrganizationPlan}; + use serde_json; + + #[test] + fn test_organization_plan_serialization() { + let plan = OrganizationPlan { + files: vec![FileCategory { + filename: "test.txt".to_string(), + category: "Documents".to_string(), + sub_category: "Text".to_string(), + }], + }; + + let json = serde_json::to_string(&plan).unwrap(); + assert!(json.contains("test.txt")); + assert!(json.contains("Documents")); + + let deserialized: OrganizationPlan = serde_json::from_str(&json).unwrap(); + assert_eq!(deserialized.files[0].filename, "test.txt"); + } + + #[test] + fn test_file_category_serialization() { + let fc = FileCategory { + filename: "file.rs".to_string(), + category: "Code".to_string(), + sub_category: "Rust".to_string(), + }; + + let json = serde_json::to_string(&fc).unwrap(); + let deserialized: FileCategory = serde_json::from_str(&json).unwrap(); + + assert_eq!(fc.filename, deserialized.filename); + assert_eq!(fc.category, deserialized.category); + assert_eq!(fc.sub_category, deserialized.sub_category); + } +} diff --git a/src/files/mover.rs b/src/files/mover.rs new file mode 100644 index 0000000..d72a28f --- /dev/null +++ b/src/files/mover.rs @@ -0,0 +1,143 @@ +use colored::*; +use crate::models::OrganizationPlan; +use crate::storage::UndoLog; +use std::io; +use std::{ffi::OsStr, fs, path::Path}; + +pub fn execute_move( + base_path: &Path, + plan: OrganizationPlan, + mut undo_log: Option<&mut UndoLog>, +) { + println!("\n{}", "--- EXECUTION PLAN ---".bold().underline()); + + if plan.files.is_empty() { + println!("{}", "No files to organize.".yellow()); + return; + } + + for item in &plan.files { + let mut target_display = format!("{}", item.category.green()); + if !item.sub_category.is_empty() { + target_display = format!("{}/{}", target_display, item.sub_category.blue()); + } + + println!("Plan: {} -> {}/", item.filename, target_display); + } + + eprint!("\nDo you want to apply these changes? [y/N]: "); + + let mut input = String::new(); + if io::stdin().read_line(&mut input).is_err() { + eprintln!("\n{}", "Failed to read input. Operation cancelled.".red()); + return; + } + + let input = input.trim().to_lowercase(); + + if input != "y" && input != "yes" { + println!("\n{}", "Operation cancelled.".red()); + return; + } + + println!("\n{}", "--- MOVING FILES ---".bold().underline()); + + let mut moved_count = 0; + let mut error_count = 0; + + for item in plan.files { + let source = base_path.join(&item.filename); + + let mut final_path = base_path.join(&item.category); + + if !item.sub_category.is_empty() { + final_path = final_path.join(&item.sub_category); + } + + let file_name = Path::new(&item.filename) + .file_name() + .unwrap_or_else(|| OsStr::new(&item.filename)) + .to_string_lossy() + .into_owned(); + + let target = final_path.join(&file_name); + + if let Err(e) = fs::create_dir_all(&final_path) { + eprintln!( + "{} Failed to create dir {:?}: {}", + "ERROR:".red(), + final_path, + e + ); + error_count += 1; + continue; + } + + if let Ok(metadata) = fs::metadata(&source) { + if metadata.is_file() { + match move_file_cross_platform(&source, &target) { + Ok(_) => { + if item.sub_category.is_empty() { + println!("Moved: {} -> {}/", item.filename, item.category.green()); + } else { + println!( + "Moved: {} -> {}/{}", + item.filename, + item.category.green(), + item.sub_category.blue() + ); + } + moved_count += 1; + + if let Some(ref mut log) = undo_log { + log.record_move(source, target); + } + } + Err(e) => { + eprintln!("{} Failed to move {}: {}", "ERROR:".red(), item.filename, e); + error_count += 1; + + if let Some(ref mut log) = undo_log { + log.record_failed_move(source, target); + } + } + } + } else { + eprintln!( + "{} Skipping {}: Not a file", + "WARN:".yellow(), + item.filename + ); + } + } else { + eprintln!( + "{} Skipping {}: File not found", + "WARN:".yellow(), + item.filename + ); + error_count += 1; + } + } + + println!("\n{}", "Organization Complete!".bold().green()); + println!( + "Files moved: {}, Errors: {}", + moved_count.to_string().green(), + error_count.to_string().red() + ); +} + +fn move_file_cross_platform(source: &Path, target: &Path) -> io::Result<()> { + match fs::rename(source, target) { + Ok(()) => Ok(()), + Err(e) => { + if cfg!(windows) || e.kind() == io::ErrorKind::CrossesDevices { + fs::copy(source, target)?; + fs::remove_file(source)?; + Ok(()) + } else { + Err(e) + } + } + } +} diff --git a/src/files/undo.rs b/src/files/undo.rs new file mode 100644 index 0000000..fc2523f --- /dev/null +++ b/src/files/undo.rs @@ -0,0 +1,166 @@ +use colored::*; +use crate::storage::UndoLog; +use std::fs; +use std::io; +use std::path::Path; + +pub fn undo_moves( + base_path: &Path, + undo_log: &mut UndoLog, + dry_run: bool, +) -> Result<(usize, usize, usize), Box> { + let completed_moves: Vec<_> = undo_log + .get_completed_moves() + .into_iter() + .cloned() + .collect(); + + if completed_moves.is_empty() { + println!("{}", "No completed moves to undo.".yellow()); + return Ok((0, 0, 0)); + } + + println!("\n{}", "--- UNDO PREVIEW ---".bold().underline()); + println!( + "{} will restore {} files:", + "INFO:".cyan(), + completed_moves.len() + ); + + for record in &completed_moves { + if let Ok(rel_dest) = record.destination_path.strip_prefix(base_path) { + if let Ok(rel_source) = record.source_path.strip_prefix(base_path) { + println!( + " {} -> {}", + rel_dest.display().to_string().red(), + rel_source.display().to_string().green() + ); + } else { + println!( + " {} -> {}", + record.destination_path.display(), + record.source_path.display() + ); + } + } + } + + if dry_run { + println!("\n{}", "Dry run mode - skipping undo operation.".cyan()); + return Ok((completed_moves.len(), 0, 0)); + } + + eprint!("\nDo you want to undo these changes? [y/N]: "); + + let mut input = String::new(); + if io::stdin().read_line(&mut input).is_err() { + eprintln!("\n{}", "Failed to read input. Undo cancelled.".red()); + return Ok((0, 0, 0)); + } + + let input = input.trim().to_lowercase(); + + if input != "y" && input != "yes" { + println!("\n{}", "Undo cancelled.".red()); + return Ok((0, 0, 0)); + } + + println!("\n{}", "--- UNDOING MOVES ---".bold().underline()); + + let mut restored_count = 0; + let mut skipped_count = 0; + let mut failed_count = 0; + + for record in completed_moves { + let source = &record.source_path; + let destination = &record.destination_path; + + if !destination.exists() { + eprintln!( + "{} File not found at destination: {}", + "WARN:".yellow(), + destination.display() + ); + failed_count += 1; + continue; + } + + if source.exists() { + eprintln!( + "{} Skipping {} - source already exists", + "WARN:".yellow(), + source.display() + ); + skipped_count += 1; + continue; + } + + match move_file_cross_platform(destination, source) { + Ok(_) => { + println!( + "Restored: {} -> {}", + destination.display().to_string().red(), + source.display().to_string().green() + ); + restored_count += 1; + undo_log.mark_as_undone(destination); + } + Err(e) => { + eprintln!( + "{} Failed to restore {}: {}", + "ERROR:".red(), + source.display(), + e + ); + failed_count += 1; + } + } + } + + cleanup_empty_directories(base_path, undo_log)?; + + println!("\n{}", "UNDO COMPLETE!".bold().green()); + println!( + "Files restored: {}, Skipped: {}, Failed: {}", + restored_count.to_string().green(), + skipped_count.to_string().yellow(), + failed_count.to_string().red() + ); + + Ok((restored_count, skipped_count, failed_count)) +} + +fn cleanup_empty_directories( + base_path: &Path, + undo_log: &mut UndoLog, +) -> Result<(), Box> { + let directory_usage = undo_log.get_directory_usage(base_path); + + for dir_path in directory_usage.keys() { + let full_path = base_path.join(dir_path); + if full_path.is_dir() + && let Ok(mut entries) = fs::read_dir(&full_path) + && entries.next().is_none() + && fs::remove_dir(&full_path).is_ok() + { + println!("{} Removed empty directory: {}", "INFO:".cyan(), dir_path); + } + } + + Ok(()) +} + +fn move_file_cross_platform(source: &Path, target: &Path) -> io::Result<()> { + match fs::rename(source, target) { + Ok(()) => Ok(()), + Err(e) => { + if cfg!(windows) || e.kind() == io::ErrorKind::CrossesDevices { + fs::copy(source, target)?; + fs::remove_file(source)?; + Ok(()) + } else { + Err(e) + } + } + } +} diff --git a/src/files_tests.rs b/src/files_tests.rs deleted file mode 100644 index d207b2e..0000000 --- a/src/files_tests.rs +++ /dev/null @@ -1,162 +0,0 @@ -use crate::files::*; -use std::fs::{self, File}; -use std::io::Write; - -#[test] -fn test_is_text_file_with_text_extensions() { - assert!(is_text_file(Path::new("test.txt"))); - assert!(is_text_file(Path::new("test.rs"))); - assert!(is_text_file(Path::new("test.py"))); - assert!(is_text_file(Path::new("test.md"))); - assert!(is_text_file(Path::new("test.json"))); -} - -#[test] -fn test_is_text_file_with_binary_extensions() { - assert!(!is_text_file(Path::new("test.exe"))); - assert!(!is_text_file(Path::new("test.bin"))); - assert!(!is_text_file(Path::new("test.jpg"))); - assert!(!is_text_file(Path::new("test.pdf"))); -} - -#[test] -fn test_is_text_file_case_insensitive() { - assert!(is_text_file(Path::new("test.TXT"))); - assert!(is_text_file(Path::new("test.RS"))); - assert!(is_text_file(Path::new("test.Py"))); -} - -#[test] -fn test_file_batch_from_path() { - let temp_dir = tempfile::tempdir().unwrap(); - let dir_path = temp_dir.path(); - - File::create(dir_path.join("file1.txt")).unwrap(); - File::create(dir_path.join("file2.rs")).unwrap(); - fs::create_dir(dir_path.join("subdir")).unwrap(); - - let batch = FileBatch::from_path(dir_path.to_path_buf(), false); - assert_eq!(batch.count(), 2); - assert!(batch.filenames.contains(&"file1.txt".to_string())); - assert!(batch.filenames.contains(&"file2.rs".to_string())); -} - -#[test] -fn test_file_batch_from_path_nonexistent() { - let batch = FileBatch::from_path(PathBuf::from("/nonexistent/path"), false); - assert_eq!(batch.count(), 0); -} - -#[test] -fn test_file_batch_from_path_non_recursive() { - let temp_dir = tempfile::tempdir().unwrap(); - let dir_path = temp_dir.path(); - File::create(dir_path.join("file1.txt")).unwrap(); - File::create(dir_path.join("file2.rs")).unwrap(); - fs::create_dir(dir_path.join("subdir")).unwrap(); - File::create(dir_path.join("subdir").join("file3.txt")).unwrap(); - let batch = FileBatch::from_path(dir_path.to_path_buf(), false); - assert_eq!(batch.count(), 2); - assert!(batch.filenames.contains(&"file1.txt".to_string())); - assert!(batch.filenames.contains(&"file2.rs".to_string())); - assert!(!batch.filenames.contains(&"subdir/file3.txt".to_string())); -} - -#[test] -fn test_file_batch_from_path_recursive() { - let temp_dir = tempfile::tempdir().unwrap(); - let dir_path = temp_dir.path(); - File::create(dir_path.join("file1.txt")).unwrap(); - fs::create_dir(dir_path.join("subdir1")).unwrap(); - File::create(dir_path.join("subdir1").join("file2.rs")).unwrap(); - fs::create_dir(dir_path.join("subdir1").join("nested")).unwrap(); - File::create(dir_path.join("subdir1").join("nested").join("file3.md")).unwrap(); - fs::create_dir(dir_path.join("subdir2")).unwrap(); - File::create(dir_path.join("subdir2").join("file4.py")).unwrap(); - let batch = FileBatch::from_path(dir_path.to_path_buf(), true); - assert_eq!(batch.count(), 4); - assert!(batch.filenames.contains(&"file1.txt".to_string())); - assert!(batch.filenames.contains(&"subdir1/file2.rs".to_string())); - assert!( - batch - .filenames - .contains(&"subdir1/nested/file3.md".to_string()) - ); - assert!(batch.filenames.contains(&"subdir2/file4.py".to_string())); -} -#[test] -fn test_read_file_sample() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("test.txt"); - - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"Hello, World!").unwrap(); - - let content = read_file_sample(&file_path, 1000); - assert_eq!(content, Some("Hello, World!".to_string())); -} - -#[test] -fn test_read_file_sample_with_limit() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("test.txt"); - - let mut file = File::create(&file_path).unwrap(); - file.write_all(b"Hello, World! This is a long text.") - .unwrap(); - - let content = read_file_sample(&file_path, 5); - assert_eq!(content, Some("Hello".to_string())); -} - -#[test] -fn test_read_file_sample_binary_file() { - let temp_dir = tempfile::tempdir().unwrap(); - let file_path = temp_dir.path().join("test.bin"); - - let mut file = File::create(&file_path).unwrap(); - file.write_all(&[0x00, 0xFF, 0x80, 0x90]).unwrap(); - - let content = read_file_sample(&file_path, 1000); - assert_eq!(content, None); -} - -#[test] -fn test_read_file_sample_nonexistent() { - let content = read_file_sample(Path::new("/nonexistent/file.txt"), 1000); - assert_eq!(content, None); -} - -#[test] -fn test_organization_plan_serialization() { - let plan = OrganizationPlan { - files: vec![FileCategory { - filename: "test.txt".to_string(), - category: "Documents".to_string(), - sub_category: "Text".to_string(), - }], - }; - - let json = serde_json::to_string(&plan).unwrap(); - assert!(json.contains("test.txt")); - assert!(json.contains("Documents")); - - let deserialized: OrganizationPlan = serde_json::from_str(&json).unwrap(); - assert_eq!(deserialized.files[0].filename, "test.txt"); -} - -#[test] -fn test_file_category_serialization() { - let fc = FileCategory { - filename: "file.rs".to_string(), - category: "Code".to_string(), - sub_category: "Rust".to_string(), - }; - - let json = serde_json::to_string(&fc).unwrap(); - let deserialized: FileCategory = serde_json::from_str(&json).unwrap(); - - assert_eq!(fc.filename, deserialized.filename); - assert_eq!(fc.category, deserialized.category); - assert_eq!(fc.sub_category, deserialized.sub_category); -} diff --git a/src/gemini.rs b/src/gemini/client.rs similarity index 97% rename from src/gemini.rs rename to src/gemini/client.rs index 783df50..242008d 100644 --- a/src/gemini.rs +++ b/src/gemini/client.rs @@ -1,8 +1,8 @@ -use crate::cache::Cache; -use crate::files::OrganizationPlan; -use crate::gemini_errors::GeminiError; -use crate::gemini_helpers::PromptBuilder; -use crate::gemini_types::{GeminiResponse, OrganizationPlanResponse}; +use crate::models::OrganizationPlan; +use crate::storage::Cache; +use crate::gemini::errors::GeminiError; +use crate::gemini::prompt::PromptBuilder; +use crate::gemini::types::{GeminiResponse, OrganizationPlanResponse}; use reqwest::Client; use serde_json::json; use std::path::Path; diff --git a/src/gemini_errors.rs b/src/gemini/errors.rs similarity index 100% rename from src/gemini_errors.rs rename to src/gemini/errors.rs diff --git a/src/gemini/mod.rs b/src/gemini/mod.rs new file mode 100644 index 0000000..27166a5 --- /dev/null +++ b/src/gemini/mod.rs @@ -0,0 +1,8 @@ +pub mod client; +pub mod errors; +pub mod prompt; +pub mod types; + +pub use client::GeminiClient; +pub use errors::GeminiError; +pub use types::{Candidate, Content, FileCategoryResponse, GeminiResponse, OrganizationPlanResponse, Part}; diff --git a/src/gemini_helpers.rs b/src/gemini/prompt.rs similarity index 93% rename from src/gemini_helpers.rs rename to src/gemini/prompt.rs index b569416..4211da8 100644 --- a/src/gemini_helpers.rs +++ b/src/gemini/prompt.rs @@ -1,5 +1,5 @@ -use crate::files::{FileCategory, OrganizationPlan}; -use crate::gemini_types::OrganizationPlanResponse; +use crate::models::{FileCategory, OrganizationPlan}; +use crate::gemini::types::OrganizationPlanResponse; impl OrganizationPlanResponse { pub fn to_organization_plan(self) -> OrganizationPlan { diff --git a/src/gemini_types.rs b/src/gemini/types.rs similarity index 100% rename from src/gemini_types.rs rename to src/gemini/types.rs diff --git a/src/lib.rs b/src/lib.rs index e7487a4..5c2a45f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,9 +1,14 @@ -pub mod cache; -pub mod config; +pub mod cli; +pub mod settings; pub mod files; pub mod gemini; -pub mod gemini_errors; -pub mod gemini_helpers; -pub mod gemini_types; -pub mod prompt; -pub mod undo; +pub mod models; +pub mod storage; + +pub use cli::Args; +pub use settings::Config; +pub use files::{FileBatch, execute_move, is_text_file, read_file_sample, undo_moves}; +pub use gemini::GeminiClient; +pub use gemini::GeminiError; +pub use models::{FileCategory, FileMoveRecord, MoveStatus, OrganizationPlan}; +pub use storage::{Cache, UndoLog}; diff --git a/src/main.rs b/src/main.rs index 1b3ade7..bf74a91 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,242 +1,21 @@ +use noentropy::cli::{Args, orchestrator::{handle_organization, handle_undo}}; +use noentropy::settings::{get_or_prompt_api_key, get_or_prompt_download_folder}; use clap::Parser; -use colored::*; -use futures::future::join_all; -use noentropy::cache::Cache; -use noentropy::config::{self, Config}; -use noentropy::files::{FileBatch, OrganizationPlan, execute_move}; -use noentropy::gemini::GeminiClient; -use noentropy::gemini_errors::GeminiError; -use noentropy::undo::UndoLog; -use std::path::PathBuf; -use std::sync::Arc; - -#[derive(Parser, Debug)] -#[command(author, version, about, long_about = None)] -struct Args { - #[arg(short, long, help = "Preview changes without moving files")] - dry_run: bool, - - #[arg( - short, - long, - default_value_t = 5, - help = "Maximum concurrent API requests" - )] - max_concurrent: usize, - #[arg(long, help = "Recursively searches files in subdirectory")] - recursive: bool, - #[arg(long, help = "Undo the last file organization")] - undo: bool, -} #[tokio::main] async fn main() -> Result<(), Box> { let args = Args::parse(); if args.undo { - let download_path = config::get_or_prompt_download_folder()?; - let undo_log_path = Config::get_undo_log_path()?; - - if !undo_log_path.exists() { - println!("{}", "No undo log found. Nothing to undo.".yellow()); - return Ok(()); - } - - let mut undo_log = UndoLog::load_or_create(&undo_log_path); - - if !undo_log.has_completed_moves() { - println!("{}", "No completed moves to undo.".yellow()); - return Ok(()); - } - - noentropy::files::undo_moves(&download_path, &mut undo_log, args.dry_run)?; - - if let Err(e) = undo_log.save(&undo_log_path) { - eprintln!("Warning: Failed to save undo log: {}", e); - } - + let download_path = get_or_prompt_download_folder()?; + handle_undo(args, download_path).await?; return Ok(()); } - let api_key = config::get_or_prompt_api_key()?; - let download_path = config::get_or_prompt_download_folder()?; + let api_key = get_or_prompt_api_key()?; + let download_path = get_or_prompt_download_folder()?; - let client: GeminiClient = GeminiClient::new(api_key); - - let mut cache_path = std::env::var("HOME") - .map(PathBuf::from) - .expect("No Home found"); - cache_path.push(".config/noentropy/data/.noentropy_cache.json"); - let mut cache = Cache::load_or_create(cache_path.as_path()); - - cache.cleanup_old_entries(7 * 24 * 60 * 60); - - let undo_log_path = Config::get_undo_log_path()?; - let mut undo_log = UndoLog::load_or_create(&undo_log_path); - undo_log.cleanup_old_entries(30 * 24 * 60 * 60); - - let batch = FileBatch::from_path(download_path.clone(), args.recursive); - - if batch.filenames.is_empty() { - println!("{}", "No files found to organize!".yellow()); - return Ok(()); - } - - println!( - "Found {} files. Asking Gemini to organize...", - batch.count() - ); - - let mut plan: OrganizationPlan = match client - .organize_files_with_cache(batch.filenames, Some(&mut cache), Some(&download_path)) - .await - { - Ok(plan) => plan, - Err(e) => { - handle_gemini_error(e); - return Ok(()); - } - }; - - println!( - "{}", - "Gemini Plan received! Performing deep inspection...".green() - ); - - let client = Arc::new(client); - let semaphore = Arc::new(tokio::sync::Semaphore::new(args.max_concurrent)); - - let tasks: Vec<_> = plan - .files - .iter_mut() - .zip(batch.paths.iter()) - .map(|(file_category, path)| { - let client = Arc::clone(&client); - let filename = file_category.filename.clone(); - let category = file_category.category.clone(); - let path = path.clone(); - let semaphore = Arc::clone(&semaphore); - - async move { - if noentropy::files::is_text_file(&path) { - let _permit = semaphore.acquire().await.unwrap(); - if let Some(content) = noentropy::files::read_file_sample(&path, 5000) { - println!("Reading content of {}...", filename.green()); - client - .get_ai_sub_category(&filename, &category, &content) - .await - } else { - String::new() - } - } else { - String::new() - } - } - }) - .collect(); - - let sub_categories = join_all(tasks).await; - - for (file_category, sub_category) in plan.files.iter_mut().zip(sub_categories) { - file_category.sub_category = sub_category; - } - - println!("{}", "Deep inspection complete! Moving Files.....".green()); - - if args.dry_run { - println!("{} Dry run mode - skipping file moves.", "INFO:".cyan()); - } else { - execute_move(&download_path, plan, Some(&mut undo_log)); - } - println!("{}", "Done!".green().bold()); - - if let Err(e) = cache.save(cache_path.as_path()) { - eprintln!("Warning: Failed to save cache: {}", e); - } - - if let Err(e) = undo_log.save(&undo_log_path) { - eprintln!("Warning: Failed to save undo log: {}", e); - } + handle_organization(args, api_key, download_path).await?; Ok(()) } - -fn handle_gemini_error(error: GeminiError) { - use colored::*; - - match error { - GeminiError::RateLimitExceeded { retry_after } => { - println!( - "{} API rate limit exceeded. Please wait {} seconds before trying again.", - "ERROR:".red(), - retry_after - ); - } - GeminiError::QuotaExceeded { limit } => { - println!( - "{} Quota exceeded: {}. Please check your Gemini API usage.", - "ERROR:".red(), - limit - ); - } - GeminiError::ModelNotFound { model } => { - println!( - "{} Model '{}' not found. Please check the model name in the configuration.", - "ERROR:".red(), - model - ); - } - GeminiError::InvalidApiKey => { - println!( - "{} Invalid API key. Please check your GEMINI_API_KEY environment variable.", - "ERROR:".red() - ); - } - GeminiError::ContentPolicyViolation { reason } => { - println!("{} Content policy violation: {}", "ERROR:".red(), reason); - } - GeminiError::ServiceUnavailable { reason } => { - println!( - "{} Gemini service is temporarily unavailable: {}", - "ERROR:".red(), - reason - ); - } - GeminiError::NetworkError(e) => { - println!("{} Network error: {}", "ERROR:".red(), e); - } - GeminiError::Timeout { seconds } => { - println!( - "{} Request timed out after {} seconds.", - "ERROR:".red(), - seconds - ); - } - GeminiError::InvalidRequest { details } => { - println!("{} Invalid request: {}", "ERROR:".red(), details); - } - GeminiError::ApiError { status, message } => { - println!( - "{} API error (HTTP {}): {}", - "ERROR:".red(), - status, - message - ); - } - GeminiError::InvalidResponse(msg) => { - println!("{} Invalid response from Gemini: {}", "ERROR:".red(), msg); - } - GeminiError::InternalError { details } => { - println!("{} Internal server error: {}", "ERROR:".red(), details); - } - GeminiError::SerializationError(e) => { - println!("{} JSON serialization error: {}", "ERROR:".red(), e); - } - } - - println!("\n{} Check the following:", "HINT:".yellow()); - println!(" • Your GEMINI_API_KEY is correctly set"); - println!(" • Your internet connection is working"); - println!(" • Gemini API service is available"); - println!(" • You haven't exceeded your API quota"); -} diff --git a/src/models/metadata.rs b/src/models/metadata.rs new file mode 100644 index 0000000..08c7857 --- /dev/null +++ b/src/models/metadata.rs @@ -0,0 +1,31 @@ +use crate::models::organization::OrganizationPlan; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::fs; +use std::path::Path; +use std::time::UNIX_EPOCH; + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub struct FileMetadata { + pub size: u64, + pub modified: u64, +} + +impl FileMetadata { + pub fn from_path(file_path: &Path) -> Result> { + let metadata = fs::metadata(file_path)?; + let modified = metadata.modified()?.duration_since(UNIX_EPOCH)?.as_secs(); + + Ok(Self { + size: metadata.len(), + modified, + }) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct CacheEntry { + pub response: OrganizationPlan, + pub timestamp: u64, + pub file_metadata: HashMap, +} diff --git a/src/models/mod.rs b/src/models/mod.rs new file mode 100644 index 0000000..3d69ea3 --- /dev/null +++ b/src/models/mod.rs @@ -0,0 +1,7 @@ +pub mod organization; +pub mod metadata; +pub mod move_record; + +pub use organization::{FileCategory, OrganizationPlan}; +pub use metadata::{CacheEntry, FileMetadata}; +pub use move_record::{FileMoveRecord, MoveStatus}; diff --git a/src/models/move_record.rs b/src/models/move_record.rs new file mode 100644 index 0000000..9a1b73f --- /dev/null +++ b/src/models/move_record.rs @@ -0,0 +1,34 @@ +use serde::{Deserialize, Serialize}; +use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FileMoveRecord { + pub source_path: PathBuf, + pub destination_path: PathBuf, + pub timestamp: u64, + pub status: MoveStatus, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] +pub enum MoveStatus { + Completed, + Undone, + Failed, +} + +impl FileMoveRecord { + pub fn new(source_path: PathBuf, destination_path: PathBuf, status: MoveStatus) -> Self { + let timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + Self { + source_path, + destination_path, + timestamp, + status, + } + } +} diff --git a/src/models/organization.rs b/src/models/organization.rs new file mode 100644 index 0000000..05b9890 --- /dev/null +++ b/src/models/organization.rs @@ -0,0 +1,13 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FileCategory { + pub filename: String, + pub category: String, + pub sub_category: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct OrganizationPlan { + pub files: Vec, +} diff --git a/src/config.rs b/src/settings/config.rs similarity index 95% rename from src/config.rs rename to src/settings/config.rs index 590de8c..8d5a5f0 100644 --- a/src/config.rs +++ b/src/settings/config.rs @@ -3,6 +3,8 @@ use serde::{Deserialize, Serialize}; use std::fs; use std::path::PathBuf; +use super::prompt::Prompter; + #[derive(Debug, Serialize, Deserialize, Clone)] pub struct Config { pub api_key: String, @@ -95,7 +97,7 @@ pub fn get_or_prompt_api_key() -> Result> { println!("{}", "🔑 NoEntropy Configuration".bold().cyan()); println!("{}", "─────────────────────────────".cyan()); - let api_key = crate::prompt::Prompter::prompt_api_key()?; + let api_key = Prompter::prompt_api_key()?; let mut config = Config::load().unwrap_or_default(); config.api_key = api_key.clone(); @@ -116,7 +118,7 @@ pub fn get_or_prompt_download_folder() -> Result, -} - #[derive(Serialize, Deserialize, Debug)] pub struct Cache { entries: HashMap, @@ -213,6 +200,3 @@ impl Cache { } } -#[cfg(test)] -#[path = "cache_tests.rs"] -mod tests; diff --git a/src/storage/mod.rs b/src/storage/mod.rs new file mode 100644 index 0000000..d25efb0 --- /dev/null +++ b/src/storage/mod.rs @@ -0,0 +1,100 @@ +pub mod cache; +pub mod undo_log; + +pub use cache::Cache; +pub use undo_log::UndoLog; + +#[cfg(test)] +mod tests { + use crate::storage::{Cache, UndoLog}; + use crate::models::{FileMoveRecord, MoveStatus}; + use std::path::PathBuf; + + #[test] + fn test_cache_new() { + let cache = Cache::new(); + // Just verify we can create a cache + let _ = cache; + } + + #[test] + fn test_cache_with_max_entries() { + let cache = Cache::with_max_entries(100); + let _ = cache; + } + + #[test] + fn test_undo_log_new() { + let log = UndoLog::new(); + assert!(!log.has_completed_moves()); + assert_eq!(log.get_completed_count(), 0); + } + + #[test] + fn test_undo_log_with_max_entries() { + let log = UndoLog::with_max_entries(500); + assert!(!log.has_completed_moves()); + assert_eq!(log.get_completed_count(), 0); + } + + #[test] + fn test_undo_log_record_move() { + let mut log = UndoLog::new(); + let source = PathBuf::from("/from/file.txt"); + let dest = PathBuf::from("/to/file.txt"); + + log.record_move(source.clone(), dest.clone()); + + assert!(log.has_completed_moves()); + assert_eq!(log.get_completed_count(), 1); + } + + #[test] + fn test_undo_log_record_failed_move() { + let mut log = UndoLog::new(); + let source = PathBuf::from("/from/file.txt"); + let dest = PathBuf::from("/to/file.txt"); + + log.record_failed_move(source.clone(), dest.clone()); + + assert!(!log.has_completed_moves()); + assert_eq!(log.get_completed_count(), 0); + } + + #[test] + fn test_undo_log_mark_as_undone() { + let mut log = UndoLog::new(); + let source = PathBuf::from("/from/file.txt"); + let dest = PathBuf::from("/to/file.txt"); + + log.record_move(source.clone(), dest.clone()); + assert_eq!(log.get_completed_count(), 1); + + log.mark_as_undone(&dest); + assert_eq!(log.get_completed_count(), 0); + } + + #[test] + fn test_file_move_record_status() { + let record = FileMoveRecord::new( + PathBuf::from("/from"), + PathBuf::from("/to"), + MoveStatus::Completed + ); + assert_eq!(record.status, MoveStatus::Completed); + } + + #[test] + fn test_get_completed_moves_empty() { + let log: UndoLog = UndoLog::new(); + let moves = log.get_completed_moves(); + assert!(moves.is_empty()); + } + + #[test] + fn test_get_directory_usage_empty() { + let log: UndoLog = UndoLog::new(); + let usage = log.get_directory_usage(PathBuf::from("/").as_path()); + assert!(usage.is_empty()); + } +} \ No newline at end of file diff --git a/src/undo.rs b/src/storage/undo_log.rs similarity index 84% rename from src/undo.rs rename to src/storage/undo_log.rs index dc22c49..6480857 100644 --- a/src/undo.rs +++ b/src/storage/undo_log.rs @@ -1,24 +1,10 @@ +use crate::models::{FileMoveRecord, MoveStatus}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::fs; use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; -#[derive(Serialize, Deserialize, Debug, Clone)] -pub struct FileMoveRecord { - pub source_path: PathBuf, - pub destination_path: PathBuf, - pub timestamp: u64, - pub status: MoveStatus, -} - -#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] -pub enum MoveStatus { - Completed, - Undone, - Failed, -} - #[derive(Serialize, Deserialize, Debug)] pub struct UndoLog { entries: Vec, @@ -78,18 +64,7 @@ impl UndoLog { } pub fn record_move(&mut self, source_path: PathBuf, destination_path: PathBuf) { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_secs(); - - let record = FileMoveRecord { - source_path, - destination_path, - timestamp, - status: MoveStatus::Completed, - }; - + let record = FileMoveRecord::new(source_path, destination_path, MoveStatus::Completed); self.entries.push(record); if self.entries.len() > self.max_entries { @@ -98,18 +73,7 @@ impl UndoLog { } pub fn record_failed_move(&mut self, source_path: PathBuf, destination_path: PathBuf) { - let timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap_or_default() - .as_secs(); - - let record = FileMoveRecord { - source_path, - destination_path, - timestamp, - status: MoveStatus::Failed, - }; - + let record = FileMoveRecord::new(source_path, destination_path, MoveStatus::Failed); self.entries.push(record); if self.entries.len() > self.max_entries { @@ -216,6 +180,3 @@ impl UndoLog { } } -#[cfg(test)] -#[path = "undo_tests.rs"] -mod tests; diff --git a/src/undo_tests.rs b/src/undo_tests.rs deleted file mode 100644 index 70e7023..0000000 --- a/src/undo_tests.rs +++ /dev/null @@ -1,179 +0,0 @@ -use super::*; -use std::fs; -use tempfile::TempDir; - -#[test] -fn test_undo_log_creation() { - let log = UndoLog::new(); - assert_eq!(log.get_completed_count(), 0); - assert!(!log.has_completed_moves()); -} - -#[test] -fn test_record_move() { - let mut log = UndoLog::new(); - let source = PathBuf::from("/test/source.txt"); - let dest = PathBuf::from("/test/dest/source.txt"); - - log.record_move(source.clone(), dest.clone()); - - assert_eq!(log.get_completed_count(), 1); - assert!(log.has_completed_moves()); - - let completed = log.get_completed_moves(); - assert_eq!(completed.len(), 1); - assert_eq!(completed[0].source_path, source); - assert_eq!(completed[0].destination_path, dest); - assert_eq!(completed[0].status, MoveStatus::Completed); -} - -#[test] -fn test_record_failed_move() { - let mut log = UndoLog::new(); - let source = PathBuf::from("/test/source.txt"); - let dest = PathBuf::from("/test/dest/source.txt"); - - log.record_failed_move(source.clone(), dest.clone()); - - assert_eq!(log.get_completed_count(), 0); - assert!(!log.has_completed_moves()); -} - -#[test] -fn test_mark_as_undone() { - let mut log = UndoLog::new(); - let source = PathBuf::from("/test/source.txt"); - let dest = PathBuf::from("/test/dest/source.txt"); - - log.record_move(source.clone(), dest.clone()); - assert_eq!(log.get_completed_count(), 1); - - log.mark_as_undone(&dest); - assert_eq!(log.get_completed_count(), 0); -} - -#[test] -fn test_save_and_load() { - let temp_dir = TempDir::new().unwrap(); - let undo_log_path = temp_dir.path().join("undo_log.json"); - - let mut log = UndoLog::new(); - log.record_move( - PathBuf::from("/test/source.txt"), - PathBuf::from("/test/dest/source.txt"), - ); - - log.save(&undo_log_path).unwrap(); - assert!(undo_log_path.exists()); - - let loaded_log = UndoLog::load_or_create(&undo_log_path); - assert_eq!(loaded_log.get_completed_count(), 1); -} - -#[test] -fn test_cleanup_old_entries() { - let mut log = UndoLog::new(); - - let old_timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs() - - (10 * 24 * 60 * 60); - - let source = PathBuf::from("/test/source.txt"); - let dest = PathBuf::from("/test/dest/source.txt"); - - let old_record = FileMoveRecord { - source_path: source.clone(), - destination_path: dest.clone(), - timestamp: old_timestamp, - status: MoveStatus::Undone, - }; - - log.entries.push(old_record.clone()); - log.record_move(source.clone(), dest); - - assert_eq!(log.entries.len(), 2); - - log.cleanup_old_entries(7 * 24 * 60 * 60); - - assert_eq!(log.entries.len(), 1); - assert_eq!(log.get_completed_count(), 1); -} - -#[test] -fn test_evict_oldest() { - let mut log = UndoLog::with_max_entries(2); - - log.record_move( - PathBuf::from("/test/source1.txt"), - PathBuf::from("/test/dest/source1.txt"), - ); - - std::thread::sleep(std::time::Duration::from_millis(10)); - - log.record_move( - PathBuf::from("/test/source2.txt"), - PathBuf::from("/test/dest/source2.txt"), - ); - - log.record_move( - PathBuf::from("/test/source3.txt"), - PathBuf::from("/test/dest/source3.txt"), - ); - - assert_eq!(log.get_completed_count(), 2); -} - -#[test] -fn test_get_directory_usage() { - let mut log = UndoLog::new(); - let base_path = PathBuf::from("/test"); - - log.record_move( - PathBuf::from("/test/source1.txt"), - PathBuf::from("/test/Documents/report.txt"), - ); - - log.record_move( - PathBuf::from("/test/source2.txt"), - PathBuf::from("/test/Documents/notes.txt"), - ); - - log.record_move( - PathBuf::from("/test/source3.txt"), - PathBuf::from("/test/Images/photo.png"), - ); - - let usage = log.get_directory_usage(&base_path); - - assert_eq!(usage.get("Documents"), Some(&2)); - assert_eq!(usage.get("Images"), Some(&1)); -} - -#[test] -fn test_load_corrupted_log() { - let temp_dir = TempDir::new().unwrap(); - let undo_log_path = temp_dir.path().join("undo_log.json"); - - fs::write(&undo_log_path, "invalid json").unwrap(); - - let log = UndoLog::load_or_create(&undo_log_path); - assert_eq!(log.get_completed_count(), 0); -} - -#[test] -fn test_multiple_moves_same_file() { - let mut log = UndoLog::new(); - let source = PathBuf::from("/test/source.txt"); - let dest1 = PathBuf::from("/test/dest1/source.txt"); - let dest2 = PathBuf::from("/test/dest2/source.txt"); - - log.record_move(source.clone(), dest1.clone()); - log.record_move(source.clone(), dest2); - - assert_eq!(log.get_completed_count(), 2); - - log.mark_as_undone(&dest1); - assert_eq!(log.get_completed_count(), 1); -}