From 7369fd63b88d0f797cac5d65a2faeb9e4f0af986 Mon Sep 17 00:00:00 2001
From: Григорий Сафронов
Date: Mon, 21 Jul 2025 18:05:54 +0000
Subject: [PATCH] tests: add first tests

---
 tests/cfg test mod regression.rs  | 220 ++++++++++++++++++++++++++++++
 tests/cfg test mod smoke tests.rs | 157 +++++++++++++++++++++
 tests/cfg test mod unit tests.rs  | 158 +++++++++++++++++++++
 3 files changed, 535 insertions(+)
 create mode 100644 tests/cfg test mod regression.rs
 create mode 100644 tests/cfg test mod smoke tests.rs
 create mode 100644 tests/cfg test mod unit tests.rs

diff --git a/tests/cfg test mod regression.rs b/tests/cfg test mod regression.rs
new file mode 100644
index 0000000..bbc2205
--- /dev/null
+++ b/tests/cfg test mod regression.rs
@@ -0,0 +1,220 @@
+#[cfg(test)]
+mod regression_tests {
+    use super::*;
+    use serde_json::json;
+    use std::sync::Arc;
+    use parking_lot::RwLock;
+    use std::collections::HashMap;
+    use std::time::{SystemTime, UNIX_EPOCH};
+
+    #[test]
+    fn test_insert_get_flow() {
+        let db = Arc::new(RwLock::new(HashMap::new()));
+        let server = FutriixServer {
+            db: db.clone(),
+            time_series_db: Arc::new(RwLock::new(HashMap::new())),
+            command_queue: Arc::new(SegQueue::new()),
+            transactions: Arc::new(RwLock::new(HashMap::new())),
+            indexes: Arc::new(RwLock::new(HashMap::new())),
+            replication_enabled: false,
+            peer_nodes: Vec::new(),
+            sync_interval: 1000,
+            last_sync_timestamp: Arc::new(RwLock::new(0)),
+            command_history: Arc::new(RwLock::new(Vec::new())),
+            next_tx_id: Arc::new(RwLock::new(1)),
+        };
+
+        // Basic insert and get
+        let insert_cmd = Command::Insert {
+            key: "test_key".to_string(),
+            value: json!({"field": "value"}),
+        };
+        let response = server.process_command_internal(insert_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        let get_cmd = Command::Get {
+            key: "test_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        if let Response::Success(Some(value)) = response {
+            assert_eq!(value, json!({"field": "value"}));
+        }
+
+        // Update
+        let update_cmd = Command::Update {
+            key: "test_key".to_string(),
+            value: json!({"field": "new_value"}),
+        };
+        let response = server.process_command_internal(update_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Fetch a specific version
+        let get_cmd = Command::Get {
+            key: "test_key".to_string(),
+            version: Some(0),
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        if let Response::Success(Some(value)) = response {
+            assert_eq!(value, json!({"field": "value"}));
+        }
+
+        // Delete
+        let delete_cmd = Command::Delete {
+            key: "test_key".to_string(),
+        };
+        let response = server.process_command_internal(delete_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the data was actually deleted
+        let get_cmd = Command::Get {
+            key: "test_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Error(_)));
+    }
+
+    #[test]
+    fn test_transaction_flow() {
+        let db = Arc::new(RwLock::new(HashMap::new()));
+        let server = FutriixServer {
+            db: db.clone(),
+            time_series_db: Arc::new(RwLock::new(HashMap::new())),
+            command_queue: Arc::new(SegQueue::new()),
+            transactions: Arc::new(RwLock::new(HashMap::new())),
+            indexes: Arc::new(RwLock::new(HashMap::new())),
+            replication_enabled: false,
+            peer_nodes: Vec::new(),
+            sync_interval: 1000,
+            last_sync_timestamp: Arc::new(RwLock::new(0)),
+            command_history: Arc::new(RwLock::new(Vec::new())),
+            next_tx_id: Arc::new(RwLock::new(1)),
+        };
+
+        // Begin a transaction
+        let begin_cmd = Command::BeginTransaction;
+        let response = server.process_command_internal(begin_cmd);
+        let tx_id = if let Response::Success(Some(id)) = response {
+            id.as_u64().unwrap()
+        } else {
+            panic!("Transaction begin failed");
+        };
+
+        // Insert inside the transaction
+        let insert_cmd = Command::Insert {
+            key: "tx_key".to_string(),
+            value: json!({"tx": "data"}),
+        };
+        let response = server.process_command_internal(insert_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the data is not visible outside the transaction
+        let get_cmd = Command::Get {
+            key: "tx_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Error(_)));
+
+        // Commit the transaction
+        let commit_cmd = Command::CommitTransaction(tx_id);
+        let response = server.process_command_internal(commit_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the data is now visible
+        let get_cmd = Command::Get {
+            key: "tx_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        // Rollback test
+        let begin_cmd = Command::BeginTransaction;
+        let response = server.process_command_internal(begin_cmd);
+        let tx_id = if let Response::Success(Some(id)) = response {
+            id.as_u64().unwrap()
+        } else {
+            panic!("Transaction begin failed");
+        };
+
+        let insert_cmd = Command::Insert {
+            key: "rollback_key".to_string(),
+            value: json!({"should": "not_exist"}),
+        };
+        let response = server.process_command_internal(insert_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        let rollback_cmd = Command::RollbackTransaction(tx_id);
+        let response = server.process_command_internal(rollback_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the data was not persisted
+        let get_cmd = Command::Get {
+            key: "rollback_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Error(_)));
+    }
+
+    #[test]
+    fn test_time_series_operations() {
+        let db = Arc::new(RwLock::new(HashMap::new()));
+        let server = FutriixServer {
+            db: db.clone(),
+            time_series_db: Arc::new(RwLock::new(HashMap::new())),
+            command_queue: Arc::new(SegQueue::new()),
+            transactions: Arc::new(RwLock::new(HashMap::new())),
+            indexes: Arc::new(RwLock::new(HashMap::new())),
+            replication_enabled: false,
+            peer_nodes: Vec::new(),
+            sync_interval: 1000,
+            last_sync_timestamp: Arc::new(RwLock::new(0)),
+            command_history: Arc::new(RwLock::new(Vec::new())),
+            next_tx_id: Arc::new(RwLock::new(1)),
+        };
+
+        let now = SystemTime::now()
+            .duration_since(UNIX_EPOCH)
+            .unwrap()
+            .as_millis();
+
+        // Insert a time-series point with an explicit timestamp
+        let ts_insert = Command::TimeSeriesInsert {
+            key: "ts_data".to_string(),
+            value: json!({"temp": 25}),
+            timestamp: Some(now),
+        };
+        let response = server.process_command_internal(ts_insert);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Insert without an explicit timestamp
+        let ts_insert = Command::TimeSeriesInsert {
+            key: "ts_data".to_string(),
+            value: json!({"temp": 26}),
+            timestamp: None,
+        };
+        let response = server.process_command_internal(ts_insert);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Query the time series
+        let ts_query = Command::TimeSeriesQuery {
+            key: "ts_data".to_string(),
+            start: now - 1000,
+            end: now + 1000,
+        };
+        let response = server.process_command_internal(ts_query);
+        assert!(matches!(response, Response::TimeSeriesData(_)));
+
+        if let Response::TimeSeriesData(data) = response {
+            assert!(data.len() >= 1);
+            assert_eq!(data[0].1, json!({"temp": 25}));
+        }
+    }
+}
diff --git a/tests/cfg test mod smoke tests.rs b/tests/cfg test mod smoke tests.rs
new file mode 100644
index 0000000..d402538
--- /dev/null
+++ b/tests/cfg test mod smoke tests.rs
@@ -0,0 +1,157 @@
+#[cfg(test)]
+mod smoke_tests {
+    use super::*;
+    use serde_json::json;
+    use tokio::net::TcpStream;
+    use tokio::io::{AsyncReadExt, AsyncWriteExt};
+    use std::time::Duration;
+    use tokio::time;
+
+    #[tokio::test]
+    async fn test_server_startup() {
+        // Start the server in the background
+        let config: Value = toml::from_str(r#"
+            [server]
+            ip = "127.0.0.1"
+            port = 9090
+            log_path = "smoke_test.log"
+
+            [client]
+            ip = "127.0.0.1"
+            port = 9090
+
+            [replication]
+            enabled = false
+            peer_nodes = []
+            sync_interval = 1000
+
+            [http_api]
+            enabled = false
+            port = 9091
+        "#).unwrap();
+
+        let server = FutriixServer::new(&config);
+        let addr = "127.0.0.1:9090".to_string();
+
+        tokio::spawn(async move {
+            if let Err(e) = server.run(&addr).await {
+                eprintln!("Server error: {}", e);
+            }
+        });
+
+        // Give the server time to start
+        time::sleep(Duration::from_millis(100)).await;
+
+        // Verify we can connect
+        match TcpStream::connect("127.0.0.1:9090").await {
+            Ok(_) => println!("Server is running"),
+            Err(e) => panic!("Failed to connect to server: {}", e),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_basic_operations() {
+        let config: Value = toml::from_str(r#"
+            [server]
+            ip = "127.0.0.1"
+            port = 9092
+            log_path = "smoke_test_ops.log"
+
+            [client]
+            ip = "127.0.0.1"
+            port = 9092
+
+            [replication]
+            enabled = false
+            peer_nodes = []
+            sync_interval = 1000
+
+            [http_api]
+            enabled = false
+            port = 9093
+        "#).unwrap();
+
+        let server = FutriixServer::new(&config);
+        let addr = "127.0.0.1:9092".to_string();
+
+        tokio::spawn(async move {
+            if let Err(e) = server.run(&addr).await {
+                eprintln!("Server error: {}", e);
+            }
+        });
+
+        // Give the server time to start
+        time::sleep(Duration::from_millis(100)).await;
+
+        // Connect to the server
+        let mut stream = TcpStream::connect("127.0.0.1:9092").await.unwrap();
+
+        // Insert
+        let insert_cmd = Command::Insert {
+            key: "smoke_key".to_string(),
+            value: json!({"test": "value"}),
+        };
+        send_command(&mut stream, &insert_cmd).await.unwrap();
+
+        // Get
+        let get_cmd = Command::Get {
+            key: "smoke_key".to_string(),
+            version: None,
+        };
+        let response = send_command(&mut stream, &get_cmd).await.unwrap();
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        if let Response::Success(Some(value)) = response {
+            assert_eq!(value, json!({"test": "value"}));
+        }
+
+        // Update
+        let update_cmd = Command::Update {
+            key: "smoke_key".to_string(),
+            value: json!({"test": "updated"}),
+        };
+        send_command(&mut stream, &update_cmd).await.unwrap();
+
+        // Verify the update
+        let get_cmd = Command::Get {
+            key: "smoke_key".to_string(),
+            version: None,
+        };
+        let response = send_command(&mut stream, &get_cmd).await.unwrap();
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        if let Response::Success(Some(value)) = response {
+            assert_eq!(value, json!({"test": "updated"}));
+        }
+
+        // Delete
+        let delete_cmd = Command::Delete {
+            key: "smoke_key".to_string(),
+        };
+        send_command(&mut stream, &delete_cmd).await.unwrap();
+
+        // Verify the delete
+        let get_cmd = Command::Get {
+            key: "smoke_key".to_string(),
+            version: None,
+        };
+        let response = send_command(&mut stream, &get_cmd).await.unwrap();
+        assert!(matches!(response, Response::Error(_)));
+    }
+
+    // Length-prefixed MessagePack framing: 4-byte big-endian length, then the payload.
+    async fn send_command(stream: &mut TcpStream, cmd: &Command) -> Result<Response, Box<dyn std::error::Error>> {
+        let cmd_bytes = rmp_serde::to_vec(cmd)?;
+        let len = cmd_bytes.len() as u32;
+        stream.write_all(&len.to_be_bytes()).await?;
+        stream.write_all(&cmd_bytes).await?;
+
+        let mut len_buf = [0u8; 4];
+        stream.read_exact(&mut len_buf).await?;
+        let len = u32::from_be_bytes(len_buf) as usize;
+
+        let mut buf = vec![0u8; len];
+        stream.read_exact(&mut buf).await?;
+
+        Ok(rmp_serde::from_slice(&buf)?)
+    }
+}
diff --git a/tests/cfg test mod unit tests.rs b/tests/cfg test mod unit tests.rs
new file mode 100644
index 0000000..b87c9db
--- /dev/null
+++ b/tests/cfg test mod unit tests.rs
@@ -0,0 +1,158 @@
+#[cfg(test)]
+mod unit_tests {
+    use super::*;
+    use serde_json::json;
+    use std::sync::Arc;
+    use parking_lot::RwLock;
+    use std::collections::HashMap;
+
+    #[test]
+    fn test_index_operations() {
+        let indexes = Arc::new(RwLock::new(HashMap::new()));
+        let key = "test_key".to_string();
+        let value = json!({"name": "test", "age": 30});
+
+        // Create an index
+        let create_cmd = Command::CreateIndex {
+            field: "name".to_string(),
+        };
+        let server = create_test_server();
+        let response = server.process_command_internal(create_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the index was created
+        {
+            let indexes = server.indexes.read();
+            assert!(indexes.contains_key("name"));
+        }
+
+        // Insert data to be indexed
+        let insert_cmd = Command::Insert {
+            key: key.clone(),
+            value: value.clone(),
+        };
+        let response = server.process_command_internal(insert_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the data was indexed
+        {
+            let indexes = server.indexes.read();
+            let name_index = indexes.get("name").unwrap();
+            assert!(name_index.contains_key(&json!("test")));
+            assert_eq!(name_index[&json!("test")], vec!["test_key"]);
+        }
+
+        // Drop the index
+        let drop_cmd = Command::DropIndex {
+            field: "name".to_string(),
+        };
+        let response = server.process_command_internal(drop_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the index was removed
+        {
+            let indexes = server.indexes.read();
+            assert!(!indexes.contains_key("name"));
+        }
+    }
+
+    #[test]
+    fn test_backup_operations() {
+        let server = create_test_server();
+        let temp_path = "test_backup.json";
+
+        // Insert test data
+        let insert_cmd = Command::Insert {
+            key: "backup_key".to_string(),
+            value: json!({"backup": "test"}),
+        };
+        let response = server.process_command_internal(insert_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Create a backup
+        let backup_cmd = Command::BackupCreate {
+            path: temp_path.to_string(),
+        };
+        let response = server.process_command_internal(backup_cmd);
+        assert!(matches!(response, Response::BackupStatus { success: true, .. }));
+
+        // Clear the database
+        let delete_cmd = Command::Delete {
+            key: "backup_key".to_string(),
+        };
+        let response = server.process_command_internal(delete_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Restore from the backup
+        let restore_cmd = Command::BackupRestore {
+            path: temp_path.to_string(),
+        };
+        let response = server.process_command_internal(restore_cmd);
+        assert!(matches!(response, Response::BackupStatus { success: true, .. }));
+
+        // Verify the data was restored
+        let get_cmd = Command::Get {
+            key: "backup_key".to_string(),
+            version: None,
+        };
+        let response = server.process_command_internal(get_cmd);
+        assert!(matches!(response, Response::Success(Some(_))));
+
+        if let Response::Success(Some(value)) = response {
+            assert_eq!(value, json!({"backup": "test"}));
+        }
+
+        // Remove the temporary file
+        std::fs::remove_file(temp_path).unwrap();
+    }
+
+    #[test]
+    fn test_replication_commands() {
+        let server = create_test_server();
+
+        // Check replication status
+        let status_cmd = Command::ReplicationStatus;
+        let response = server.process_command_internal(status_cmd);
+        assert!(matches!(response, Response::ReplicationStatus { .. }));
+
+        // Add a peer
+        let add_peer_cmd = Command::ReplicationAddPeer {
+            addr: "127.0.0.1:8081".to_string(),
+        };
+        let response = server.process_command_internal(add_peer_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the peer was added
+        if let Response::ReplicationStatus { peers, .. } = server.process_command_internal(Command::ReplicationStatus) {
+            assert!(peers.contains(&"127.0.0.1:8081".to_string()));
+        }
+
+        // Remove the peer
+        let remove_peer_cmd = Command::ReplicationRemovePeer {
+            addr: "127.0.0.1:8081".to_string(),
+        };
+        let response = server.process_command_internal(remove_peer_cmd);
+        assert!(matches!(response, Response::Success(None)));
+
+        // Verify the peer was removed
+        if let Response::ReplicationStatus { peers, .. } = server.process_command_internal(Command::ReplicationStatus) {
+            assert!(!peers.contains(&"127.0.0.1:8081".to_string()));
+        }
+    }
+
+    fn create_test_server() -> FutriixServer {
+        FutriixServer {
+            db: Arc::new(RwLock::new(HashMap::new())),
+            time_series_db: Arc::new(RwLock::new(HashMap::new())),
+            command_queue: Arc::new(SegQueue::new()),
+            transactions: Arc::new(RwLock::new(HashMap::new())),
+            indexes: Arc::new(RwLock::new(HashMap::new())),
+            replication_enabled: false,
+            peer_nodes: Vec::new(),
+            sync_interval: 1000,
+            last_sync_timestamp: Arc::new(RwLock::new(0)),
+            command_history: Arc::new(RwLock::new(Vec::new())),
+            next_tx_id: Arc::new(RwLock::new(1)),
+        }
+    }
+}