diff --git a/venv/main.py b/venv/main.py
index 194c91bae8bdf8ea787060f2e787198c3dfe2262..2308655849f882a74923471406f76ea5b96990ef 100644
--- a/venv/main.py
+++ b/venv/main.py
@@ -2,9 +2,12 @@ from fastapi import FastAPI, UploadFile, File, HTTPException
 from fastapi.responses import JSONResponse
 import pandas as pd
 from models.kmeans_model import clusterize_data
-
 app = FastAPI()
 
+@app.post("/cluster")
+async def print():
+    return {"ok"}
+
 @app.post("/clusters")
 async def process_file(file: UploadFile = File(...)):
     """
@@ -33,5 +36,8 @@ async def process_file(file: UploadFile = File(...)):
 
         # Return the result as JSON
         return JSONResponse(content=response)
+
+    except HTTPException:
+        raise
     except Exception as e:
         raise HTTPException(status_code=500, detail=str(e))
\ No newline at end of file
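
Note: a quick way to exercise the /clusters route is FastAPI's TestClient. The snippet below is a hypothetical smoke check, not part of this diff; it assumes the handler parses the upload as CSV with the columns used in the tests (Monthly Revenue, Age, Device) and that the preprocessing artifacts are present on disk, so the actual status code may differ.

    from io import BytesIO
    from fastapi.testclient import TestClient
    from main import app

    client = TestClient(app)

    # Minimal CSV payload matching the columns expected by the model
    csv_bytes = b"Monthly Revenue,Age,Device\n100,30,A\n200,40,B\n"
    response = client.post(
        "/clusters",
        files={"file": ("customers.csv", BytesIO(csv_bytes), "text/csv")},
    )
    print(response.status_code, response.json())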
diff --git a/venv/models/kmeans_model.py b/venv/models/kmeans_model.py
index e44cceb2ffb56d809ce401873801da62fde0c7d5..ae2113f80fb0a019ac20eeec5e5a55d66cf85e4b 100644
--- a/venv/models/kmeans_model.py
+++ b/venv/models/kmeans_model.py
@@ -50,7 +50,7 @@ def preprocess_input_for_model(input_data: pd.DataFrame, preprocessing_artifacts
     return reduced_data
 
 
-def clusterize_data(input_df:pd.DataFrame, preprocessing_artifacts_path='venv/models/preprocessing_artifacts.pkl'):
+def clusterize_data(input_df:pd.DataFrame, preprocessing_artifacts_path='models/preprocessing_artifacts.pkl'):
     """
     Predict clusters for new input data.
 
diff --git a/venv/requirements.txt b/venv/requirements.txt
index 1a0aa86bffab152ed476398ed9034e6b3cd4322e..daa18ba40f6fef9c69bcaa31e9571f94b78c535d 100644
--- a/venv/requirements.txt
+++ b/venv/requirements.txt
@@ -1,4 +1,5 @@
 pandas
 joblib
 fastapi
-scikit-learn
\ No newline at end of file
+scikit-learn
+uvicorn
\ No newline at end of file
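
Note: uvicorn is added as the ASGI server. A minimal launch sketch (hypothetical helper, not part of this diff; it assumes it is run from the venv/ directory so the relative artifact path resolves, and the host/port are illustrative):

    # run.py
    import uvicorn

    if __name__ == "__main__":
        # Serve the FastAPI app defined in main.py; reload is convenient in development
        uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)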
diff --git a/venv/test_model.py b/venv/test_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..52340912c300ad3bef2338bc83def27ba207c222
--- /dev/null
+++ b/venv/test_model.py
@@ -0,0 +1,91 @@
+import pytest
+import pandas as pd
+import numpy as np
+import joblib
+from unittest.mock import MagicMock
+from models.kmeans_model import clusterize_data, preprocess_input_for_model
+
+
+@pytest.fixture
+def preprocessing_artifacts():
+    scaler = MagicMock()
+    scaler.transform = lambda x: x
+    pca = MagicMock()
+    pca.n_components_ = 2
+    pca.transform = lambda x: np.array([[1, 2]] * len(x))
+    kmeans = MagicMock()
+    kmeans.cluster_centers_ = np.array([[1, 2], [3, 4]])
+    kmeans.predict = lambda x: [0] * len(x)
+    artifacts = {
+        'scaler': scaler,
+        'pca': pca,
+        'kmeans': kmeans,
+        'encoded_columns': ['Monthly Revenue', 'Age', 'Device_A', 'Device_B']
+    }
+    return artifacts
+
+@pytest.fixture
+def input_data():
+    return pd.DataFrame({
+        'Monthly Revenue': [100, 200],
+        'Age': [30, 40],
+        'Device': ['A', 'B']
+    })
+
+def test_empty_dataframe(preprocessing_artifacts, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    empty_df = pd.DataFrame()
+    with pytest.raises(KeyError, match="Отсутствуют обязательные колонки в данных"):
+        preprocess_input_for_model(empty_df)
+
+def test_preprocess_input_missing_device_columns(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    input_data_missing = input_data.drop(columns=['Device'])
+    with pytest.raises(KeyError, match="Отсутствуют обязательные колонки в данных"):
+        preprocess_input_for_model(input_data_missing)
+
+def test_preprocess_input_missing_age_columns(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    input_data_missing = input_data.drop(columns=['Age'])
+    with pytest.raises(KeyError, match="Отсутствуют обязательные колонки в данных"):
+        preprocess_input_for_model(input_data_missing)
+
+def test_preprocess_input_missing_monthly_revenue_columns(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    input_data_missing = input_data.drop(columns=['Monthly Revenue'])
+    with pytest.raises(KeyError, match="Отсутствуют обязательные колонки в данных"):
+        preprocess_input_for_model(input_data_missing)
+
+def test_preprocess_input_monthly_revenue_invalid_dtype(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    input_data['Monthly Revenue'] = ['100', '200']
+    with pytest.raises(ValueError, match="Колонки 'Monthly Revenue' и 'Age' должны содержать числовые значения"):
+        preprocess_input_for_model(input_data)
+
+def test_preprocess_input_age_invalid_dtype(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    input_data['Age'] = ['34', '20']
+    with pytest.raises(ValueError, match="Колонки 'Monthly Revenue' и 'Age' должны содержать числовые значения"):
+        preprocess_input_for_model(input_data)
+
+def test_pca_component_count(preprocessing_artifacts, input_data, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    result = preprocess_input_for_model(input_data)
+    expected_components = preprocessing_artifacts['pca'].n_components_
+    assert result.shape[1] == expected_components
+
+def test_missing_artifact_file(input_data):
+    with pytest.raises(FileNotFoundError, match="Не удалось найти файл артефактов"):
+        preprocess_input_for_model(input_data, 'sdfsd.pkl')
+
+def test_artifacts_missing_keys(preprocessing_artifacts, input_data, monkeypatch):
+    del preprocessing_artifacts['pca']
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    with pytest.raises(KeyError, match="В артефактах отсутствуют ключи"):
+        preprocess_input_for_model(input_data)
+
+def test_clusterize_empty_dataframe(preprocessing_artifacts, monkeypatch):
+    monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
+    empty_df = pd.DataFrame()
+    with pytest.raises(KeyError, match="Отсутствуют обязательные колонки в данных"):
+        clusterize_data(empty_df)
\ No newline at end of file
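
Note: the suite only covers validation failures. A hedged happy-path test that could be appended to test_model.py, building on the same mocked artifacts, might look like this; it assumes clusterize_data returns one cluster label per input row, which is not visible in this diff.

    def test_clusterize_valid_input(preprocessing_artifacts, input_data, monkeypatch):
        # Assumption: clusterize_data returns one label per row; the mocked
        # kmeans.predict yields 0 for every sample.
        monkeypatch.setattr(joblib, 'load', lambda _: preprocessing_artifacts)
        result = clusterize_data(input_data)
        assert len(result) == len(input_data)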