Dr-Swopt 3 日 前
コミット
3720bdc18a
3 ファイル変更37 行追加55 行削除
  1. BIN
      YOLOv5-Detection.pt
  2. + 19 - 19
      demo_app.py
  3. + 18 - 36
      src/api/main.py

BIN
YOLOv5-Detection.pt


+ 19 - 19
demo_app.py

@@ -73,14 +73,15 @@ def show_tech_guide():
     """)
 
     st.write("---")
-    st.write("### 🧠 4. Model Benchmarking: YOLO26 vs YOLOv5")
-    st.write("""
-    We have included **YOLOv5** as an industry-standard baseline to validate the performance of our custom **YOLO26** model.
+    st.markdown("""
+    Your detection environment is powered by **YOLO26**, a custom architectural fork designed for zero-latency industrial sorting.
     
-    **Key Comparison Points:**
-    - **NMS-Free Architecture**: YOLO26 is designed to be NMS-Free, meaning it doesn't require a secondary 'cleaning' step to remove overlapping boxes, making it more efficient in post-processing.
-    - **Accuracy vs. Latency**: While YOLOv5 is a robust general-purpose model, YOLO26 is fine-tuned specifically for Palm Oil FFB features, providing superior ripeness grading.
-    - **Real-Time Efficiency**: By comparing the 'Inference Speed' and 'Post-Processing' metrics, you can see how the architectural differences impact real-world performance.
+    ### ⚡ Performance Comparison
+    | Feature | YOLO26 (ONNX) | YOLO26 (Native) |
+    | :--- | :--- | :--- |
+    | **Coordinate System** | Normalized (0.0 - 1.0) | Absolute (Pixels) |
+    | **Primary Use Case** | Real-time Edge Sorting | High-Resolution Auditing |
+    | **Post-Processing** | None (NMS-Free) | Standard NMS |
     """)
 
 # --- 1. Global Backend Check ---
@@ -169,19 +170,21 @@ st.sidebar.slider(
 st.sidebar.markdown("---")
 # Inference Engine
 engine_choice = st.sidebar.selectbox(
-    "Select Model Engine",
-    ["YOLO26 (PyTorch - Native)", "YOLO26 (ONNX - High Speed)", "YOLOv5 (Benchmarking Mode)"],
+    "Select Model Engine:",
+    ["YOLO26 (ONNX - High Speed)", "YOLO26 (PyTorch - Native)"],
     index=0,
-    help="ONNX is optimized for latency. PyTorch provides native object handling. YOLOv5 is the benchmark baseline.",
     on_change=reset_all_analysis # Clear canvas on engine switch
 )
+
+# Map selection to internal labels
+engine_map = {
+    "YOLO26 (ONNX - High Speed)": "onnx",
+    "YOLO26 (PyTorch - Native)": "pytorch"
+}
+
 st.sidebar.markdown("---")
-if "ONNX" in engine_choice:
-    model_type = "onnx"
-elif "YOLOv5" in engine_choice:
-    model_type = "yolov5"
-else:
-    model_type = "pytorch"
+model_type = engine_map[engine_choice]
+
 
 if st.sidebar.button("❓ How to read results?", icon="📘", width='stretch'):
     show_tech_guide()
@@ -414,9 +417,6 @@ with tab1:
                 res = requests.post(f"{API_BASE_URL}/analyze", files=files, data=payload)
                 if res.status_code == 200:
                     st.session_state.last_detection = res.json()
-                    detections = st.session_state.last_detection.get('detections', [])
-                    if model_type == "yolov5" and not detections:
-                        st.warning("⚠️ YOLOv5 Benchmarking model failed to load on the backend. Results are empty.")
                     st.rerun() # Refresh to show results immediately
                 else:
                     st.error(f"Detection Failed: {res.text}")

+ 18 - 36
src/api/main.py

@@ -65,12 +65,6 @@ class ModelManager:
         self.onnx_session = ort.InferenceSession(onnx_path)
         self.onnx_input_name = self.onnx_session.get_inputs()[0].name
         self.pt_model = YOLO(pt_path)
-        try:
-            self.v5_model = YOLO('YOLOv5-Detection.pt')
-            print("YOLOv5 model loaded successfully.")
-        except Exception as e:
-            print(f"Warning: YOLOv5-Detection.pt could not be loaded via Ultralytics (Compatibility issue). Details: {e}")
-            self.v5_model = None
         self.class_names = self.pt_model.names
 
     def preprocess_onnx(self, img: Image.Image):
@@ -147,33 +141,6 @@ class ModelManager:
         raw_snippet = results[0].boxes.data[:5].tolist() if len(results[0].boxes) > 0 else []
         return detections, raw_snippet, inference_ms
 
-    def run_v5_inference(self, img: Image.Image, conf_threshold: float):
-        if self.v5_model is None:
-            # Fallback for benchmarking if model is missing
-            print("Logic: Skipping YOLOv5 inference - model not loaded.")
-            return [], [], 0.0
-
-        import time
-        start_inf = time.perf_counter()
-        results = self.v5_model(img, conf=conf_threshold, verbose=False)
-        end_inf = time.perf_counter()
-        inference_ms = (end_inf - start_inf) * 1000
-
-        detections = []
-        for i, box in enumerate(results[0].boxes):
-            class_id = int(box.cls)
-            class_name = self.class_names.get(class_id, "Unknown")
-            detections.append({
-                "bunch_id": i + 1,
-                "class": class_name,
-                "confidence": round(float(box.conf), 2),
-                "is_health_alert": class_name in ["Abnormal", "Empty_Bunch"],
-                "box": box.xyxy.tolist()[0]
-            })
-        
-        # Extract snippet from results (simulating raw output)
-        raw_snippet = results[0].boxes.data[:5].tolist() if len(results[0].boxes) > 0 else []
-        return detections, raw_snippet, inference_ms
 
 model_manager = ModelManager(onnx_path='best.onnx', pt_path='best.pt')
 
@@ -191,7 +158,16 @@ repo = MongoPalmOilRepository(
     db_name=os.getenv("DB_NAME", "palm_oil_db"),
     collection_name=os.getenv("COLLECTION_NAME", "ffb_records")
 )
-repo.ensure_indexes()
+
+db_connected = False
+try:
+    print("Connecting to MongoDB Atlas...")
+    repo.ensure_indexes()
+    db_connected = True
+    print("MongoDB Atlas Connected.")
+except Exception as e:
+    print(f"Warning: Could not connect to MongoDB Atlas (Timeout). Cloud archival will be disabled. Details: {e}")
+
 analyze_use_case = AnalyzeBunchUseCase(vision_service, repo)
 analyze_batch_use_case = AnalyzeBatchUseCase(vision_service, repo)
 search_use_case = SearchSimilarUseCase(vision_service, repo)
@@ -228,8 +204,6 @@ async def analyze_with_health_metrics(file: UploadFile = File(...), model_type:
     # Select Inference Engine
     if model_type == "pytorch":
         detections, raw_sample, inference_ms = model_manager.run_pytorch_inference(img, current_conf)
-    elif model_type == "yolov5":
-        detections, raw_sample, inference_ms = model_manager.run_v5_inference(img, current_conf)
     else:
         detections, raw_sample, inference_ms = model_manager.run_onnx_inference(img, current_conf)
     
@@ -275,6 +249,8 @@ async def analyze_with_health_metrics(file: UploadFile = File(...), model_type:
 @app.post("/vectorize_and_store")
 async def vectorize_and_store(file: UploadFile = File(...), detection_data: str = Form(...)):
     """Cloud-dependent. Requires active billing."""
+    if not db_connected:
+        return {"status": "error", "message": "Cloud Archival is currently unavailable (Database Offline)."}
     import json
     try:
         primary_detection = json.loads(detection_data)
@@ -307,6 +283,10 @@ async def vectorize_and_store(file: UploadFile = File(...), detection_data: str
 @app.post("/process_batch")
 async def process_batch(files: List[UploadFile] = File(...), model_type: str = Form("onnx")):
     """Handles multiple images: Detect -> Vectorize -> Store."""
+    if not db_connected:
+        # We could still do detection locally, but the prompt says 'Detect -> Vectorize -> Store'
+        # For simplicity in this demo, we'll block it if DB is offline.
+        return {"status": "error", "message": "Batch Processing (Cloud Archival) is currently unavailable (Database Offline)."}
     batch_results = []
     temp_files = []
 
@@ -397,6 +377,8 @@ async def search_hybrid(
     limit: int = Form(3)
 ):
     """Hybrid Search: Supports Visual Similarity and Natural Language Search."""
+    if not db_connected:
+        return {"status": "error", "message": "Semantic Search is currently unavailable (Database Offline)."}
     temp_path = None
     try:
         try: