فهرست منبع

feat: Implement interactive Plotly visualizations, PDF batch report generation, and a 'History Vault' tab, alongside a new API endpoint.

Dr-Swopt 3 روز پیش
والد
کامیت
0f77fae817
4 فایل‌های تغییر یافته به همراه 486 افزوده شده و 156 حذف شده
  1. 2 1
      .gitignore
  2. 322 40
      demo_app.py
  3. 0 78
      palm_oil_mobile/README.md
  4. 162 37
      src/api/main.py

+ 2 - 1
.gitignore

@@ -37,4 +37,5 @@ Thumbs.db
 unified_dataset
 datasets
 runs
-best_saved_model
+best_saved_model
+history_archive

+ 322 - 40
demo_app.py

@@ -7,6 +7,12 @@ import io
 import base64
 import pandas as pd
 import plotly.express as px
+import plotly.graph_objects as go
+import json
+import os
+from datetime import datetime
+from fpdf import FPDF
+
 
 # --- 1. Global Backend Check ---
 API_BASE_URL = "http://localhost:8000"
@@ -68,8 +74,205 @@ def reset_single_results():
 def reset_batch_results():
     st.session_state.last_batch_results = None
 
# MPOB Color Map for Overlays (Global for consistency)
# NOTE: insertion order matters — the Tab 1 legend renders one column per
# entry in this exact order. Keys must match the class names emitted by the
# API's detection payload.
overlay_colors = {
    'Ripe': '#22c55e',       # Industrial Green
    'Underripe': '#fbbf24',  # Industrial Orange
    'Unripe': '#3b82f6',     # Industrial Blue
    'Abnormal': '#dc2626',   # Critical Red
    'Empty_Bunch': '#64748b',# Waste Gray
    'Overripe': '#7c2d12'    # Dark Brown/Orange
}
+
def display_interactive_results(image, detections, key=None):
    """Render *image* in Streamlit with hoverable detection boxes (Plotly).

    Each detection dict must carry 'box' ([x1, y1, x2, y2] in PIL pixel
    coordinates), 'class', 'confidence' and 'is_health_alert'; an optional
    'bunch_id' labels the trace (falls back to the 1-based index).
    """
    width, height = image.size
    figure = go.Figure()

    # Background: the photo itself, anchored so PIL pixel (0, 0) maps to the
    # top-left corner of the plot area.
    figure.add_layout_image(
        dict(source=image, x=0, y=height, sizex=width, sizey=height,
             sizing="stretch", opacity=1, layer="below", xref="x", yref="y")
    )

    # Lock both axes to the image dimensions and hide all chart furniture.
    figure.update_xaxes(showgrid=False, range=(0, width), zeroline=False, visible=False)
    figure.update_yaxes(showgrid=False, range=(0, height), zeroline=False,
                        visible=False, scaleanchor="x")

    for index, detection in enumerate(detections):
        x1, y1, x2, y2 = detection['box']
        # PIL measures y downward from the top; Plotly upward from the
        # bottom — flip both y coordinates.
        top = height - y1
        bottom = height - y2
        box_color = overlay_colors.get(detection['class'], "#ffeb3b")
        bunch_id = detection.get('bunch_id', index + 1)

        hover_text = (
            f"<b>ID: #{bunch_id}</b>"
            f"<br>Grade: {detection['class']}"
            f"<br>Score: {detection['confidence']:.2f}"
            f"<br>Alert: {detection['is_health_alert']}"
        )
        figure.add_trace(go.Scatter(
            x=[x1, x2, x2, x1, x1],
            y=[top, top, bottom, bottom, top],
            fill="toself",
            fillcolor=box_color,
            opacity=0.3,  # Semi-transparent until hover
            mode='lines',
            line=dict(color=box_color, width=3),
            name=f"Bunch #{bunch_id}",
            text=hover_text,
            hoverinfo="text"
        ))

    figure.update_layout(width=800, height=600,
                         margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
    st.plotly_chart(figure, use_container_width=True, key=key)
+
def annotate_image(image, detections):
    """Draw high-visibility boxes and background-shaded labels on *image*.

    Mutates *image* in place and also returns it for convenience (callers
    in the batch gallery and the PDF report both rely on the mutation).

    Args:
        image: PIL.Image in RGB mode.
        detections: list of dicts with 'box' ([x1, y1, x2, y2]), 'class',
            'confidence' and optionally 'bunch_id'.

    Returns:
        The same PIL.Image, annotated.
    """
    from PIL import ImageDraw, ImageFont
    draw = ImageDraw.Draw(image)

    # Dynamic font size based on image resolution.
    font_size = max(20, image.width // 40)
    try:
        # Prefer a real TrueType font when running on a Windows host;
        # otherwise fall back to PIL's built-in bitmap font.
        font_path = "C:\\Windows\\Fonts\\arial.ttf"
        if os.path.exists(font_path):
            font = ImageFont.truetype(font_path, font_size)
        else:
            font = ImageFont.load_default()
    except Exception:  # was a bare except: don't swallow KeyboardInterrupt
        font = ImageFont.load_default()

    for det in detections:
        box = det['box']  # [x1, y1, x2, y2]
        cls = det['class']
        conf = det['confidence']
        bunch_id = det.get('bunch_id', '?')
        color = overlay_colors.get(cls, '#ffffff')

        # 1. Draw Bold Bounding Box (line width scales with resolution).
        draw.rectangle(box, outline=color, width=max(4, image.width // 200))

        # 2. Draw Label Background (High Contrast). Clamp the anchor so a
        #    box touching the top edge doesn't push its label off-image.
        label = f"#{bunch_id} {cls} {conf:.2f}"
        label_y = max(0, box[1] - font_size - 10)
        try:
            # textbbox provides precise coordinates for background rectangle.
            l, t, r, b = draw.textbbox((box[0], label_y), label, font=font)
            draw.rectangle([l - 5, t - 5, r + 5, b + 5], fill=color)
            draw.text((l, t), label, fill="white", font=font)
        except Exception:  # fallback for PIL builds without textbbox
            draw.text((box[0], max(0, box[1] - 25)), label, fill=color)

    return image
+
def generate_batch_report(data, uploaded_files_map=None):
    """Generate a professional PDF report for batch results with visual evidence.

    Args:
        data: batch response dict; uses 'industrial_summary', 'total_count'
            and (optionally) 'detailed_results' entries shaped like
            {'filename': ..., 'detection': {...}}.
        uploaded_files_map: optional {filename: raw image bytes} used to
            render annotated evidence pages.

    Returns:
        The PDF document as ``bytes`` (suitable for st.download_button).
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(190, 10, "Palm Oil FFB Harvest Quality Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.cell(190, 10, f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=True, align="C")
    pdf.ln(10)

    # 1. Summary Table
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "1. Batch Summary", ln=True)
    pdf.set_font("Arial", "", 12)

    summary = data.get('industrial_summary', {})
    total_bunches = data.get('total_count', 0)
    pdf.cell(95, 10, "Metric", border=1)
    pdf.cell(95, 10, "Value", border=1, ln=True)

    pdf.cell(95, 10, "Total Bunches Detected", border=1)
    pdf.cell(95, 10, str(total_bunches), border=1, ln=True)

    for grade, count in summary.items():
        if count > 0:
            pdf.cell(95, 10, f"Grade: {grade}", border=1)
            pdf.cell(95, 10, str(count), border=1, ln=True)

    pdf.ln(10)

    # 2. Strategic Insights
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "2. Strategic Yield Insights", ln=True)
    pdf.set_font("Arial", "", 12)

    unripe = summary.get('Unripe', 0)
    underripe = summary.get('Underripe', 0)
    loss = unripe + underripe

    if loss > 0:
        pdf.multi_cell(190, 10, f"WARNING: {loss} bunches were harvested before peak ripeness. "
                               "This directly impacts the Oil Extraction Rate (OER) and results in potential yield loss.")
    else:
        pdf.multi_cell(190, 10, "EXCELLENT: All detected bunches meet prime ripeness standards. Harvest efficiency is 100%.")

    # Critical Alerts
    abnormal = summary.get('Abnormal', 0)
    empty = summary.get('Empty_Bunch', 0)
    if abnormal > 0 or empty > 0:
        pdf.ln(5)
        pdf.set_text_color(220, 0, 0)
        pdf.set_font("Arial", "B", 12)
        pdf.cell(190, 10, "CRITICAL HEALTH ALERTS:", ln=True)
        pdf.set_font("Arial", "", 12)
        if abnormal > 0:
            pdf.cell(190, 10, f"- {abnormal} Abnormal Bunches detected (Requires immediate field inspection).", ln=True)
        if empty > 0:
            pdf.cell(190, 10, f"- {empty} Empty Bunches detected (Waste reduction needed).", ln=True)
        pdf.set_text_color(0, 0, 0)

    # 3. Visual Evidence Section
    if 'detailed_results' in data and uploaded_files_map:
        pdf.add_page()
        pdf.set_font("Arial", "B", 14)
        pdf.cell(190, 10, "3. Visual Batch Evidence (AI Overlay)", ln=True)
        pdf.ln(5)

        # Group detections by filename.
        results_by_file = {}
        for res in data['detailed_results']:
            results_by_file.setdefault(res['filename'], []).append(res['detection'])

        for fname, detections in results_by_file.items():
            if fname not in uploaded_files_map:
                continue
            img = Image.open(io.BytesIO(uploaded_files_map[fname])).convert("RGB")
            # Reuse the shared high-visibility overlay utility (mutates img).
            annotate_image(img, detections)

            # FPDF needs a file path. Use the sanitized basename so an
            # uploaded name containing path separators can't escape CWD.
            temp_img_path = f"temp_report_{os.path.basename(fname)}"
            img.save(temp_img_path)
            try:
                # Check if we need a new page based on image height (rough estimate).
                if pdf.get_y() > 200:
                    pdf.add_page()

                pdf.image(temp_img_path, x=10, w=150)
                pdf.set_font("Arial", "I", 10)
                pdf.cell(190, 10, f"Annotated: {fname}", ln=True)
                pdf.ln(5)
            finally:
                # Always clean up the temp file, even if FPDF raises.
                os.remove(temp_img_path)

    # Footer
    pdf.set_y(-15)
    pdf.set_font("Arial", "I", 8)
    pdf.cell(190, 10, "Generated by Palm Oil AI Desktop PoC - YOLO26 Engine", align="C")

    # PyFPDF 1.x returns a latin-1 str from output(dest='S'); fpdf2 returns
    # a bytearray. Normalize so st.download_button always receives bytes.
    raw = pdf.output(dest='S')
    if isinstance(raw, str):
        raw = raw.encode('latin-1')
    return bytes(raw)
+
+
+
 # --- Tabs ---
-tab1, tab2, tab3 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search"])
+tab1, tab2, tab3, tab4 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search", "History Vault"])
 
 # --- Tab 1: Single Analysis ---
 with tab1:
@@ -101,43 +304,20 @@ with tab1:
         if st.session_state.last_detection:
             st.divider()
             
-            # SIDE-BY-SIDE ANALYTICAL VIEW
-            col_left, col_right = st.columns(2)
-            
-            # Fetch data once
+            # PRIMARY ANNOTATED VIEW
+            st.write("### 🔍 AI Analytical View")
             data = st.session_state.last_detection
+            img = Image.open(uploaded_file).convert("RGB")
+            display_interactive_results(img, data['detections'], key="main_viewer")
 
-            with col_left:
-                st.image(uploaded_file, caption="Original Photo", width='stretch')
-            
-            with col_right:
-                # MANUAL OVERLAY DRAWING (NMS-Free Output from API)
-                img = Image.open(uploaded_file).convert("RGB")
-                from PIL import ImageDraw, ImageFont
-                draw = ImageDraw.Draw(img)
-                
-                # MPOB Color Map for Overlays
-                overlay_colors = {
-                    'Ripe': '#22c55e',       # Industrial Green
-                    'Underripe': '#fbbf24',  # Industrial Orange
-                    'Unripe': '#3b82f6',     # Industrial Blue
-                    'Abnormal': '#dc2626',   # Critical Red
-                    'Empty_Bunch': '#64748b' # Waste Gray
-                }
-                
-                for det in data['detections']:
-                    box = det['box'] # [x1, y1, x2, y2]
-                    cls = det['class']
-                    color = overlay_colors.get(cls, '#ffffff')
-                    
-                    # Draw Box
-                    draw.rectangle(box, outline=color, width=4)
-                    
-                    # Draw Label Background
-                    label = f"{cls} {det['confidence']:.2f}"
-                    draw.text((box[0], box[1] - 15), label, fill=color)
-                
-                st.image(img, caption="AI Analytical View (NMS-Free Native)", width='stretch')
+            # Visual Legend
+            st.write("#### 🎨 Ripeness Legend")
+            l_cols = st.columns(len(overlay_colors))
+            for i, (grade, color) in enumerate(overlay_colors.items()):
+                with l_cols[i]:
+                    st.markdown(f'<div style="background-color:{color}; padding:10px; border-radius:5px; text-align:center; color:white; font-weight:bold;">{grade}</div>', unsafe_allow_html=True)
+
+            st.divider()
 
             st.write("### 📈 Manager's Dashboard")
             m_col1, m_col2, m_col3 = st.columns(3)
@@ -158,7 +338,7 @@ with tab1:
                         st.warning("No Fresh Fruit Bunches detected.")
                     else:
                         for det in data['detections']:
-                            st.info(f"**{det['class']}** - {det['confidence']:.2%} confidence")
+                            st.info(f"### Bunch #{det['bunch_id']}: {det['class']} ({det['confidence']:.2%})")
                         
                         st.write("### 📊 Harvest Quality Mix")
                         # Convert industrial_summary dictionary to a DataFrame for charting
@@ -181,7 +361,7 @@ with tab1:
                                          },
                                          hole=0.4)
                             fig.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=300)
-                            st.plotly_chart(fig, width='stretch')
+                            st.plotly_chart(fig, width='stretch', key="single_pie")
 
                         # 💡 Strategic R&D Insight: Harvest Efficiency
                         st.write("---")
@@ -225,6 +405,33 @@ with tab1:
                                 else:
                                     st.error("Failed to connect to cloud service")
 
+                        if st.button("🚩 Flag Misclassification", width='stretch', type="secondary"):
+                            # Save to local feedback folder
+                            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+                            feedback_id = f"fb_{timestamp}"
+                            img_path = f"feedback/{feedback_id}.jpg"
+                            json_path = f"feedback/{feedback_id}.json"
+                            
+                            # Save image
+                            Image.open(uploaded_file).save(img_path)
+                            
+                            # Save metadata
+                            feedback_data = {
+                                "original_filename": uploaded_file.name,
+                                "timestamp": timestamp,
+                                "detections": data['detections'],
+                                "threshold_used": data['current_threshold']
+                            }
+                            with open(json_path, "w") as f:
+                                json.dump(feedback_data, f, indent=4)
+                            
+                            st.toast("✅ Feedback saved to local vault!", icon="🚩")
+
+                        if st.button("💾 Local History Vault (Auto-Saved)", width='stretch', type="secondary", disabled=True):
+                            pass
+                        st.caption("✅ This analysis was automatically archived to the local vault.")
+
+
 # --- Tab 2: Batch Processing ---
 with tab2:
     st.subheader("Bulk Analysis")
@@ -262,16 +469,50 @@ with tab2:
                                              'Empty_Bunch': '#64748b'
                                           })
                         fig_batch.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=200, showlegend=False)
-                        st.plotly_chart(fig_batch, width='stretch')
+                        st.plotly_chart(fig_batch, width='stretch', key="batch_bar")
 
             if batch_summary.get('Abnormal', 0) > 0:
                 st.error(f"🚨 BATCH CRITICAL: {batch_summary['Abnormal']} Abnormal Bunches found in this batch!")
 
             st.write("Generated Record IDs:")
             st.code(res_data['record_ids'])
-            if st.button("Clear Results & Start New Batch"):
+            
+            # --- 4. Batch Evidence Gallery ---
+            st.write("### 🖼️ Detailed Detection Evidence")
+            if 'detailed_results' in res_data:
+                # Group results by filename for gallery
+                gallery_map = {}
+                for res in res_data['detailed_results']:
+                    fname = res['filename']
+                    if fname not in gallery_map:
+                        gallery_map[fname] = []
+                    gallery_map[fname].append(res['detection'])
+                
+                # Show images with overlays using consistent utility
+                for up_file in uploaded_files:
+                    if up_file.name in gallery_map:
+                        with st.container(border=True):
+                            g_img = Image.open(up_file).convert("RGB")
+                            g_annotated = annotate_image(g_img, gallery_map[up_file.name])
+                            st.image(g_annotated, caption=f"Evidence: {up_file.name}", use_container_width=True)
+
+
+            # PDF Export Button (Pass images map)
+            files_map = {f.name: f.getvalue() for f in uploaded_files}
+            pdf_bytes = generate_batch_report(res_data, files_map)
+            st.download_button(
+                label="📄 Download Executive Batch Report (PDF)",
+                data=pdf_bytes,
+                file_name=f"PalmOil_BatchReport_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf",
+                mime="application/pdf",
+                width='stretch'
+            )
+
+
+            if st.button("Clear Results & Start New Batch", width='stretch'):
                 st.session_state.last_batch_results = None
                 st.rerun()
+
         st.divider()
 
     # 3. Uploader UI
@@ -377,3 +618,44 @@ with tab3:
                                     st.write(f"**ID:** `{rec_id}`")
                 else:
                     st.error(f"Search failed: {res.text}")
+
# --- Tab 4: History Vault ---
# Read-only browser over the API's local SQLite archive (/get_history).
# The API stores 'detections' and 'summary' as JSON strings, so both are
# json.loads-ed here before rendering.
with tab4:
    st.subheader("📜 Local History Vault")
    try:
        res = requests.get(f"{API_BASE_URL}/get_history")
        if res.status_code == 200:
            history_data = res.json().get("history", [])
            if not history_data:
                st.info("No saved records found.")
            else:
                # Selection table
                df_history = pd.DataFrame(history_data)[['id', 'filename', 'timestamp']]
                selected_id = st.selectbox("Select a record to review:", df_history['id'])
                
                if selected_id:
                    # History is non-empty and ids come from the same payload,
                    # so next() always finds a match here.
                    record = next(item for item in history_data if item["id"] == selected_id)
                    detections = json.loads(record['detections'])
                    
                    # Display Interactive Hover View. archive_path is a path on
                    # the API host; this assumes UI and API share a filesystem
                    # (desktop PoC setup).
                    if os.path.exists(record['archive_path']):
                        with open(record['archive_path'], "rb") as f:
                            hist_img = Image.open(f).convert("RGB")
                            display_interactive_results(hist_img, detections, key=f"hist_{record['id']}")
                        
                        st.write("### 📈 Archived Summary")
                        summary = json.loads(record['summary'])
                        s_col1, s_col2, s_col3 = st.columns(3)
                        with s_col1:
                            st.metric("Total Bunches", sum(summary.values()))
                        with s_col2:
                            st.metric("Healthy (Ripe)", summary.get('Ripe', 0))
                        with s_col3:
                            abnormal = summary.get('Abnormal', 0)
                            st.metric("Abnormal Alerts", abnormal)
                    else:
                        st.error(f"Archive file not found: {record['archive_path']}")
        else:
            st.error(f"Failed to fetch history: {res.text}")
    except Exception as e:
        # Broad catch is deliberate at this UI boundary: any failure
        # (network, JSON, file I/O) degrades to an error banner.
        st.error(f"Error loading history: {str(e)}")

+ 0 - 78
palm_oil_mobile/README.md

@@ -1,78 +0,0 @@
-# 🌴 Palm Oil Ripeness AI (YOLO26 Mobile)
-
-A professional, high-performance Flutter application powered by the **YOLO26 (January 2026)** architecture. Designed for palm oil plantation managers, this app utilizes **NMS-Free End-to-End** detection for maximum efficiency in the field.
-
----
-
-## 📱 Executive Summary
-The **Palm Oil Ripeness AI** mobile app is a field-ready tool that automates Fresh Fruit Bunch (FFB) assessment. By leveraging **YOLO26-Nano**, the app achieves a **43% speed increase** on mobile CPUs compared to previous generations, eliminating latency bottlenecks and providing instant, high-accuracy grading without internet connectivity.
-
----
-
-## 🛠 Features
-
-### 1. **Live Inference (NMS-Free Point-and-Scan)**
-- **Real-time Detection:** Powered by YOLO26's native one-to-one label assignment, removing the need for post-inference NMS processing.
-- **Momentum Lock:** A robust hysteresis system that "locks" onto fruit bunches to prevent camera stutter and ensure reliable capture.
-- **Ultra-Low Latency:** Optimized for **38.9ms inference** on standard mobile hardware.
-
-### 2. **Snap & Analyze (Manager's Manual Mode)**
-- **High-Res Accuracy:** Manual shutter mode using `ResolutionPreset.high` for maximum detail.
-- **Direct JPEG Decoding:** Skips YUV conversion for the most accurate industrial-grade inference.
-- **Auto-Reset Workflow:** Seamlessly clears previous results after acknowledgment to allow rapid sequential captures.
-
-### 3. **Analyze Gallery**
-- Allows managers to analyze previously captured photos from the device's storage.
-- Includes a scanning animation and detailed bounding box overlays.
-
-### 4. **History Vault & Result Persistence**
-- **SQLite Storage:** Every analysis is stored locally with metadata (ripeness class, confidence, coordinates).
-- **Image Archiving:** Automatically copies and persists captured images to the `ApplicationDocumentsDirectory`.
-
-### 5. **Industrial Alerts & Summaries**
-- **Health Alert:** 🔴 Red warnings for "Abnormal" or "Empty_Bunch" detections.
-- **Yield Warning:** 🟠 Orange alerts for "Unripe" or "Underripe" fruit, highlighting "Potential Yield Loss" to optimize Oil Extraction Rates (OER).
-
----
-
-## 🚀 Setup & Installation
-
-### Prerequisites
-- [Flutter SDK](https://docs.flutter.dev/get-started/install) (latest stable).
-- **Ultralytics 8.4.24+** (for model conversion to TFLite/CoreML).
-- A physical Android/iOS device (Camera required; Emulators NOT recommended).
-
-### 1. Initialize Project
-```bash
-# Navigate to the mobile project directory
-cd palm_oil_mobile
-
-# Fetch dependencies
-flutter pub get
-```
-
-### 2. Run the App
-To take advantage of the **YOLO26 performance gains**, run in **Release Mode**:
-```bash
-flutter run --release
-```
-
----
-
-## 🏗 Project Architecture
-
-### 📁 `lib/services/` (Logic Layer)
-- **`tflite_service.dart`**: An Isolate-based service handling **YOLO26 NMS-Free** inference. By removing the NMS step, the service reduces UI thread jank by up to 50% compared to legacy v8 models.
-
----
-
-## 📦 Assets
-- `assets/best.tflite`: The **YOLO26-Nano** model (Natively NMS-Free).
-- `assets/labels.txt`: Class definitions (Ripe, Unripe, Underripe, Overripe, Abnormal, Empty_Bunch).
-
----
-
-## ⚠️ Requirements & Permissions
-- **Camera:** Required for Live and Static analysis.
-- **Storage:** Required to save/load photos and database records.
-

+ 162 - 37
src/api/main.py

@@ -3,7 +3,9 @@ import uuid
 import os
 import shutil
 from fastapi import FastAPI, File, UploadFile, Body, Form, BackgroundTasks
-from ultralytics import YOLO
+import onnxruntime as ort
+import numpy as np
+
 from dotenv import load_dotenv
 import io
 from PIL import Image
@@ -11,6 +13,30 @@ from PIL import Image
 from src.infrastructure.vision_service import VertexVisionService
 from src.infrastructure.repository import MongoPalmOilRepository
 from src.application.analyze_bunch import AnalyzeBunchUseCase, AnalyzeBatchUseCase, SearchSimilarUseCase
+import sqlite3
+import json
+
DB_PATH = "palm_history.db"
ARCHIVE_DIR = "history_archive"
os.makedirs(ARCHIVE_DIR, exist_ok=True)

def init_local_db(db_path=None):
    """Create the local SQLite 'history' table if it does not exist yet.

    Idempotent (CREATE TABLE IF NOT EXISTS), so it is safe to call on
    every startup.

    Args:
        db_path: optional path to the SQLite file; defaults to the
            module-level DB_PATH.
    """
    conn = sqlite3.connect(db_path or DB_PATH)
    try:
        conn.execute('''
            CREATE TABLE IF NOT EXISTS history (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                filename TEXT,
                archive_path TEXT,
                detections TEXT,
                summary TEXT,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        ''')
        conn.commit()
    finally:
        # Close even when the DDL fails so the handle never leaks.
        conn.close()

# Ensure the archive table exists before the API starts serving requests.
init_local_db()
 
 # Load environment variables
 load_dotenv()
@@ -20,8 +46,65 @@ os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "gemini-embedding-service-key.jso
 
 app = FastAPI(title="Palm Oil Ripeness Service (DDD)")
 
-# Initialize YOLO model
-yolo_model = YOLO('best.pt')
# Initialize ONNX model (replaces the former ultralytics YOLO dependency).
# Session and input name are created once at import time and shared by all
# request handlers.
onnx_path = 'best.onnx'
ort_session = ort.InferenceSession(onnx_path)
input_name = ort_session.get_inputs()[0].name
# Class-id -> label mapping for the exported model's outputs.
# NOTE(review): this ordering must match the label order baked into
# best.onnx at export time — confirm against the export metadata.
class_names = {
    0: 'Empty_Bunch',
    1: 'Underripe',
    2: 'Abnormal',
    3: 'Ripe',
    4: 'Unripe',
    5: 'Overripe'
}
+
def preprocess(img: Image.Image):
    """Convert a PIL image into a YOLO ONNX input tensor [1, 3, 640, 640].

    Returns:
        (tensor, original_width, original_height) — the original size is
        needed later to rescale predicted boxes back to image coordinates.
    """
    rgb = img.convert("RGB")
    orig_w, orig_h = rgb.size
    resized = rgb.resize((640, 640))
    # HWC uint8 -> [0, 1] float -> CHW -> add batch dimension.
    chw = (np.array(resized) / 255.0).transpose(2, 0, 1)
    tensor = chw[np.newaxis, ...].astype(np.float32)
    return tensor, orig_w, orig_h
+
def run_inference(img: Image.Image, conf_threshold: float):
    """Run ONNX inference on *img* and return detections above the threshold.

    Each detection dict contains a 1-based 'bunch_id', 'class', 'confidence'
    (rounded to 2 dp), 'is_health_alert', and 'box' as [x1, y1, x2, y2]
    rescaled from model space back to the original image size.
    """
    tensor, orig_w, orig_h = preprocess(img)
    # Expected output layout: [1, 300, 6] -> x1, y1, x2, y2, conf, class_id.
    raw = ort_session.run(None, {input_name: tensor})[0]

    sx = orig_w / 640.0
    sy = orig_h / 640.0

    results = []
    for row in raw[0]:
        score = float(row[4])
        if score < conf_threshold:
            continue
        x1, y1, x2, y2 = row[:4]
        label = class_names.get(int(row[5]), "Unknown")
        results.append({
            "bunch_id": len(results) + 1,
            "class": label,
            "confidence": round(score, 2),
            "is_health_alert": label in ["Abnormal", "Empty_Bunch"],
            # Rescale from the 640x640 model space to original pixels.
            "box": [float(x1) * sx, float(y1) * sy, float(x2) * sx, float(y2) * sy]
        })
    return results
+
 
 # Global state for the confidence threshold
 current_conf = 0.25
@@ -68,33 +151,42 @@ async def analyze_with_health_metrics(file: UploadFile = File(...)):
     image_bytes = await file.read()
     img = Image.open(io.BytesIO(image_bytes))
     
-    # Run YOLO26 detection (natively NMS-free)
-    results = yolo_model(img, conf=current_conf)
+    # Run ONNX inference (natively NMS-free)
+    detections = run_inference(img, current_conf)
     
-    detections = []
-    # Initialize summary for all classes known by the model
-    summary = {name: 0 for name in yolo_model.names.values()}
+    # Initialize summary for all known classes
+    summary = {name: 0 for name in class_names.values()}
     
-    for r in results:
-        for box in r.boxes:
-            class_name = yolo_model.names[int(box.cls)]
-            summary[class_name] += 1
-            
-            detections.append({
-                "class": class_name,
-                "confidence": round(float(box.conf), 2),
-                "is_health_alert": class_name in ["Abnormal", "Empty_Bunch"],
-                "box": box.xyxy.tolist()[0]
-            })
+    for det in detections:
+        summary[det['class']] += 1
+    
+    # AUTO-ARCHIVE to Local History Vault
+    unique_id = uuid.uuid4().hex
+    archive_filename = f"{unique_id}_{file.filename}"
+    archive_path = os.path.join(ARCHIVE_DIR, archive_filename)
+    
+    # Save image copy
+    with open(archive_path, "wb") as buffer:
+        buffer.write(image_bytes)
+        
+    # Save to SQLite
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+    cursor.execute("INSERT INTO history (filename, archive_path, detections, summary) VALUES (?, ?, ?, ?)",
+                   (file.filename, archive_path, json.dumps(detections), json.dumps(summary)))
+    conn.commit()
+    conn.close()
             
     return {
         "status": "success",
         "current_threshold": current_conf,
-        "total_count": sum(summary.values()),
+        "total_count": len(detections),
         "industrial_summary": summary,
-        "detections": detections
+        "detections": detections,
+        "archive_id": unique_id
     }
 
+
 @app.post("/vectorize_and_store")
 async def vectorize_and_store(file: UploadFile = File(...), detection_data: str = Form(...)):
     """Cloud-dependent. Requires active billing."""
@@ -142,33 +234,35 @@ async def process_batch(files: List[UploadFile] = File(...)):
                 shutil.copyfileobj(file.file, f_out)
             temp_files.append(path)
 
-            # 2. YOLO26 Detect (natively NMS-free)
+            # 2. ONNX Detect (natively NMS-free)
             img = Image.open(path)
-            yolo_res = yolo_model(img, conf=current_conf)
+            detections = run_inference(img, current_conf)
             
             # 3. Process all detections in the image
-            for r in yolo_res:
-                for box in r.boxes:
-                    class_name = yolo_model.names[int(box.cls)]
-                    batch_results.append({
-                        "path": path,
-                        "yolo": {
-                            "class": class_name,
-                            "confidence": float(box.conf),
-                            "is_health_alert": class_name in ["Abnormal", "Empty_Bunch"],
-                            "box": box.xyxy.tolist()[0]
-                        }
-                    })
+            for det in detections:
+                batch_results.append({
+                    "path": path,
+                    "yolo": det
+                })
+
 
         if not batch_results:
             return {"status": "no_detection", "message": "No bunches detected in batch"}
 
         # Calculate Total Industrial Summary for the Batch
-        total_summary = {name: 0 for name in yolo_model.names.values()}
+        total_summary = {name: 0 for name in class_names.values()}
         for item in batch_results:
             total_summary[item['yolo']['class']] += 1
 
+
         # 4. Process Batch Use Case with error handling for cloud services
+        detailed_detections = []
+        for item in batch_results:
+            detailed_detections.append({
+                "filename": os.path.basename(item['path']),
+                "detection": item['yolo']
+            })
+
         try:
             record_ids = analyze_batch_use_case.execute(batch_results)
             total_records = len(record_ids)
@@ -178,15 +272,19 @@ async def process_batch(files: List[UploadFile] = File(...)):
                 "total_count": sum(total_summary.values()),
                 "record_ids": record_ids,
                 "industrial_summary": total_summary,
+                "detailed_results": detailed_detections,
                 "message": f"Successfully processed {total_records} images and identified {sum(total_summary.values())} bunches"
             }
+
         except RuntimeError as e:
             return {
                 "status": "partial_success",
                 "message": f"Detections completed, but cloud archival failed: {str(e)}",
-                "detections_count": len(batch_results)
+                "detections_count": len(batch_results),
+                "detailed_results": detailed_detections
             }
 
+
     except Exception as e:
         return {"status": "error", "message": f"Batch processing failed: {str(e)}"}
 
@@ -236,6 +334,33 @@ async def get_image(record_id: str):
         "image_data": record.get("image_data")
     }
 
+@app.post("/save_to_history")
+async def save_to_history(file: UploadFile = File(...), detections: str = Form(...), summary: str = Form(...)):
+    unique_id = uuid.uuid4().hex
+    filename = f"{unique_id}_{file.filename}"
+    archive_path = os.path.join(ARCHIVE_DIR, filename)
+    
+    with open(archive_path, "wb") as buffer:
+        shutil.copyfileobj(file.file, buffer)
+        
+    conn = sqlite3.connect(DB_PATH)
+    cursor = conn.cursor()
+    cursor.execute("INSERT INTO history (filename, archive_path, detections, summary) VALUES (?, ?, ?, ?)",
+                   (file.filename, archive_path, detections, summary))
+    conn.commit()
+    conn.close()
+    return {"status": "success", "message": "Saved to local vault"}
+
+@app.get("/get_history")
+async def get_history():
+    conn = sqlite3.connect(DB_PATH)
+    conn.row_factory = sqlite3.Row
+    cursor = conn.cursor()
+    cursor.execute("SELECT * FROM history ORDER BY timestamp DESC")
+    rows = [dict(row) for row in cursor.fetchall()]
+    conn.close()
+    return {"status": "success", "history": rows}
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run(app, host="0.0.0.0", port=8000)