|
|
@@ -16,32 +16,107 @@ from fpdf import FPDF
|
|
|
|
|
|
@st.dialog("📘 AI Interpretation Guide")
|
|
|
def show_tech_guide():
|
|
|
- st.write("### 🎯 What does 'Confidence' mean?")
|
|
|
+ st.write("### 🧠 1. The 'Thinking' Phase: The Raw Tensor [1, 300, 6]")
|
|
|
st.write("""
|
|
|
- This is a probability score from **0.0 to 1.0**.
|
|
|
- - **0.90+**: The AI is nearly certain this is a bunch of this grade.
|
|
|
- - **0.25 (Threshold)**: We ignore anything below this to filter out 'ghost' detections or background noise.
|
|
|
+ When the AI 'thinks' about an image, it doesn't see 'Ripe' or 'Unripe'. It populates a
|
|
|
+ fixed-size memory buffer (Tensor) with **300 potential candidates**. Each candidate is
|
|
|
+ represented by a row of 6 numbers.
|
|
|
+ """)
|
|
|
+
|
|
|
+
|
|
|
+
|
|
|
+ st.table({
|
|
|
+ "Tensor Index": ["0, 1, 2, 3", "4", "5"],
|
|
|
+ "AI Output": ["Coordinates", "Confidence Score", "Class ID"],
|
|
|
+ "Programmer's Logic": ["`[x1, y1, x2, y2]`", "`float (0.0 - 1.0)`", "`int (0-5)`"]
|
|
|
+ })
|
|
|
+
|
|
|
+ st.write("#### 🎯 The Coordinate Paradox (Pixels vs. Ratios)")
|
|
|
+ st.write("""
|
|
|
+ Depending on the engine, the **Values at Index 0-3** speak different languages.
|
|
|
+ This is why the raw numbers won't match if you swap engines:
|
|
|
+ """)
|
|
|
+
|
|
|
+ col_a, col_b = st.columns(2)
|
|
|
+ with col_a:
|
|
|
+ st.info("**PyTorch Pathway (.pt)**")
|
|
|
+ st.write("- **Format**: Absolute Pixels")
|
|
|
+ st.write("- **Logic**: The AI outputs numbers mapped to the photo's resolution (e.g., `245.0`).")
|
|
|
+ with col_b:
|
|
|
+ st.success("**ONNX Pathway (.onnx)**")
|
|
|
+ st.write("- **Format**: Normalized Ratios")
|
|
|
+ st.write("- **Logic**: The AI outputs normalized ratios (0.0 to 1.0) relative to its internal 640x640 grid (e.g., `0.38`).")
|
|
|
+
|
|
|
+ st.write("---")
|
|
|
+ st.write("### 🎯 2. What is 'Confidence'? (The Probability Filter)")
|
|
|
+ st.write("""
|
|
|
+ Confidence is the AI's **mathematical certainty** that an object exists in a specific box.
|
|
|
+ It is the product of *Objectness* (Is something there?) and *Class Probability* (What is it?).
|
|
|
""")
|
|
|
|
|
|
- st.write("### 🛠️ The Raw Mathematical Tensor")
|
|
|
- st.write("The AI returns a raw array of shape `[1, 300, 6]`. Here is the key:")
|
|
|
st.table({
|
|
|
- "Index": ["0-3", "4", "5"],
|
|
|
- "Meaning": ["Coordinates (x1, y1, x2, y2)", "Confidence Score", "Class ID (0-5)"],
|
|
|
- "Reality": ["The 'Box' in the image.", "The AI's certainty.", "The Ripeness Grade."]
|
|
|
+ "Confidence Value": ["> 0.90", "0.50 - 0.89", "< 0.25 (Threshold)"],
|
|
|
+ "Interpretation": ["**Certain**: Clear, unobstructed view.", "**Likely**: Valid, but possibly obscured by fronds.", "**Noise**: Discarded to prevent False Positives."]
|
|
|
})
|
|
|
|
|
|
- st.write("### ⚡ Inference vs. Processing Time")
|
|
|
+
|
|
|
+
|
|
|
+ st.write("---")
|
|
|
+ st.write("### 🛠️ 3. The Custom Handler (The Translation Layer)")
|
|
|
st.write("""
|
|
|
- - **Inference Speed**: The time the AI model took to 'think' about the pixels.
|
|
|
- - **Total Time**: Includes image uploading and database saving overhead.
|
|
|
+ Because ONNX returns raw ratios, we built a **Manual Scaling Handler**. It maps those
|
|
|
+ `0.0 - 1.0` values back to your high-resolution photo pixels.
|
|
|
+
|
|
|
+ This explains our two key metrics:
|
|
|
+ - **Inference Speed**: The time the AI spent populating the Raw Tensor.
|
|
|
+ - **Post-Processing**: The time our code spent 'translating' that Tensor into labels and pixels.
|
|
|
""")
|
|
|
- st.info("💡 **Engine Note**: ONNX is optimized for latency (~39ms), while PyTorch offers native indicator flexibility.")
|
|
|
|
|
|
+ st.write("---")
|
|
|
+ st.markdown("""
|
|
|
+ Your detection environment is powered by **YOLO26**, a custom architectural fork designed for low-latency industrial sorting.
|
|
|
+
|
|
|
+ ### ⚡ Performance Comparison
|
|
|
+ | Feature | YOLO26 (ONNX) | YOLO26 (Native) |
|
|
|
+ | :--- | :--- | :--- |
|
|
|
+ | **Coordinate System** | Normalized (0.0 - 1.0) | Absolute (Pixels) |
|
|
|
+ | **Primary Use Case** | Real-time Edge Sorting | High-Resolution Auditing |
|
|
|
+ | **Post-Processing** | NMS-Free (manual coordinate scaling only) | Standard NMS |
|
|
|
+ """)
|
|
|
|
|
|
# --- 1. Global Backend Check ---
|
|
|
API_BASE_URL = "http://localhost:8000"
|
|
|
|
|
|
+# MPOB Color Map for Overlays (Global for consistency)
|
|
|
+overlay_colors = {
|
|
|
+ 'Ripe': '#22c55e', # Industrial Green
|
|
|
+ 'Underripe': '#fbbf24', # Industrial Orange
|
|
|
+ 'Unripe': '#3b82f6', # Industrial Blue
|
|
|
+ 'Abnormal': '#dc2626', # Critical Red
|
|
|
+ 'Empty_Bunch': '#64748b', # Waste Gray
|
|
|
+ 'Overripe': '#7c2d12' # Dark Brown/Orange
|
|
|
+}
|
|
|
+
|
|
|
+# Helper to reset results when files change or engine switches
|
|
|
+def reset_single_results():
|
|
|
+ st.session_state.last_detection = None
|
|
|
+
|
|
|
+def reset_batch_results():
|
|
|
+ st.session_state.last_batch_results = None
|
|
|
+
|
|
|
+def reset_all_analysis():
|
|
|
+ """Global reset for all active analysis views."""
|
|
|
+ st.session_state.last_detection = None
|
|
|
+ st.session_state.last_batch_results = None
|
|
|
+ # Increment uploader keys to 'forget' current files (Clear Canvas)
|
|
|
+ if "single_uploader_key" not in st.session_state:
|
|
|
+ st.session_state.single_uploader_key = 0
|
|
|
+ st.session_state.single_uploader_key += 1
|
|
|
+
|
|
|
+ if "batch_uploader_key" not in st.session_state:
|
|
|
+ st.session_state.batch_uploader_key = 0
|
|
|
+ st.session_state.batch_uploader_key += 1
|
|
|
+
|
|
|
def check_backend():
|
|
|
try:
|
|
|
res = requests.get(f"{API_BASE_URL}/get_confidence", timeout=2)
|
|
|
@@ -93,43 +168,29 @@ st.sidebar.slider(
|
|
|
)
|
|
|
|
|
|
st.sidebar.markdown("---")
|
|
|
-st.sidebar.subheader("Inference Engine")
|
|
|
+# Inference Engine
|
|
|
engine_choice = st.sidebar.selectbox(
|
|
|
- "Select Model Engine",
|
|
|
- ["YOLO26 (PyTorch - Native)", "YOLO26 (ONNX - High Speed)"],
|
|
|
+ "Select Model Engine:",
|
|
|
+ ["YOLO26 (ONNX - High Speed)", "YOLO26 (PyTorch - Native)", "Sawit-TBS (Benchmark)"],
|
|
|
index=0,
|
|
|
- help="ONNX is optimized for latency. PyTorch provides native object handling."
|
|
|
+ on_change=reset_all_analysis # Clear canvas on engine switch
|
|
|
)
|
|
|
-st.sidebar.markdown("---")
|
|
|
-st.sidebar.subheader("🛠️ Technical Controls")
|
|
|
-show_trace = st.sidebar.toggle("🔬 Show Technical Trace", value=False, help="Enable to see raw mathematical tensor data alongside AI labels.")
|
|
|
-st.session_state.tech_trace = show_trace
|
|
|
-model_type = "onnx" if "ONNX" in engine_choice else "pytorch"
|
|
|
-if model_type == "pytorch":
|
|
|
- st.sidebar.warning("PyTorch Engine: Higher Memory Usage")
|
|
|
-else:
|
|
|
- st.sidebar.info("ONNX Engine: ~39ms Latency")
|
|
|
+
|
|
|
+# Map selection to internal labels
|
|
|
+engine_map = {
|
|
|
+ "YOLO26 (ONNX - High Speed)": "onnx",
|
|
|
+ "YOLO26 (PyTorch - Native)": "pytorch",
|
|
|
+ "Sawit-TBS (Benchmark)": "benchmark"
|
|
|
+}
|
|
|
|
|
|
st.sidebar.markdown("---")
|
|
|
-if st.sidebar.button("❓ How to read results?", icon="📘", width='stretch'):
|
|
|
- show_tech_guide()
|
|
|
+model_type = engine_map[engine_choice]
|
|
|
|
|
|
-# Helper to reset results when files change
|
|
|
-def reset_single_results():
|
|
|
- st.session_state.last_detection = None
|
|
|
|
|
|
-def reset_batch_results():
|
|
|
- st.session_state.last_batch_results = None
|
|
|
+if st.sidebar.button("❓ How to read results?", icon="📘", width='stretch'):
|
|
|
+ show_tech_guide()
|
|
|
|
|
|
-# MPOB Color Map for Overlays (Global for consistency)
|
|
|
-overlay_colors = {
|
|
|
- 'Ripe': '#22c55e', # Industrial Green
|
|
|
- 'Underripe': '#fbbf24', # Industrial Orange
|
|
|
- 'Unripe': '#3b82f6', # Industrial Blue
|
|
|
- 'Abnormal': '#dc2626', # Critical Red
|
|
|
- 'Empty_Bunch': '#64748b',# Waste Gray
|
|
|
- 'Overripe': '#7c2d12' # Dark Brown/Orange
|
|
|
-}
|
|
|
+# Function definitions moved to top
|
|
|
|
|
|
def display_interactive_results(image, detections, key=None):
|
|
|
"""Renders image with interactive hover-boxes using Plotly."""
|
|
|
@@ -151,7 +212,7 @@ def display_interactive_results(image, detections, key=None):
|
|
|
x1, y1, x2, y2 = det['box']
|
|
|
# Plotly y-axis is inverted relative to PIL, so we flip y
|
|
|
y_top, y_bottom = img_height - y1, img_height - y2
|
|
|
- color = overlay_colors.get(det['class'], "#ffeb3b")
|
|
|
+ color = overlay_colors.get(det['class'], "#9ca3af") # Fallback to neutral gray
|
|
|
|
|
|
# The 'Hover' shape
|
|
|
bunch_id = det.get('bunch_id', i+1)
|
|
|
@@ -169,7 +230,7 @@ def display_interactive_results(image, detections, key=None):
|
|
|
))
|
|
|
|
|
|
fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
|
|
|
- st.plotly_chart(fig, use_container_width=True, key=key)
|
|
|
+ st.plotly_chart(fig, width='stretch', key=key)
|
|
|
|
|
|
def annotate_image(image, detections):
|
|
|
"""Draws high-visibility 'Plated Labels' and boxes on the image."""
|
|
|
@@ -196,7 +257,7 @@ def annotate_image(image, detections):
|
|
|
cls = det['class']
|
|
|
conf = det['confidence']
|
|
|
bunch_id = det.get('bunch_id', '?')
|
|
|
- color = overlay_colors.get(cls, '#ffffff')
|
|
|
+ color = overlay_colors.get(cls, '#9ca3af') # Fallback to neutral gray
|
|
|
|
|
|
# 2. Draw Heavy-Duty Bounding Box
|
|
|
line_width = max(4, image.width // 150)
|
|
|
@@ -333,10 +394,14 @@ tab1, tab2, tab3, tab4 = st.tabs(["Single Analysis", "Batch Processing", "Simila
|
|
|
# --- Tab 1: Single Analysis ---
|
|
|
with tab1:
|
|
|
st.subheader("Analyze Single Bunch")
|
|
|
+ # 1. Initialize Uploader Key
|
|
|
+ if "single_uploader_key" not in st.session_state:
|
|
|
+ st.session_state.single_uploader_key = 0
|
|
|
+
|
|
|
uploaded_file = st.file_uploader(
|
|
|
"Upload a bunch image...",
|
|
|
type=["jpg", "jpeg", "png"],
|
|
|
- key="single",
|
|
|
+ key=f"single_{st.session_state.single_uploader_key}",
|
|
|
on_change=reset_single_results
|
|
|
)
|
|
|
|
|
|
@@ -359,6 +424,11 @@ with tab1:
|
|
|
|
|
|
# 2. Results Layout
|
|
|
if st.session_state.last_detection:
|
|
|
+ # Redo Button at the top for easy access
|
|
|
+ if st.button("🔄 Re-analyze Image", width='stretch', type="primary", help="Force a fresh detection (useful if threshold changed)."):
|
|
|
+ st.session_state.last_detection = None
|
|
|
+ st.rerun()
|
|
|
+
|
|
|
data = st.session_state.last_detection
|
|
|
st.divider()
|
|
|
|
|
|
@@ -367,7 +437,14 @@ with tab1:
|
|
|
with m_col1:
|
|
|
st.metric("Total Bunches", data.get('total_count', 0))
|
|
|
with m_col2:
|
|
|
- st.metric("Healthy (Ripe)", data['industrial_summary'].get('Ripe', 0))
|
|
|
+ if model_type == "benchmark":
|
|
|
+ # For benchmark model, show the top detected class instead of 'Healthy'
|
|
|
+ top_class = "None"
|
|
|
+ if data.get('industrial_summary'):
|
|
|
+ top_class = max(data['industrial_summary'], key=data['industrial_summary'].get)
|
|
|
+ st.metric("Top Detected Class", top_class)
|
|
|
+ else:
|
|
|
+ st.metric("Healthy (Ripe)", data['industrial_summary'].get('Ripe', 0))
|
|
|
with m_col3:
|
|
|
# Refined speed label based on engine
|
|
|
speed_label = "Raw Speed (Unlabeled)" if model_type == "onnx" else "Wrapped Speed (Auto-Labeled)"
|
|
|
@@ -397,15 +474,15 @@ with tab1:
|
|
|
col1, col2 = st.columns([1.5, 1]) # Keep original col structure for summary below
|
|
|
|
|
|
with col1:
|
|
|
- col_tech_h1, col_tech_h2 = st.columns([4, 1])
|
|
|
+ col_tech_h1, col_tech_h2 = st.columns([1, 1])
|
|
|
with col_tech_h1:
|
|
|
st.write("#### 🛠️ Technical Evidence")
|
|
|
with col_tech_h2:
|
|
|
- if st.button("❓ Guide", key="guide_tab1"):
|
|
|
- show_tech_guide()
|
|
|
+ st.session_state.tech_trace = st.toggle("🔬 Side-by-Side Trace", value=st.session_state.get('tech_trace', False))
|
|
|
|
|
|
with st.expander("Raw Output Tensor (NMS-Free)", expanded=False):
|
|
|
- st.caption("See the Interpretation Guide for a breakdown of these numbers.")
|
|
|
+ coord_type = "Absolute Pixels" if model_type == "pytorch" else "Normalized Ratios (0.0-1.0)"
|
|
|
+ st.warning(f"Engine detected: {model_type.upper()} | Coordinate System: {coord_type}")
|
|
|
st.json(data.get('raw_array_sample', []))
|
|
|
with st.container(border=True):
|
|
|
st.write("### 🏷️ Detection Results")
|
|
|
@@ -717,13 +794,13 @@ with tab4:
|
|
|
# Prepare searchable dataframe
|
|
|
df_history = pd.DataFrame(history_data)
|
|
|
# Clean up for display
|
|
|
- display_df = df_history[['id', 'timestamp', 'filename', 'inference_ms']].copy()
|
|
|
- display_df.columns = ['ID', 'Date/Time', 'Filename', 'Inference (ms)']
|
|
|
+ display_df = df_history[['id', 'timestamp', 'engine', 'filename', 'inference_ms']].copy()
|
|
|
+ display_df.columns = ['ID', 'Date/Time', 'Engine', 'Filename', 'Inference (ms)']
|
|
|
|
|
|
st.dataframe(
|
|
|
display_df,
|
|
|
hide_index=True,
|
|
|
- use_container_width=True,
|
|
|
+ width='stretch',
|
|
|
column_config={
|
|
|
"ID": st.column_config.NumberColumn(width="small"),
|
|
|
"Inference (ms)": st.column_config.NumberColumn(format="%.1f ms")
|
|
|
@@ -740,7 +817,7 @@ with tab4:
|
|
|
)
|
|
|
with hist_col2:
|
|
|
st.write("##") # Alignment
|
|
|
- if st.button("🔬 Start Deep Dive", type="primary", use_container_width=True):
|
|
|
+ if st.button("🔬 Start Deep Dive", type="primary", width='stretch'):
|
|
|
st.session_state.selected_history_id = target_id
|
|
|
st.rerun()
|
|
|
else:
|
|
|
@@ -756,7 +833,8 @@ with tab4:
|
|
|
|
|
|
st.divider()
|
|
|
st.write(f"## 🔍 Deep Dive: Record #{record['id']}")
|
|
|
- st.caption(f"Original Filename: `{record['filename']}` | Processed: `{record['timestamp']}`")
|
|
|
+ engine_val = record.get('engine', 'Unknown')
|
|
|
+ st.caption(f"Original Filename: `{record['filename']}` | Processed: `{record['timestamp']}` | Engine: `{engine_val.upper()}`")
|
|
|
|
|
|
detections = json.loads(record['detections'])
|
|
|
summary = json.loads(record['summary'])
|
|
|
@@ -783,7 +861,7 @@ with tab4:
|
|
|
display_interactive_results(hist_img, detections, key=f"hist_plotly_{record['id']}")
|
|
|
with v_tab2:
|
|
|
img_plate = annotate_image(hist_img.copy(), detections)
|
|
|
- st.image(img_plate, use_container_width=True, caption="Point-of-Harvest AI Interpretation")
|
|
|
+ st.image(img_plate, width='stretch', caption="Point-of-Harvest AI Interpretation")
|
|
|
else:
|
|
|
st.warning(f"Technical Error: Archive file missing at `{record['archive_path']}`")
|
|
|
|