|
@@ -7,6 +7,12 @@ import io
|
|
|
import base64
|
|
import base64
|
|
|
import pandas as pd
|
|
import pandas as pd
|
|
|
import plotly.express as px
|
|
import plotly.express as px
|
|
|
|
|
+import plotly.graph_objects as go
|
|
|
|
|
+import json
|
|
|
|
|
+import os
|
|
|
|
|
+from datetime import datetime
|
|
|
|
|
+from fpdf import FPDF
|
|
|
|
|
+
|
|
|
|
|
|
|
|
# --- 1. Global Backend Check ---
|
|
# --- 1. Global Backend Check ---
|
|
|
API_BASE_URL = "http://localhost:8000"
|
|
API_BASE_URL = "http://localhost:8000"
|
|
@@ -68,8 +74,205 @@ def reset_single_results():
|
|
|
def reset_batch_results():
|
|
def reset_batch_results():
|
|
|
st.session_state.last_batch_results = None
|
|
st.session_state.last_batch_results = None
|
|
|
|
|
|
|
|
|
|
# MPOB Color Map for Overlays (Global for consistency).
# One entry per ripeness grade returned by the detection API; the same hex
# palette is reused by every view (interactive viewer, PDF report, gallery)
# so a given grade is always rendered in the same color.
overlay_colors = {
    'Ripe': '#22c55e',         # Industrial Green
    'Underripe': '#fbbf24',    # Industrial Orange
    'Unripe': '#3b82f6',       # Industrial Blue
    'Abnormal': '#dc2626',     # Critical Red
    'Empty_Bunch': '#64748b',  # Waste Gray
    'Overripe': '#7c2d12',     # Dark Brown/Orange
}
|
|
|
|
|
+
|
|
|
|
|
def display_interactive_results(image, detections, key=None):
    """Renders image with interactive hover-boxes using Plotly.

    Args:
        image: PIL RGB image shown as the chart background.
        detections: list of dicts with 'box' ([x1, y1, x2, y2] pixel
            coordinates), 'class', 'confidence', and optionally
            'bunch_id' / 'is_health_alert'.
        key: optional Streamlit widget key (needed when the chart is
            rendered more than once per page).
    """
    img_width, img_height = image.size
    fig = go.Figure()

    # Add the palm image as the background. The image is anchored at
    # y=img_height because Plotly's y-axis grows upward, opposite to PIL.
    fig.add_layout_image(
        dict(source=image, x=0, y=img_height, sizex=img_width, sizey=img_height,
             sizing="stretch", opacity=1, layer="below", xref="x", yref="y")
    )

    # Lock axes to the image dimensions and hide them so the figure reads
    # as a plain annotated photo rather than a chart.
    fig.update_xaxes(showgrid=False, range=(0, img_width), zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=(0, img_height), zeroline=False, visible=False, scaleanchor="x")

    # One semi-transparent rectangle trace per detection; hovering reveals
    # the grade / score / alert tooltip.
    for i, det in enumerate(detections):
        x1, y1, x2, y2 = det['box']
        # Plotly y-axis is inverted relative to PIL, so we flip y
        y_top, y_bottom = img_height - y1, img_height - y2
        color = overlay_colors.get(det['class'], "#ffeb3b")

        # Optional fields: fall back gracefully instead of raising KeyError
        # when the API response omits them (bunch_id already did this;
        # is_health_alert now does too).
        bunch_id = det.get('bunch_id', i + 1)
        health_alert = det.get('is_health_alert', False)
        fig.add_trace(go.Scatter(
            x=[x1, x2, x2, x1, x1],
            y=[y_top, y_top, y_bottom, y_bottom, y_top],
            fill="toself",
            fillcolor=color,
            opacity=0.3,  # Semi-transparent until hover
            mode='lines',
            line=dict(color=color, width=3),
            name=f"Bunch #{bunch_id}",
            text=f"<b>ID: #{bunch_id}</b><br>Grade: {det['class']}<br>Score: {det['confidence']:.2f}<br>Alert: {health_alert}",
            hoverinfo="text"
        ))

    fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
    st.plotly_chart(fig, use_container_width=True, key=key)
|
|
|
|
|
+
|
|
|
|
|
def annotate_image(image, detections):
    """Draws high-visibility boxes and background-shaded labels.

    Mutates *image* in place and also returns it for call-chaining.

    Args:
        image: PIL RGB image to annotate.
        detections: list of dicts with 'box' ([x1, y1, x2, y2]), 'class',
            'confidence', and optionally 'bunch_id'.

    Returns:
        The same PIL image with boxes and labels drawn on it.
    """
    from PIL import ImageDraw, ImageFont
    draw = ImageDraw.Draw(image)
    # Dynamic font size based on image resolution
    font_size = max(20, image.width // 40)
    try:
        # Windows-only font path, guarded by an existence check; fall back
        # to PIL's built-in bitmap font elsewhere.
        # NOTE(review): consider bundling a font for cross-platform sizing.
        font_path = "C:\\Windows\\Fonts\\arial.ttf"
        if os.path.exists(font_path):
            font = ImageFont.truetype(font_path, font_size)
        else:
            font = ImageFont.load_default()
    except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
        font = ImageFont.load_default()

    for det in detections:
        box = det['box']  # [x1, y1, x2, y2]
        cls = det['class']
        conf = det['confidence']
        bunch_id = det.get('bunch_id', '?')
        color = overlay_colors.get(cls, '#ffffff')

        # 1. Draw Bold Bounding Box (line width scales with resolution)
        draw.rectangle(box, outline=color, width=max(4, image.width // 200))

        # 2. Draw Label Background (High Contrast)
        label = f"#{bunch_id} {cls} {conf:.2f}"
        # Clamp so labels for boxes near the top edge stay on-canvas
        # instead of being drawn at a negative y and clipped away.
        label_y = max(0, box[1] - font_size - 10)
        try:
            # textbbox provides precise coordinates for background rectangle
            l, t, r, b = draw.textbbox((box[0], label_y), label, font=font)
            draw.rectangle([l - 5, t - 5, r + 5, b + 5], fill=color)
            draw.text((l, t), label, fill="white", font=font)
        except Exception:
            # Fallback for basic text drawing (older Pillow without textbbox)
            draw.text((box[0], max(0, box[1] - 25)), label, fill=color)

    return image
|
|
|
|
|
+
|
|
|
|
|
def generate_batch_report(data, uploaded_files_map=None):
    """Generates a professional PDF report for batch results with visual evidence.

    Args:
        data: batch-response dict; reads 'industrial_summary', 'total_count'
            and optionally 'detailed_results' (list of
            {'filename': str, 'detection': dict}).
        uploaded_files_map: optional {filename: raw image bytes} used to
            render the annotated-evidence pages; evidence is skipped when
            it is None or a filename is missing from it.

    Returns:
        The PDF document as produced by FPDF.output(dest='S').
    """
    import tempfile

    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(190, 10, "Palm Oil FFB Harvest Quality Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.cell(190, 10, f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=True, align="C")
    pdf.ln(10)

    # 1. Summary Table
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "1. Batch Summary", ln=True)
    pdf.set_font("Arial", "", 12)

    summary = data.get('industrial_summary', {})
    total_bunches = data.get('total_count', 0)
    pdf.cell(95, 10, "Metric", border=1)
    pdf.cell(95, 10, "Value", border=1, ln=True)

    pdf.cell(95, 10, "Total Bunches Detected", border=1)
    pdf.cell(95, 10, str(total_bunches), border=1, ln=True)

    # Only grades actually present in the batch get a row.
    for grade, count in summary.items():
        if count > 0:
            pdf.cell(95, 10, f"Grade: {grade}", border=1)
            pdf.cell(95, 10, str(count), border=1, ln=True)

    pdf.ln(10)

    # 2. Strategic Insights
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "2. Strategic Yield Insights", ln=True)
    pdf.set_font("Arial", "", 12)

    # Bunches cut before peak ripeness count against extraction yield.
    unripe = summary.get('Unripe', 0)
    underripe = summary.get('Underripe', 0)
    loss = unripe + underripe

    if loss > 0:
        pdf.multi_cell(190, 10, f"WARNING: {loss} bunches were harvested before peak ripeness. "
                                "This directly impacts the Oil Extraction Rate (OER) and results in potential yield loss.")
    else:
        pdf.multi_cell(190, 10, "EXCELLENT: All detected bunches meet prime ripeness standards. Harvest efficiency is 100%.")

    # Critical Alerts (rendered in red, then color is reset)
    abnormal = summary.get('Abnormal', 0)
    empty = summary.get('Empty_Bunch', 0)
    if abnormal > 0 or empty > 0:
        pdf.ln(5)
        pdf.set_text_color(220, 0, 0)
        pdf.set_font("Arial", "B", 12)
        pdf.cell(190, 10, "CRITICAL HEALTH ALERTS:", ln=True)
        pdf.set_font("Arial", "", 12)
        if abnormal > 0:
            pdf.cell(190, 10, f"- {abnormal} Abnormal Bunches detected (Requires immediate field inspection).", ln=True)
        if empty > 0:
            pdf.cell(190, 10, f"- {empty} Empty Bunches detected (Waste reduction needed).", ln=True)
        pdf.set_text_color(0, 0, 0)

    # 3. Visual Evidence Section
    if 'detailed_results' in data and uploaded_files_map:
        pdf.add_page()
        pdf.set_font("Arial", "B", 14)
        pdf.cell(190, 10, "3. Visual Batch Evidence (AI Overlay)", ln=True)
        pdf.ln(5)

        # Group detections by filename
        results_by_file = {}
        for res in data['detailed_results']:
            results_by_file.setdefault(res['filename'], []).append(res['detection'])

        for fname, detections in results_by_file.items():
            if fname in uploaded_files_map:
                img_bytes = uploaded_files_map[fname]
                img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
                # Drawing annotated boxes for PDF using high-visibility utility
                annotate_image(img, detections)

                # Render the evidence to a temp JPEG in the system temp dir.
                # tempfile avoids collisions and path-separator issues in
                # fname; the fixed .jpg suffix lets FPDF infer the format.
                with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tmp:
                    temp_img_path = tmp.name
                try:
                    img.save(temp_img_path, format="JPEG")

                    # Check if we need a new page based on cursor position
                    if pdf.get_y() > 200:
                        pdf.add_page()

                    pdf.image(temp_img_path, x=10, w=150)
                    pdf.set_font("Arial", "I", 10)
                    pdf.cell(190, 10, f"Annotated: {fname}", ln=True)
                    pdf.ln(5)
                finally:
                    # Guarantee cleanup even if pdf.image() raises.
                    os.remove(temp_img_path)

    # Footer
    pdf.set_y(-15)
    pdf.set_font("Arial", "I", 8)
    pdf.cell(190, 10, "Generated by Palm Oil AI Desktop PoC - YOLO26 Engine", align="C")

    # NOTE(review): fpdf2 returns a bytearray here; legacy PyFPDF returns
    # str. Callers feeding st.download_button should confirm which one
    # the project pins.
    return pdf.output(dest='S')
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
# --- Tabs ---
|
|
# --- Tabs ---
|
|
|
-tab1, tab2, tab3 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search"])
|
|
|
|
|
|
|
+tab1, tab2, tab3, tab4 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search", "History Vault"])
|
|
|
|
|
|
|
|
# --- Tab 1: Single Analysis ---
|
|
# --- Tab 1: Single Analysis ---
|
|
|
with tab1:
|
|
with tab1:
|
|
@@ -101,43 +304,20 @@ with tab1:
|
|
|
if st.session_state.last_detection:
|
|
if st.session_state.last_detection:
|
|
|
st.divider()
|
|
st.divider()
|
|
|
|
|
|
|
|
- # SIDE-BY-SIDE ANALYTICAL VIEW
|
|
|
|
|
- col_left, col_right = st.columns(2)
|
|
|
|
|
-
|
|
|
|
|
- # Fetch data once
|
|
|
|
|
|
|
+ # PRIMARY ANNOTATED VIEW
|
|
|
|
|
+ st.write("### 🔍 AI Analytical View")
|
|
|
data = st.session_state.last_detection
|
|
data = st.session_state.last_detection
|
|
|
|
|
+ img = Image.open(uploaded_file).convert("RGB")
|
|
|
|
|
+ display_interactive_results(img, data['detections'], key="main_viewer")
|
|
|
|
|
|
|
|
- with col_left:
|
|
|
|
|
- st.image(uploaded_file, caption="Original Photo", width='stretch')
|
|
|
|
|
-
|
|
|
|
|
- with col_right:
|
|
|
|
|
- # MANUAL OVERLAY DRAWING (NMS-Free Output from API)
|
|
|
|
|
- img = Image.open(uploaded_file).convert("RGB")
|
|
|
|
|
- from PIL import ImageDraw, ImageFont
|
|
|
|
|
- draw = ImageDraw.Draw(img)
|
|
|
|
|
-
|
|
|
|
|
- # MPOB Color Map for Overlays
|
|
|
|
|
- overlay_colors = {
|
|
|
|
|
- 'Ripe': '#22c55e', # Industrial Green
|
|
|
|
|
- 'Underripe': '#fbbf24', # Industrial Orange
|
|
|
|
|
- 'Unripe': '#3b82f6', # Industrial Blue
|
|
|
|
|
- 'Abnormal': '#dc2626', # Critical Red
|
|
|
|
|
- 'Empty_Bunch': '#64748b' # Waste Gray
|
|
|
|
|
- }
|
|
|
|
|
-
|
|
|
|
|
- for det in data['detections']:
|
|
|
|
|
- box = det['box'] # [x1, y1, x2, y2]
|
|
|
|
|
- cls = det['class']
|
|
|
|
|
- color = overlay_colors.get(cls, '#ffffff')
|
|
|
|
|
-
|
|
|
|
|
- # Draw Box
|
|
|
|
|
- draw.rectangle(box, outline=color, width=4)
|
|
|
|
|
-
|
|
|
|
|
- # Draw Label Background
|
|
|
|
|
- label = f"{cls} {det['confidence']:.2f}"
|
|
|
|
|
- draw.text((box[0], box[1] - 15), label, fill=color)
|
|
|
|
|
-
|
|
|
|
|
- st.image(img, caption="AI Analytical View (NMS-Free Native)", width='stretch')
|
|
|
|
|
|
|
+ # Visual Legend
|
|
|
|
|
+ st.write("#### 🎨 Ripeness Legend")
|
|
|
|
|
+ l_cols = st.columns(len(overlay_colors))
|
|
|
|
|
+ for i, (grade, color) in enumerate(overlay_colors.items()):
|
|
|
|
|
+ with l_cols[i]:
|
|
|
|
|
+ st.markdown(f'<div style="background-color:{color}; padding:10px; border-radius:5px; text-align:center; color:white; font-weight:bold;">{grade}</div>', unsafe_allow_html=True)
|
|
|
|
|
+
|
|
|
|
|
+ st.divider()
|
|
|
|
|
|
|
|
st.write("### 📈 Manager's Dashboard")
|
|
st.write("### 📈 Manager's Dashboard")
|
|
|
m_col1, m_col2, m_col3 = st.columns(3)
|
|
m_col1, m_col2, m_col3 = st.columns(3)
|
|
@@ -158,7 +338,7 @@ with tab1:
|
|
|
st.warning("No Fresh Fruit Bunches detected.")
|
|
st.warning("No Fresh Fruit Bunches detected.")
|
|
|
else:
|
|
else:
|
|
|
for det in data['detections']:
|
|
for det in data['detections']:
|
|
|
- st.info(f"**{det['class']}** - {det['confidence']:.2%} confidence")
|
|
|
|
|
|
|
+ st.info(f"### Bunch #{det['bunch_id']}: {det['class']} ({det['confidence']:.2%})")
|
|
|
|
|
|
|
|
st.write("### 📊 Harvest Quality Mix")
|
|
st.write("### 📊 Harvest Quality Mix")
|
|
|
# Convert industrial_summary dictionary to a DataFrame for charting
|
|
# Convert industrial_summary dictionary to a DataFrame for charting
|
|
@@ -181,7 +361,7 @@ with tab1:
|
|
|
},
|
|
},
|
|
|
hole=0.4)
|
|
hole=0.4)
|
|
|
fig.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=300)
|
|
fig.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=300)
|
|
|
- st.plotly_chart(fig, width='stretch')
|
|
|
|
|
|
|
+ st.plotly_chart(fig, width='stretch', key="single_pie")
|
|
|
|
|
|
|
|
# 💡 Strategic R&D Insight: Harvest Efficiency
|
|
# 💡 Strategic R&D Insight: Harvest Efficiency
|
|
|
st.write("---")
|
|
st.write("---")
|
|
@@ -225,6 +405,33 @@ with tab1:
|
|
|
else:
|
|
else:
|
|
|
st.error("Failed to connect to cloud service")
|
|
st.error("Failed to connect to cloud service")
|
|
|
|
|
|
|
|
|
|
+ if st.button("🚩 Flag Misclassification", width='stretch', type="secondary"):
|
|
|
|
|
+ # Save to local feedback folder
|
|
|
|
|
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
|
|
|
|
+ feedback_id = f"fb_{timestamp}"
|
|
|
|
|
+ img_path = f"feedback/{feedback_id}.jpg"
|
|
|
|
|
+ json_path = f"feedback/{feedback_id}.json"
|
|
|
|
|
+
|
|
|
|
|
+ # Save image
|
|
|
|
|
+ Image.open(uploaded_file).save(img_path)
|
|
|
|
|
+
|
|
|
|
|
+ # Save metadata
|
|
|
|
|
+ feedback_data = {
|
|
|
|
|
+ "original_filename": uploaded_file.name,
|
|
|
|
|
+ "timestamp": timestamp,
|
|
|
|
|
+ "detections": data['detections'],
|
|
|
|
|
+ "threshold_used": data['current_threshold']
|
|
|
|
|
+ }
|
|
|
|
|
+ with open(json_path, "w") as f:
|
|
|
|
|
+ json.dump(feedback_data, f, indent=4)
|
|
|
|
|
+
|
|
|
|
|
+ st.toast("✅ Feedback saved to local vault!", icon="🚩")
|
|
|
|
|
+
|
|
|
|
|
+ if st.button("💾 Local History Vault (Auto-Saved)", width='stretch', type="secondary", disabled=True):
|
|
|
|
|
+ pass
|
|
|
|
|
+ st.caption("✅ This analysis was automatically archived to the local vault.")
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
# --- Tab 2: Batch Processing ---
|
|
# --- Tab 2: Batch Processing ---
|
|
|
with tab2:
|
|
with tab2:
|
|
|
st.subheader("Bulk Analysis")
|
|
st.subheader("Bulk Analysis")
|
|
@@ -262,16 +469,50 @@ with tab2:
|
|
|
'Empty_Bunch': '#64748b'
|
|
'Empty_Bunch': '#64748b'
|
|
|
})
|
|
})
|
|
|
fig_batch.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=200, showlegend=False)
|
|
fig_batch.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=200, showlegend=False)
|
|
|
- st.plotly_chart(fig_batch, width='stretch')
|
|
|
|
|
|
|
+ st.plotly_chart(fig_batch, width='stretch', key="batch_bar")
|
|
|
|
|
|
|
|
if batch_summary.get('Abnormal', 0) > 0:
|
|
if batch_summary.get('Abnormal', 0) > 0:
|
|
|
st.error(f"🚨 BATCH CRITICAL: {batch_summary['Abnormal']} Abnormal Bunches found in this batch!")
|
|
st.error(f"🚨 BATCH CRITICAL: {batch_summary['Abnormal']} Abnormal Bunches found in this batch!")
|
|
|
|
|
|
|
|
st.write("Generated Record IDs:")
|
|
st.write("Generated Record IDs:")
|
|
|
st.code(res_data['record_ids'])
|
|
st.code(res_data['record_ids'])
|
|
|
- if st.button("Clear Results & Start New Batch"):
|
|
|
|
|
|
|
+
|
|
|
|
|
+ # --- 4. Batch Evidence Gallery ---
|
|
|
|
|
+ st.write("### 🖼️ Detailed Detection Evidence")
|
|
|
|
|
+ if 'detailed_results' in res_data:
|
|
|
|
|
+ # Group results by filename for gallery
|
|
|
|
|
+ gallery_map = {}
|
|
|
|
|
+ for res in res_data['detailed_results']:
|
|
|
|
|
+ fname = res['filename']
|
|
|
|
|
+ if fname not in gallery_map:
|
|
|
|
|
+ gallery_map[fname] = []
|
|
|
|
|
+ gallery_map[fname].append(res['detection'])
|
|
|
|
|
+
|
|
|
|
|
+ # Show images with overlays using consistent utility
|
|
|
|
|
+ for up_file in uploaded_files:
|
|
|
|
|
+ if up_file.name in gallery_map:
|
|
|
|
|
+ with st.container(border=True):
|
|
|
|
|
+ g_img = Image.open(up_file).convert("RGB")
|
|
|
|
|
+ g_annotated = annotate_image(g_img, gallery_map[up_file.name])
|
|
|
|
|
+ st.image(g_annotated, caption=f"Evidence: {up_file.name}", use_container_width=True)
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+ # PDF Export Button (Pass images map)
|
|
|
|
|
+ files_map = {f.name: f.getvalue() for f in uploaded_files}
|
|
|
|
|
+ pdf_bytes = generate_batch_report(res_data, files_map)
|
|
|
|
|
+ st.download_button(
|
|
|
|
|
+ label="📄 Download Executive Batch Report (PDF)",
|
|
|
|
|
+ data=pdf_bytes,
|
|
|
|
|
+ file_name=f"PalmOil_BatchReport_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf",
|
|
|
|
|
+ mime="application/pdf",
|
|
|
|
|
+ width='stretch'
|
|
|
|
|
+ )
|
|
|
|
|
+
|
|
|
|
|
+
|
|
|
|
|
+ if st.button("Clear Results & Start New Batch", width='stretch'):
|
|
|
st.session_state.last_batch_results = None
|
|
st.session_state.last_batch_results = None
|
|
|
st.rerun()
|
|
st.rerun()
|
|
|
|
|
+
|
|
|
st.divider()
|
|
st.divider()
|
|
|
|
|
|
|
|
# 3. Uploader UI
|
|
# 3. Uploader UI
|
|
@@ -377,3 +618,44 @@ with tab3:
|
|
|
st.write(f"**ID:** `{rec_id}`")
|
|
st.write(f"**ID:** `{rec_id}`")
|
|
|
else:
|
|
else:
|
|
|
st.error(f"Search failed: {res.text}")
|
|
st.error(f"Search failed: {res.text}")
|
|
|
|
|
+
|
|
|
|
|
# --- Tab 4: History Vault ---
# Fetches archived analyses from the backend and replays them with the
# same interactive overlay viewer used for live results.
with tab4:
    st.subheader("📜 Local History Vault")
    try:
        # Timeout guards the UI against a hung backend instead of
        # blocking the Streamlit script forever.
        res = requests.get(f"{API_BASE_URL}/get_history", timeout=10)
        if res.status_code == 200:
            history_data = res.json().get("history", [])
            if not history_data:
                st.info("No saved records found.")
            else:
                # Selection table
                df_history = pd.DataFrame(history_data)[['id', 'filename', 'timestamp']]
                selected_id = st.selectbox("Select a record to review:", df_history['id'])

                if selected_id:
                    # Default of None avoids StopIteration if the selected
                    # id is stale (e.g. the record was deleted server-side).
                    record = next((item for item in history_data if item["id"] == selected_id), None)
                    if record is None:
                        st.error(f"Record not found: {selected_id}")
                    else:
                        detections = json.loads(record['detections'])

                        # Display Interactive Hover View from the archived image
                        if os.path.exists(record['archive_path']):
                            with open(record['archive_path'], "rb") as f:
                                # convert() forces a full decode so the file
                                # handle can close before rendering.
                                hist_img = Image.open(f).convert("RGB")
                            display_interactive_results(hist_img, detections, key=f"hist_{record['id']}")

                            st.write("### 📈 Archived Summary")
                            summary = json.loads(record['summary'])
                            s_col1, s_col2, s_col3 = st.columns(3)
                            with s_col1:
                                st.metric("Total Bunches", sum(summary.values()))
                            with s_col2:
                                st.metric("Healthy (Ripe)", summary.get('Ripe', 0))
                            with s_col3:
                                abnormal = summary.get('Abnormal', 0)
                                st.metric("Abnormal Alerts", abnormal)
                        else:
                            st.error(f"Archive file not found: {record['archive_path']}")
        else:
            st.error(f"Failed to fetch history: {res.text}")
    except Exception as e:
        st.error(f"Error loading history: {str(e)}")
|