import base64
import io
import json
import os
import tempfile
from datetime import datetime

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import streamlit as st
from fpdf import FPDF
from PIL import Image
from ultralytics import YOLO
# --- 1. Global Backend Check ---
API_BASE_URL = "http://localhost:8000"


def check_backend():
    """Return True if the FastAPI backend answers on /get_confidence.

    Uses a short timeout so a dead backend does not block app startup.
    """
    try:
        res = requests.get(f"{API_BASE_URL}/get_confidence", timeout=2)
        return res.status_code == 200
    except requests.RequestException:
        # Connection refused / timeout / DNS failure -> treat as offline.
        # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
        return False


backend_active = check_backend()
# LOCAL MODEL LOADING REMOVED (YOLO26 Clean Sweep)
# UI now relies entirely on Backend API for NMS-Free inference.

# NOTE: st.set_page_config must be the FIRST Streamlit command executed on a
# run, otherwise Streamlit raises StreamlitAPIException. It therefore has to
# come before the offline-error UI below (which calls st.error/st.info).
st.set_page_config(page_title="Palm Oil Ripeness AI (YOLO26)", layout="wide")

if not backend_active:
    st.error("⚠️ Backend API is offline!")
    st.info("Please start the backend server first (e.g., `python main.py`) to unlock AI features.")
    if st.button("🔄 Retry Connection"):
        st.rerun()
    st.stop()  # Stops execution here, effectively disabling the app

# --- 2. Main Page (Only rendered if backend is active) ---
st.title("🌴 Palm Oil FFB Management System")
st.markdown("### Production-Ready AI Analysis & Archival")
# --- Sidebar ---
st.sidebar.header("Backend Controls")
def update_confidence():
new_conf = st.session_state.conf_slider
try:
requests.post(f"{API_BASE_URL}/set_confidence", json={"threshold": new_conf})
st.toast(f"Threshold updated to {new_conf}")
except:
st.sidebar.error("Failed to update threshold")
# The startup check passed, but the backend may have died since then —
# guard this request instead of letting the whole page crash.
try:
    response = requests.get(f"{API_BASE_URL}/get_confidence", timeout=5)
    current_conf = response.json().get("current_confidence", 0.25)
except (requests.RequestException, ValueError):
    current_conf = 0.25  # fall back to the backend's documented default

st.sidebar.success("Connected to API")
st.sidebar.info("Engine: YOLO26 NMS-Free (Inference: ~39ms)")

# Slider synchronized with the backend-held threshold.
st.sidebar.slider(
    "Confidence Threshold",
    0.1, 1.0,
    value=float(current_conf),
    key="conf_slider",
    on_change=update_confidence,
)
# Callbacks that clear cached results whenever the uploaded file set changes.
def reset_single_results():
    """Discard the previous single-image detection result."""
    st.session_state["last_detection"] = None


def reset_batch_results():
    """Discard the previous batch-processing results."""
    st.session_state["last_batch_results"] = None
# MPOB-style color map for detection overlays. Shared by the Plotly viewer,
# the PIL annotator, and the PDF report so each grade is colored consistently.
overlay_colors = {
    'Ripe': '#22c55e',        # Industrial Green
    'Underripe': '#fbbf24',   # Industrial Orange
    'Unripe': '#3b82f6',      # Industrial Blue
    'Abnormal': '#dc2626',    # Critical Red
    'Empty_Bunch': '#64748b', # Waste Gray
    'Overripe': '#7c2d12'     # Dark Brown/Orange
}
def display_interactive_results(image, detections, key=None):
    """Render `image` with interactive hover-boxes using Plotly.

    Args:
        image: PIL.Image used as the chart background.
        detections: list of dicts with keys 'box' ([x1, y1, x2, y2] pixel
            coords), 'class', 'confidence', 'is_health_alert', and optionally
            'bunch_id'.
        key: optional Streamlit element key (required when the chart is drawn
            more than once on a page).
    """
    img_width, img_height = image.size
    fig = go.Figure()
    # Background: the palm image anchored to the data coordinate system.
    fig.add_layout_image(
        dict(source=image, x=0, y=img_height, sizex=img_width, sizey=img_height,
             sizing="stretch", opacity=1, layer="below", xref="x", yref="y")
    )
    # Lock axes to the image dimensions and hide them.
    fig.update_xaxes(showgrid=False, range=(0, img_width), zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=(0, img_height), zeroline=False, visible=False, scaleanchor="x")
    # Add one interactive box trace per detection.
    for i, det in enumerate(detections):
        x1, y1, x2, y2 = det['box']
        # Plotly's y-axis points up while PIL's points down, so flip y.
        y_top, y_bottom = img_height - y1, img_height - y2
        color = overlay_colors.get(det['class'], "#ffeb3b")
        bunch_id = det.get('bunch_id', i + 1)
        # Plotly hover labels use HTML <br> for line breaks; building the text
        # as one string also fixes the original multi-line-literal SyntaxError.
        hover_text = (
            f"ID: #{bunch_id}<br>"
            f"Grade: {det['class']}<br>"
            f"Score: {det['confidence']:.2f}<br>"
            f"Alert: {det['is_health_alert']}"
        )
        fig.add_trace(go.Scatter(
            x=[x1, x2, x2, x1, x1],
            y=[y_top, y_top, y_bottom, y_bottom, y_top],
            fill="toself",
            fillcolor=color,
            opacity=0.3,  # semi-transparent until hover
            mode='lines',
            line=dict(color=color, width=3),
            name=f"Bunch #{bunch_id}",
            text=hover_text,
            hoverinfo="text"
        ))
    fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
    st.plotly_chart(fig, use_container_width=True, key=key)
def annotate_image(image, detections):
    """Draw high-visibility boxes and background-shaded labels onto `image`.

    Mutates `image` in place and returns it for convenience.

    Args:
        image: PIL.Image (RGB) to annotate.
        detections: list of dicts with 'box' ([x1, y1, x2, y2]), 'class',
            'confidence', and optionally 'bunch_id'.
    """
    from PIL import ImageDraw, ImageFont
    draw = ImageDraw.Draw(image)
    # Scale the font with the image so labels stay readable at any resolution.
    font_size = max(20, image.width // 40)
    try:
        font_path = "C:\\Windows\\Fonts\\arial.ttf"  # Windows-only path; falls back elsewhere
        if os.path.exists(font_path):
            font = ImageFont.truetype(font_path, font_size)
        else:
            font = ImageFont.load_default()
    except OSError:
        # Font file unreadable/corrupt — use PIL's built-in bitmap font.
        font = ImageFont.load_default()
    for det in detections:
        box = det['box']  # [x1, y1, x2, y2]
        cls = det['class']
        conf = det['confidence']
        bunch_id = det.get('bunch_id', '?')
        color = overlay_colors.get(cls, '#ffffff')
        # 1. Bold bounding box, line width scaled with image size.
        draw.rectangle(box, outline=color, width=max(4, image.width // 200))
        # 2. Label with a filled high-contrast background above the box.
        label = f"#{bunch_id} {cls} {conf:.2f}"
        try:
            # textbbox provides precise coordinates for the background rectangle.
            l, t, r, b = draw.textbbox((box[0], box[1] - font_size - 10), label, font=font)
            draw.rectangle([l - 5, t - 5, r + 5, b + 5], fill=color)
            draw.text((l, t), label, fill="white", font=font)
        except (AttributeError, ValueError):
            # Older Pillow without textbbox — basic text fallback.
            draw.text((box[0], box[1] - 25), label, fill=color)
    return image
def generate_batch_report(data, uploaded_files_map=None):
    """Generate a professional PDF report for batch results with visual evidence.

    Args:
        data: backend batch response with keys 'industrial_summary' (per-grade
            counts), 'total_count', and optionally 'detailed_results' (entries
            holding 'filename' and 'detection').
        uploaded_files_map: optional {filename: raw image bytes} used to embed
            AI-annotated visual evidence.

    Returns:
        The PDF document as produced by FPDF.output(dest='S').
    """
    pdf = FPDF()
    pdf.add_page()
    _report_header(pdf)
    summary = data.get('industrial_summary', {})
    _summary_table(pdf, summary, data.get('total_count', 0))
    _strategic_insights(pdf, summary)
    if 'detailed_results' in data and uploaded_files_map:
        _visual_evidence(pdf, data['detailed_results'], uploaded_files_map)
    # Footer
    pdf.set_y(-15)
    pdf.set_font("Arial", "I", 8)
    pdf.cell(190, 10, "Generated by Palm Oil AI Desktop PoC - YOLO26 Engine", align="C")
    return pdf.output(dest='S')


def _report_header(pdf):
    """Render the report title and generation timestamp."""
    pdf.set_font("Arial", "B", 16)
    pdf.cell(190, 10, "Palm Oil FFB Harvest Quality Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.cell(190, 10, f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=True, align="C")
    pdf.ln(10)


def _summary_table(pdf, summary, total_bunches):
    """Section 1: total bunch count plus a row per non-empty grade."""
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "1. Batch Summary", ln=True)
    pdf.set_font("Arial", "", 12)
    pdf.cell(95, 10, "Metric", border=1)
    pdf.cell(95, 10, "Value", border=1, ln=True)
    pdf.cell(95, 10, "Total Bunches Detected", border=1)
    pdf.cell(95, 10, str(total_bunches), border=1, ln=True)
    for grade, count in summary.items():
        if count > 0:
            pdf.cell(95, 10, f"Grade: {grade}", border=1)
            pdf.cell(95, 10, str(count), border=1, ln=True)
    pdf.ln(10)


def _strategic_insights(pdf, summary):
    """Section 2: yield-loss warning and critical health alerts."""
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "2. Strategic Yield Insights", ln=True)
    pdf.set_font("Arial", "", 12)
    loss = summary.get('Unripe', 0) + summary.get('Underripe', 0)
    if loss > 0:
        pdf.multi_cell(190, 10, f"WARNING: {loss} bunches were harvested before peak ripeness. "
                       "This directly impacts the Oil Extraction Rate (OER) and results in potential yield loss.")
    else:
        pdf.multi_cell(190, 10, "EXCELLENT: All detected bunches meet prime ripeness standards. Harvest efficiency is 100%.")
    # Critical Alerts
    abnormal = summary.get('Abnormal', 0)
    empty = summary.get('Empty_Bunch', 0)
    if abnormal > 0 or empty > 0:
        pdf.ln(5)
        pdf.set_text_color(220, 0, 0)  # red for critical alerts
        pdf.set_font("Arial", "B", 12)
        pdf.cell(190, 10, "CRITICAL HEALTH ALERTS:", ln=True)
        pdf.set_font("Arial", "", 12)
        if abnormal > 0:
            pdf.cell(190, 10, f"- {abnormal} Abnormal Bunches detected (Requires immediate field inspection).", ln=True)
        if empty > 0:
            pdf.cell(190, 10, f"- {empty} Empty Bunches detected (Waste reduction needed).", ln=True)
        pdf.set_text_color(0, 0, 0)  # restore default text color


def _visual_evidence(pdf, detailed_results, uploaded_files_map):
    """Section 3: embed annotated copies of the uploaded images."""
    pdf.add_page()
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "3. Visual Batch Evidence (AI Overlay)", ln=True)
    pdf.ln(5)
    # Group detections by source filename.
    results_by_file = {}
    for res in detailed_results:
        results_by_file.setdefault(res['filename'], []).append(res['detection'])
    for fname, detections in results_by_file.items():
        if fname not in uploaded_files_map:
            continue
        img = Image.open(io.BytesIO(uploaded_files_map[fname])).convert("RGB")
        # Draw annotated boxes for the PDF using the high-visibility utility.
        annotate_image(img, detections)
        # FPDF needs a file path, so write to a real temp file. Using tempfile
        # avoids path-injection from user-supplied filenames, and the finally
        # block guarantees cleanup even if FPDF raises.
        suffix = os.path.splitext(fname)[1] or ".png"
        tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
        temp_img_path = tmp.name
        tmp.close()
        try:
            img.save(temp_img_path)
            # Rough page-break heuristic based on current cursor position.
            if pdf.get_y() > 200:
                pdf.add_page()
            pdf.image(temp_img_path, x=10, w=150)
            pdf.set_font("Arial", "I", 10)
            pdf.cell(190, 10, f"Annotated: {fname}", ln=True)
            pdf.ln(5)
        finally:
            os.remove(temp_img_path)
# --- Tabs ---
# Main navigation for the four workflows.
tab1, tab2, tab3, tab4 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search", "History Vault"])
# --- Tab 1: Single Analysis ---
with tab1:
st.subheader("Analyze Single Bunch")
uploaded_file = st.file_uploader(
"Upload a bunch image...",
type=["jpg", "jpeg", "png"],
key="single",
on_change=reset_single_results
)
if uploaded_file:
# State initialization
if "last_detection" not in st.session_state:
st.session_state.last_detection = None
# 1. Auto-Detection Trigger
if uploaded_file and st.session_state.last_detection is None:
with st.spinner("Processing Detections Locally..."):
files = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
res = requests.post(f"{API_BASE_URL}/analyze", files=files)
if res.status_code == 200:
st.session_state.last_detection = res.json()
st.rerun() # Refresh to show results immediately
else:
st.error(f"Detection Failed: {res.text}")
# 2. Results Layout
if st.session_state.last_detection:
st.divider()
# PRIMARY ANNOTATED VIEW
st.write("### 🔍 AI Analytical View")
data = st.session_state.last_detection
img = Image.open(uploaded_file).convert("RGB")
display_interactive_results(img, data['detections'], key="main_viewer")
# Visual Legend
st.write("#### 🎨 Ripeness Legend")
l_cols = st.columns(len(overlay_colors))
for i, (grade, color) in enumerate(overlay_colors.items()):
with l_cols[i]:
st.markdown(f'