# demo_app.py — Streamlit front-end for the Palm Oil FFB (Fresh Fruit Bunch)
# ripeness analysis system (YOLO26 backend).
import base64
import hashlib
import io
import json
import os
from datetime import datetime

import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import requests
import streamlit as st
from fpdf import FPDF
from PIL import Image
from ultralytics import YOLO
@st.dialog("📘 AI Interpretation Guide")
def show_tech_guide():
    """Modal dialog explaining how to read the raw model output.

    Covers: the [1, 300, 6] detection tensor layout, the confidence score,
    the ONNX-vs-PyTorch coordinate conventions, and the two timing metrics
    shown on the dashboard. Purely informational — no state is touched.
    """
    st.write("### 🧠 1. The 'Thinking' Phase: The Raw Tensor [1, 300, 6]")
    st.write("""
When the AI 'thinks' about an image, it doesn't see 'Ripe' or 'Unripe'. It populates a
fixed-size memory buffer (Tensor) with **300 potential candidates**. Each candidate is
represented by a row of 6 numbers.
""")
    # Layout of one tensor row: 4 box coordinates, 1 score, 1 class id.
    st.table({
        "Tensor Index": ["0, 1, 2, 3", "4", "5"],
        "AI Output": ["Coordinates", "Confidence Score", "Class ID"],
        "Programmer's Logic": ["`[x1, y1, x2, y2]`", "`float (0.0 - 1.0)`", "`int (0-5)`"]
    })
    st.write("#### 🎯 The Coordinate Paradox (Pixels vs. Ratios)")
    st.write("""
Depending on the engine, the **Values at Index 0-3** speak different languages.
This is why the raw numbers won't match if you swap engines:
""")
    # Side-by-side comparison of the two coordinate systems.
    col_a, col_b = st.columns(2)
    with col_a:
        st.info("**PyTorch Pathway (.pt)**")
        st.write("- **Format**: Absolute Pixels")
        st.write("- **Logic**: The AI outputs numbers mapped to the photo's resolution (e.g., `245.0`).")
    with col_b:
        st.success("**ONNX Pathway (.onnx)**")
        st.write("- **Format**: Normalized Ratios")
        st.write("- **Logic**: The AI outputs percentages (0.0 to 1.0) relative to its internal 640x640 grid (e.g., `0.38`).")
    st.write("---")
    st.write("### 🎯 2. What is 'Confidence'? (The Probability Filter)")
    st.write("""
Confidence is the AI's **mathematical certainty** that an object exists in a specific box.
It is the product of *Objectness* (Is something there?) and *Class Probability* (What is it?).
""")
    st.table({
        "Confidence Value": ["> 0.90", "0.50 - 0.89", "< 0.25 (Threshold)"],
        "Interpretation": ["**Certain**: Clear, unobstructed view.", "**Likely**: Valid, but possibly obscured by fronds.", "**Noise**: Discarded to prevent False Positives."]
    })
    st.write("---")
    st.write("### 🛠️ 3. The Custom Handler (The Translation Layer)")
    st.write("""
Because ONNX returns raw ratios, we built a **Manual Scaling Handler**. It maps those
`0.0 - 1.0` values back to your high-resolution photo pixels.
This explains our two key metrics:
- **Inference Speed**: The time the AI spent populating the Raw Tensor.
- **Post-Processing**: The time our code spent 'translating' that Tensor into labels and pixels.
""")
    st.write("---")
    st.markdown("""
Your detection environment is powered by **YOLO26**, a custom architectural fork designed for zero-latency industrial sorting.
### ⚡ Performance Comparison
| Feature | YOLO26 (ONNX) | YOLO26 (Native) |
| :--- | :--- | :--- |
| **Coordinate System** | Normalized (0.0 - 1.0) | Absolute (Pixels) |
| **Primary Use Case** | Real-time Edge Sorting | High-Resolution Auditing |
| **Post-Processing** | None (NMS-Free) | Standard NMS |
""")
@st.dialog("📋 Batch Metadata Configuration")
def configure_batch_metadata(uploaded_files):
    """Modal: collect batch metadata, then POST all images to /process_batch.

    Args:
        uploaded_files: list of Streamlit UploadedFile objects from tab 2.

    On success, the JSON response is stored in
    st.session_state.last_batch_results and the app reruns so tab 2 can
    render the results; on failure an error is shown inside the dialog.
    """
    st.write(f"Preparing to process **{len(uploaded_files)}** images.")
    col1, col2 = st.columns(2)
    with col1:
        estate = st.text_input("Estate / Venue", value="Estate Alpha")
        block = st.text_input("Block ID", placeholder="e.g., B12")
    with col2:
        harvester = st.text_input("Harvester ID / Name")
        priority = st.selectbox("Job Priority", ["Normal", "High", "Urgent"])
    if st.button("🚀 Start Production Batch", type="primary", width='stretch'):
        metadata = {
            "estate": estate,
            "block": block,
            "harvester": harvester,
            "priority": priority
        }
        with st.spinner("Building Production Bundle..."):
            # One ("files", ...) tuple per image -> FastAPI multipart list.
            files_payload = [("files", (f.name, f.getvalue(), f.type)) for f in uploaded_files]
            # Use engine_choice from session state to get the correct model_type.
            # NOTE(review): duplicates the module-level engine_map — keep the
            # two mappings in sync if engines are added or renamed.
            engine_map_rev = {
                "YOLO26 (ONNX - High Speed)": "onnx",
                "YOLO26 (PyTorch - Native)": "pytorch",
                "YOLOv8-Sawit (Benchmark)": "yolov8_sawit"
            }
            selected_engine = st.session_state.get('engine_choice', "YOLO26 (ONNX - High Speed)")
            data_payload = {
                "model_type": engine_map_rev.get(selected_engine, "onnx"),
                "metadata": json.dumps(metadata)
            }
            try:
                res = requests.post(f"{API_BASE_URL}/process_batch", files=files_payload, data=data_payload)
                if res.status_code == 200:
                    st.session_state.last_batch_results = res.json()
                    st.rerun()
                else:
                    st.error(f"Batch Hand-off Failed: {res.text}")
            except Exception as e:
                st.error(f"Connection Error: {e}")
# --- 1. Global Backend Check ---
# Base URL of the FastAPI inference backend (started separately, see main.py).
API_BASE_URL = "http://localhost:8000"
# MPOB Color Map for Overlays (Global for consistency across all views:
# Plotly hover boxes, PIL annotations and the pie/bar charts).
overlay_colors = {
    'Ripe': '#22c55e',       # Industrial Green
    'Underripe': '#fbbf24',  # Industrial Orange
    'Unripe': '#3b82f6',     # Industrial Blue
    'Abnormal': '#dc2626',   # Critical Red
    'Empty_Bunch': '#64748b',# Waste Gray
    'Overripe': '#7c2d12'    # Dark Brown/Orange
}
# Helpers: robust color lookup (below) and the result-reset callbacks used
# when uploaded files change or the engine is switched.
  122. def get_color(class_name):
  123. """Robust color lookup for consistent across models."""
  124. # Normalize: "Under-ripe" -> "underripe", "Empty Bunch" -> "emptybunch"
  125. norm_name = class_name.lower().replace("-", "").replace("_", "").replace(" ", "")
  126. # Map normalized names to your MPOB standard colors
  127. color_map = {k.lower().replace("_", ""): v for k, v in overlay_colors.items()}
  128. if norm_name in color_map:
  129. return color_map[norm_name]
  130. # Fallback: Generate a consistent unique color for benchmark-only classes
  131. import hashlib
  132. return f"#{hashlib.md5(class_name.encode()).hexdigest()[:6]}"
  133. def reset_single_results():
  134. st.session_state.last_detection = None
  135. def reset_batch_results():
  136. st.session_state.last_batch_results = None
  137. def reset_all_analysis():
  138. """Global reset for all active analysis views."""
  139. st.session_state.last_detection = None
  140. st.session_state.last_batch_results = None
  141. # Increment uploader keys to 'forget' current files (Clear Canvas)
  142. if "single_uploader_key" not in st.session_state:
  143. st.session_state.single_uploader_key = 0
  144. st.session_state.single_uploader_key += 1
  145. if "batch_uploader_key" not in st.session_state:
  146. st.session_state.batch_uploader_key = 0
  147. st.session_state.batch_uploader_key += 1
  148. def check_backend():
  149. try:
  150. res = requests.get(f"{API_BASE_URL}/get_confidence", timeout=2)
  151. return res.status_code == 200
  152. except:
  153. return False
# Probe the backend once per script run; the whole UI is gated on it.
backend_active = check_backend()
# LOCAL MODEL LOADING REMOVED (YOLO26 Clean Sweep)
# UI now relies entirely on Backend API for NMS-Free inference.
if not backend_active:
    st.error("⚠️ Backend API is offline!")
    st.info("Please start the backend server first (e.g., `python main.py`) to unlock AI features.")
    if st.button("🔄 Retry Connection"):
        st.rerun()
    st.stop() # Stops execution here, effectively disabling the app
# --- 2. Main Page Config (Only rendered if backend is active) ---
# NOTE(review): st.set_page_config is documented to be the first Streamlit
# call of a script; the error/stop path above runs before it — confirm this
# does not warn on the Streamlit version in use.
st.set_page_config(page_title="Palm Oil Ripeness AI (YOLO26)", layout="wide")
st.title("🌴 Palm Oil FFB Management System")
st.markdown("### Production-Ready AI Analysis & Archival")
# --- Sidebar ---
st.sidebar.header("Backend Controls")
  169. def update_confidence():
  170. new_conf = st.session_state.conf_slider
  171. try:
  172. requests.post(f"{API_BASE_URL}/set_confidence", json={"threshold": new_conf})
  173. st.toast(f"Threshold updated to {new_conf}")
  174. except:
  175. st.sidebar.error("Failed to update threshold")
  176. # We already know backend is up here
  177. response = requests.get(f"{API_BASE_URL}/get_confidence")
  178. current_conf = response.json().get("current_confidence", 0.25)
  179. st.sidebar.success(f"Connected to API")
  180. st.sidebar.info("Engine: YOLO26 NMS-Free (Inference: ~39ms)")
  181. # Synchronized Slider
  182. st.sidebar.slider(
  183. "Confidence Threshold",
  184. 0.1, 1.0,
  185. value=float(current_conf),
  186. key="conf_slider",
  187. on_change=update_confidence
  188. )
  189. st.sidebar.markdown("---")
  190. # Inference Engine
  191. engine_choice = st.sidebar.selectbox(
  192. "Select Model Engine:",
  193. ["YOLO26 (ONNX - High Speed)", "YOLO26 (PyTorch - Native)", "YOLOv8-Sawit (Benchmark)"],
  194. index=0,
  195. on_change=reset_all_analysis # Clear canvas on engine switch
  196. )
  197. # Map selection to internal labels
  198. engine_map = {
  199. "YOLO26 (ONNX - High Speed)": "onnx",
  200. "YOLO26 (PyTorch - Native)": "pytorch",
  201. "YOLOv8-Sawit (Benchmark)": "yolov8_sawit"
  202. }
  203. st.sidebar.markdown("---")
  204. model_type = engine_map[engine_choice]
  205. if st.sidebar.button("❓ How to read results?", icon="📘", width='stretch'):
  206. show_tech_guide()
  207. st.sidebar.markdown("---")
  208. st.sidebar.subheader("🏗️ Model Capabilities")
  209. try:
  210. info_res = requests.get(f"{API_BASE_URL}/get_model_info", params={"model_type": model_type})
  211. if info_res.status_code == 200:
  212. m_info = info_res.json()
  213. st.sidebar.caption(m_info['description'])
  214. st.sidebar.write("**Detected Categories:**")
  215. # Display as a cloud of tags or bullets
  216. cols = st.sidebar.columns(2)
  217. for i, cat in enumerate(m_info['detections_categories']):
  218. cols[i % 2].markdown(f"- `{cat}`")
  219. except:
  220. st.sidebar.error("Failed to load model metadata.")
  221. # Function definitions moved to top
# Function definitions moved to top
def display_interactive_results(image, detections, key=None):
    """Renders image with interactive hover-boxes using Plotly.

    Args:
        image: PIL.Image used as the chart background.
        detections: list of dicts with 'box' ([x1, y1, x2, y2] in image
            pixels — TODO confirm against the /analyze response schema),
            'class', 'confidence', 'is_health_alert' and optional 'bunch_id'.
        key: optional Streamlit widget key for the plotly_chart element.
    """
    img_width, img_height = image.size
    fig = go.Figure()
    # Add the palm image as the background
    fig.add_layout_image(
        dict(source=image, x=0, y=img_height, sizex=img_width, sizey=img_height,
             sizing="stretch", opacity=1, layer="below", xref="x", yref="y")
    )
    # Configure axes to match image dimensions (hidden, aspect-locked via
    # scaleanchor so the photo is not distorted).
    fig.update_xaxes(showgrid=False, range=(0, img_width), zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=(0, img_height), zeroline=False, visible=False, scaleanchor="x")
    # Add interactive boxes
    for i, det in enumerate(detections):
        x1, y1, x2, y2 = det['box']
        # Plotly y-axis is inverted relative to PIL, so we flip y
        y_top, y_bottom = img_height - y1, img_height - y2
        color = get_color(det['class'])
        is_bench = (st.session_state.get('engine_choice') == "YOLOv8-Sawit (Benchmark)")
        # The 'Hover' shape: a closed 5-point polygon tracing the box.
        bunch_id = det.get('bunch_id', i+1)
        fig.add_trace(go.Scatter(
            x=[x1, x2, x2, x1, x1],
            y=[y_top, y_top, y_bottom, y_bottom, y_top],
            fill="toself",
            fillcolor=color,
            opacity=0.5 if is_bench else 0.3, # Stronger highlight for benchmark
            mode='lines',
            line=dict(color=color, width=5 if is_bench else 3, dash='dot' if is_bench else 'solid'),
            name=f"ID: #{bunch_id}", # Unified ID Tag
            text=f"<b>ID: #{bunch_id}</b><br>Grade: {det['class']}<br>Score: {det['confidence']:.2f}<br>Alert: {det['is_health_alert']}",
            hoverinfo="text"
        ))
    fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
    st.plotly_chart(fig, width='stretch', key=key)
  257. def annotate_image(image, detections):
  258. """Draws high-visibility 'Plated Labels' and boxes on the image."""
  259. from PIL import ImageDraw, ImageFont
  260. draw = ImageDraw.Draw(image)
  261. # 1. Dynamic Font Scaling (width // 40 as requested)
  262. font_size = max(20, image.width // 40)
  263. try:
  264. # standard Windows font paths for agent environment
  265. font_path = "C:\\Windows\\Fonts\\arialbd.ttf" # Bold for higher visibility
  266. if not os.path.exists(font_path):
  267. font_path = "C:\\Windows\\Fonts\\arial.ttf"
  268. if os.path.exists(font_path):
  269. font = ImageFont.truetype(font_path, font_size)
  270. else:
  271. font = ImageFont.load_default()
  272. except:
  273. font = ImageFont.load_default()
  274. for det in detections:
  275. box = det['box'] # [x1, y1, x2, y2]
  276. cls = det['class']
  277. conf = det['confidence']
  278. bunch_id = det.get('bunch_id', '?')
  279. color = get_color(cls)
  280. is_bench = (st.session_state.get('engine_choice') == "YOLOv8-Sawit (Benchmark)")
  281. # 2. Draw Heavy-Duty Bounding Box
  282. line_width = max(6 if is_bench else 4, image.width // (80 if is_bench else 150))
  283. draw.rectangle(box, outline=color, width=line_width)
  284. # 3. Draw 'Plated Label' (Background Shaded)
  285. label = f"#{bunch_id} {cls} {conf:.2f}"
  286. try:
  287. # Precise background calculation using textbbox
  288. l, t, r, b = draw.textbbox((box[0], box[1]), label, font=font)
  289. # Shift background up so it doesn't obscure the fruit
  290. bg_rect = [l - 2, t - (b - t) - 10, r + 2, t - 6]
  291. draw.rectangle(bg_rect, fill=color)
  292. # Draw text inside the plate
  293. draw.text((l, t - (b - t) - 8), label, fill="white", font=font)
  294. except:
  295. # Simple fallback
  296. draw.text((box[0], box[1] - font_size), label, fill=color)
  297. return image
def generate_batch_report(data, uploaded_files_map=None):
    """Generates a professional PDF report for batch results with visual evidence.

    Args:
        data: batch response dict; uses 'industrial_summary', 'total_count'
            and optionally 'detailed_results' ({'filename', 'detection'} rows).
        uploaded_files_map: optional {filename: raw image bytes} used to
            render the annotated evidence pages.

    Returns:
        The serialized PDF (FPDF ``output(dest='S')``).
        NOTE(review): fpdf v1 returns str here while fpdf2 returns
        bytes/bytearray — confirm against the installed fpdf version.
    """
    from PIL import ImageDraw
    pdf = FPDF()
    pdf.add_page()
    # Title block
    pdf.set_font("Arial", "B", 16)
    pdf.cell(190, 10, "Palm Oil FFB Harvest Quality Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.cell(190, 10, f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=True, align="C")
    pdf.ln(10)
    # 1. Summary Table
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "1. Batch Summary", ln=True)
    pdf.set_font("Arial", "", 12)
    summary = data.get('industrial_summary', {})
    total_bunches = data.get('total_count', 0)
    pdf.cell(95, 10, "Metric", border=1)
    pdf.cell(95, 10, "Value", border=1, ln=True)
    pdf.cell(95, 10, "Total Bunches Detected", border=1)
    pdf.cell(95, 10, str(total_bunches), border=1, ln=True)
    # One row per grade that actually occurred in the batch.
    for grade, count in summary.items():
        if count > 0:
            pdf.cell(95, 10, f"Grade: {grade}", border=1)
            pdf.cell(95, 10, str(count), border=1, ln=True)
    pdf.ln(10)
    # 2. Strategic Insights
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "2. Strategic Yield Insights", ln=True)
    pdf.set_font("Arial", "", 12)
    unripe = summary.get('Unripe', 0)
    underripe = summary.get('Underripe', 0)
    loss = unripe + underripe
    if loss > 0:
        pdf.multi_cell(190, 10, f"WARNING: {loss} bunches were harvested before peak ripeness. "
            "This directly impacts the Oil Extraction Rate (OER) and results in potential yield loss.")
    else:
        pdf.multi_cell(190, 10, "EXCELLENT: All detected bunches meet prime ripeness standards. Harvest efficiency is 100%.")
    # Critical Alerts (rendered in red, then color reset to black)
    abnormal = summary.get('Abnormal', 0)
    empty = summary.get('Empty_Bunch', 0)
    if abnormal > 0 or empty > 0:
        pdf.ln(5)
        pdf.set_text_color(220, 0, 0)
        pdf.set_font("Arial", "B", 12)
        pdf.cell(190, 10, "CRITICAL HEALTH ALERTS:", ln=True)
        pdf.set_font("Arial", "", 12)
        if abnormal > 0:
            pdf.cell(190, 10, f"- {abnormal} Abnormal Bunches detected (Requires immediate field inspection).", ln=True)
        if empty > 0:
            pdf.cell(190, 10, f"- {empty} Empty Bunches detected (Waste reduction needed).", ln=True)
        pdf.set_text_color(0, 0, 0)
    # 3. Visual Evidence Section
    if 'detailed_results' in data and uploaded_files_map:
        pdf.add_page()
        pdf.set_font("Arial", "B", 14)
        pdf.cell(190, 10, "3. Visual Batch Evidence (AI Overlay)", ln=True)
        pdf.ln(5)
        # Group detections by filename
        results_by_file = {}
        for res in data['detailed_results']:
            fname = res['filename']
            if fname not in results_by_file:
                results_by_file[fname] = []
            results_by_file[fname].append(res['detection'])
        for fname, detections in results_by_file.items():
            if fname in uploaded_files_map:
                img_bytes = uploaded_files_map[fname]
                img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
                # NOTE(review): 'draw' is unused — annotate_image creates its
                # own Draw object internally.
                draw = ImageDraw.Draw(img)
                # Drawing annotated boxes for PDF using high-visibility utility
                annotate_image(img, detections)
                # Save to temp file for PDF.
                # NOTE(review): fname is used verbatim in the temp path — a
                # filename containing path separators would break this.
                temp_img_path = f"temp_report_{fname}"
                img.save(temp_img_path)
                # Check if we need a new page based on image height (rough estimate)
                if pdf.get_y() > 200:
                    pdf.add_page()
                pdf.image(temp_img_path, x=10, w=150)
                pdf.set_font("Arial", "I", 10)
                pdf.cell(190, 10, f"Annotated: {fname}", ln=True)
                pdf.ln(5)
                os.remove(temp_img_path)
    # Footer
    pdf.set_y(-15)
    pdf.set_font("Arial", "I", 8)
    pdf.cell(190, 10, "Generated by Palm Oil AI Desktop PoC - YOLO26 Engine", align="C")
    return pdf.output(dest='S')
# --- Tabs ---
# Top-level navigation; each `with tabN:` block below guards its own state.
tab1, tab2, tab3, tab4, tab5 = st.tabs([
    "Single Analysis",
    "Batch Processing",
    "Similarity Search",
    "History Vault",
    "Batch Reviewer"
])
  393. # --- Tab 1: Single Analysis ---
  394. with tab1:
  395. st.subheader("Analyze Single Bunch")
  396. # 1. Initialize Uploader Key
  397. if "single_uploader_key" not in st.session_state:
  398. st.session_state.single_uploader_key = 0
  399. uploaded_file = st.file_uploader(
  400. "Upload a bunch image...",
  401. type=["jpg", "jpeg", "png"],
  402. key=f"single_{st.session_state.single_uploader_key}",
  403. on_change=reset_single_results
  404. )
  405. if uploaded_file:
  406. # State initialization
  407. if "last_detection" not in st.session_state:
  408. st.session_state.last_detection = None
  409. # 1. Auto-Detection Trigger
  410. if uploaded_file and st.session_state.last_detection is None:
  411. with st.spinner(f"Processing with {model_type.upper()} Engine..."):
  412. files = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
  413. payload = {"model_type": model_type}
  414. res = requests.post(f"{API_BASE_URL}/analyze", files=files, data=payload)
  415. if res.status_code == 200:
  416. st.session_state.last_detection = res.json()
  417. st.rerun() # Refresh to show results immediately
  418. else:
  419. st.error(f"Detection Failed: {res.text}")
  420. # 2. Results Layout
  421. if st.session_state.last_detection:
  422. # Redo Button at the top for easy access
  423. if st.button("🔄 Re-analyze Image", width='stretch', type="primary", help="Force a fresh detection (useful if threshold changed)."):
  424. st.session_state.last_detection = None
  425. st.rerun()
  426. data = st.session_state.last_detection
  427. st.divider()
  428. if model_type == "benchmark":
  429. st.info("💡 **Benchmark Mode**: Labels and colors are determined by the external model's architecture. Some labels may not match standard MPOB categories.")
  430. st.write("### 📈 Manager's Dashboard")
  431. m_col1, m_col2, m_col3, m_col4 = st.columns(4)
  432. with m_col1:
  433. st.metric("Total Bunches", data.get('total_count', 0))
  434. with m_col2:
  435. if model_type == "benchmark":
  436. # For benchmark model, show the top detected class instead of 'Healthy'
  437. top_class = "None"
  438. if data.get('industrial_summary'):
  439. top_class = max(data['industrial_summary'], key=data['industrial_summary'].get)
  440. st.metric("Top Detected Class", top_class)
  441. else:
  442. st.metric("Healthy (Ripe)", data['industrial_summary'].get('Ripe', 0))
  443. with m_col3:
  444. # Refined speed label based on engine
  445. speed_label = "Raw Speed (Unlabeled)" if model_type == "onnx" else "Wrapped Speed (Auto-Labeled)"
  446. st.metric("Inference Speed", f"{data.get('inference_ms', 0):.1f} ms", help=speed_label)
  447. with m_col4:
  448. st.metric("Post-Processing", f"{data.get('processing_ms', 0):.1f} ms", help="Labeling/Scaling overhead")
  449. st.divider()
  450. # Side-by-Side View (Technical Trace)
  451. img = Image.open(uploaded_file).convert("RGB")
  452. if st.session_state.get('tech_trace', False):
  453. t_col1, t_col2 = st.columns(2)
  454. with t_col1:
  455. st.subheader("🔢 Raw Output Tensor (The Math)")
  456. st.caption("First 5 rows of the 1x300x6 detection tensor.")
  457. st.json(data.get('raw_array_sample', []))
  458. with t_col2:
  459. st.subheader("🎨 AI Interpretation")
  460. img_annotated = annotate_image(img.copy(), data['detections'])
  461. st.image(img_annotated, width='stretch')
  462. else:
  463. # Regular View
  464. st.write("### 🔍 AI Analytical View")
  465. display_interactive_results(img, data['detections'], key="main_viewer")
  466. col1, col2 = st.columns([1.5, 1]) # Keep original col structure for summary below
  467. with col1:
  468. col_tech_h1, col_tech_h2 = st.columns([1, 1])
  469. with col_tech_h1:
  470. st.write("#### 🛠️ Technical Evidence")
  471. with col_tech_h2:
  472. st.session_state.tech_trace = st.toggle("🔬 Side-by-Side Trace", value=st.session_state.get('tech_trace', False))
  473. with st.expander("Raw Output Tensor (NMS-Free)", expanded=False):
  474. coord_type = "Absolute Pixels" if model_type == "pytorch" else "Normalized Ratios (0.0-1.0)"
  475. st.warning(f"Engine detected: {model_type.upper()} | Coordinate System: {coord_type}")
  476. st.json(data.get('raw_array_sample', []))
  477. with st.container(border=True):
  478. st.write("### 🏷️ Detection Results")
  479. if not data['detections']:
  480. st.warning("No Fresh Fruit Bunches detected.")
  481. else:
  482. for det in data['detections']:
  483. st.info(f"### Bunch #{det['bunch_id']}: {det['class']} ({det['confidence']:.2%})")
  484. st.write("### 📊 Harvest Quality Mix")
  485. # Convert industrial_summary dictionary to a DataFrame for charting
  486. summary_df = pd.DataFrame(
  487. list(data['industrial_summary'].items()),
  488. columns=['Grade', 'Count']
  489. )
  490. # Filter out classes with 0 count for a cleaner chart
  491. summary_df = summary_df[summary_df['Count'] > 0]
  492. if not summary_df.empty:
  493. # Create a Pie Chart to show the proportion of each grade
  494. fig = px.pie(summary_df, values='Count', names='Grade',
  495. color='Grade',
  496. color_discrete_map={
  497. 'Ripe': '#22c55e', # Industrial Green
  498. 'Underripe': '#fbbf24', # Industrial Orange
  499. 'Unripe': '#3b82f6', # Industrial Blue
  500. 'Abnormal': '#dc2626', # Critical Red
  501. 'Empty_Bunch': '#64748b' # Waste Gray
  502. },
  503. hole=0.4)
  504. fig.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=300)
  505. st.plotly_chart(fig, width='stretch', key="single_pie")
  506. # 💡 Strategic R&D Insight: Harvest Efficiency
  507. st.write("---")
  508. st.write("#### 💡 Strategic R&D Insight")
  509. unripe_count = data['industrial_summary'].get('Unripe', 0)
  510. underripe_count = data['industrial_summary'].get('Underripe', 0)
  511. total_non_prime = unripe_count + underripe_count
  512. st.write(f"🌑 **Unripe (Mentah):** {unripe_count}")
  513. st.write(f"🌗 **Underripe (Kurang Masak):** {underripe_count}")
  514. if total_non_prime > 0:
  515. st.warning(f"🚨 **Potential Yield Loss:** {total_non_prime} bunches harvested too early. This will reduce OER (Oil Extraction Rate).")
  516. else:
  517. st.success("✅ **Harvest Efficiency:** 100% Prime Ripeness detected.")
  518. # High-Priority Health Alert
  519. if data['industrial_summary'].get('Abnormal', 0) > 0:
  520. st.error(f"🚨 CRITICAL: {data['industrial_summary']['Abnormal']} Abnormal Bunches Detected!")
  521. if data['industrial_summary'].get('Empty_Bunch', 0) > 0:
  522. st.warning(f"⚠️ ALERT: {data['industrial_summary']['Empty_Bunch']} Empty Bunches Detected.")
  523. # 3. Cloud Actions (Only if detections found)
  524. st.write("---")
  525. st.write("#### ✨ Cloud Archive")
  526. if st.button("🚀 Save to Atlas (Vectorize)", width='stretch'):
  527. with st.spinner("Archiving..."):
  528. import json
  529. primary_det = data['detections'][0]
  530. payload = {"detection_data": json.dumps(primary_det)}
  531. files_cloud = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
  532. res_cloud = requests.post(f"{API_BASE_URL}/vectorize_and_store", files=files_cloud, data=payload)
  533. if res_cloud.status_code == 200:
  534. res_json = res_cloud.json()
  535. if res_json["status"] == "success":
  536. st.success(f"Archived! ID: `{res_json['record_id'][:8]}...`")
  537. else:
  538. st.error(f"Cloud Error: {res_json['message']}")
  539. else:
  540. st.error("Failed to connect to cloud service")
  541. if st.button("🚩 Flag Misclassification", width='stretch', type="secondary"):
  542. # Save to local feedback folder
  543. timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
  544. feedback_id = f"fb_{timestamp}"
  545. img_path = f"feedback/{feedback_id}.jpg"
  546. json_path = f"feedback/{feedback_id}.json"
  547. # Save image
  548. Image.open(uploaded_file).save(img_path)
  549. # Save metadata
  550. feedback_data = {
  551. "original_filename": uploaded_file.name,
  552. "timestamp": timestamp,
  553. "detections": data['detections'],
  554. "threshold_used": data['current_threshold']
  555. }
  556. with open(json_path, "w") as f:
  557. json.dump(feedback_data, f, indent=4)
  558. st.toast("✅ Feedback saved to local vault!", icon="🚩")
  559. if st.button("💾 Local History Vault (Auto-Saved)", width='stretch', type="secondary", disabled=True):
  560. pass
  561. st.caption("✅ This analysis was automatically archived to the local vault.")
# --- Tab 2: Batch Processing ---
# NOTE(review): nesting reconstructed from a whitespace-mangled source —
# confirm column/container scopes against the original file.
with tab2:
    st.subheader("Bulk Analysis")
    # 1. Initialize Session State
    if "batch_uploader_key" not in st.session_state:
        st.session_state.batch_uploader_key = 0
    if "last_batch_results" not in st.session_state:
        st.session_state.last_batch_results = None
    # 2. Uploader UI (Must be at top to avoid NameError during result persistence)
    col_batch1, col_batch2 = st.columns([4, 1])
    with col_batch1:
        uploaded_files = st.file_uploader(
            "Upload multiple images...",
            type=["jpg", "jpeg", "png"],
            accept_multiple_files=True,
            key=f"batch_{st.session_state.batch_uploader_key}",
            on_change=reset_batch_results
        )
    with col_batch2:
        st.write("##") # Alignment
        # Only offer processing when files are staged and no results cached.
        if st.session_state.last_batch_results is None and uploaded_files:
            if st.button("🔍 Configure & Process Batch", type="primary", width='stretch'):
                configure_batch_metadata(uploaded_files)
        if st.button("🗑️ Reset Uploader"):
            # Bumping the key re-creates the uploader, discarding its files.
            st.session_state.batch_uploader_key += 1
            st.session_state.last_batch_results = None
            st.rerun()
    st.divider()
    # 3. Display Persisted Results (if any)
    if st.session_state.last_batch_results:
        res_data = st.session_state.last_batch_results
        with st.container(border=True):
            st.success(f"✅ Successfully processed {res_data['processed_count']} images.")
            # Batch Summary Dashboard
            st.write("### 📈 Batch Quality Overview")
            batch_summary = res_data.get('industrial_summary', {})
            if batch_summary:
                sum_df = pd.DataFrame(list(batch_summary.items()), columns=['Grade', 'Count'])
                sum_df = sum_df[sum_df['Count'] > 0]
                b_col1, b_col2 = st.columns([1, 1])
                with b_col1:
                    st.dataframe(sum_df, hide_index=True, width='stretch')
                with b_col2:
                    if not sum_df.empty:
                        fig_batch = px.bar(sum_df, x='Grade', y='Count', color='Grade',
                                           color_discrete_map={
                                               'Ripe': '#22c55e',
                                               'Underripe': '#fbbf24',
                                               'Unripe': '#3b82f6',
                                               'Abnormal': '#dc2626',
                                               'Empty_Bunch': '#64748b'
                                           })
                        fig_batch.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=200, showlegend=False)
                        st.plotly_chart(fig_batch, width='stretch', key="batch_bar")
                if batch_summary.get('Abnormal', 0) > 0:
                    st.error(f"🚨 BATCH CRITICAL: {batch_summary['Abnormal']} Abnormal Bunches found in this batch!")
            st.write("Generated Record IDs:")
            st.code(res_data['record_ids'])
            # --- 4. Batch Evidence Gallery ---
            st.write("### 🖼️ Detailed Detection Evidence")
            if 'detailed_results' in res_data:
                # Group results by filename for gallery
                gallery_map = {}
                for res in res_data['detailed_results']:
                    fname = res['filename']
                    if fname not in gallery_map:
                        gallery_map[fname] = []
                    gallery_map[fname].append(res['detection'])
                # Show images with overlays using consistent utility
                for up_file in uploaded_files:
                    if up_file.name in gallery_map:
                        with st.container(border=True):
                            g_img = Image.open(up_file).convert("RGB")
                            g_annotated = annotate_image(g_img, gallery_map[up_file.name])
                            st.image(g_annotated, caption=f"Evidence: {up_file.name}", width='stretch')
                # PDF Export Button (Pass images map so evidence pages render)
                files_map = {f.name: f.getvalue() for f in uploaded_files}
                pdf_bytes = generate_batch_report(res_data, files_map)
                st.download_button(
                    label="📄 Download Executive Batch Report (PDF)",
                    data=pdf_bytes,
                    file_name=f"PalmOil_BatchReport_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf",
                    mime="application/pdf",
                    width='stretch'
                )
            if st.button("Clear Results & Start New Batch", width='stretch'):
                st.session_state.last_batch_results = None
                st.rerun()
        st.divider()
  651. # --- Tab 3: Similarity Search ---
  652. with tab3:
  653. st.subheader("Hybrid Semantic Search")
  654. st.markdown("Search records by either **Image Similarity** or **Natural Language Query**.")
  655. with st.form("hybrid_search_form"):
  656. col_input1, col_input2 = st.columns(2)
  657. with col_input1:
  658. search_file = st.file_uploader("Option A: Search Image...", type=["jpg", "jpeg", "png"], key="search")
  659. with col_input2:
  660. text_query = st.text_input("Option B: Natural Language Query", placeholder="e.g., 'ripe bunches with dark spots' or 'unripe fruit'")
  661. top_k = st.slider("Results Limit (Top K)", 1, 20, 3)
  662. submit_search = st.form_submit_button("Run Semantic Search")
  663. if submit_search:
  664. if not search_file and not text_query:
  665. st.warning("Please provide either an image or a text query.")
  666. else:
  667. with st.spinner("Searching Vector Index..."):
  668. payload = {"limit": top_k}
  669. # If an image is uploaded, it takes precedence for visual search
  670. if search_file:
  671. files = {"file": (search_file.name, search_file.getvalue(), search_file.type)}
  672. # Pass top_k as part of the data
  673. res = requests.post(f"{API_BASE_URL}/search_hybrid", files=files, data=payload)
  674. # Otherwise, use text query
  675. elif text_query:
  676. payload["text_query"] = text_query
  677. # Send as form-data (data=) to match FastAPI's Form(None)
  678. res = requests.post(f"{API_BASE_URL}/search_hybrid", data=payload)
  679. if res.status_code == 200:
  680. results = res.json().get("results", [])
  681. if not results:
  682. st.warning("No similar records found.")
  683. else:
  684. st.success(f"Found {len(results)} matches.")
  685. for item in results:
  686. with st.container(border=True):
  687. c1, c2 = st.columns([1, 2])
  688. # Fetch the image for this result
  689. rec_id = item["_id"]
  690. img_res = requests.get(f"{API_BASE_URL}/get_image/{rec_id}")
  691. with c1:
  692. if img_res.status_code == 200:
  693. img_b64 = img_res.json().get("image_data")
  694. if img_b64:
  695. st.image(base64.b64decode(img_b64), width=250)
  696. else:
  697. st.write("No image data found.")
  698. else:
  699. st.write("Failed to load image.")
  700. with c2:
  701. st.write(f"**Class:** {item['ripeness_class']}")
  702. st.write(f"**Similarity Score:** {item['score']:.4f}")
  703. st.write(f"**Timestamp:** {item['timestamp']}")
  704. st.write(f"**ID:** `{rec_id}`")
  705. else:
  706. st.error(f"Search failed: {res.text}")
  707. # --- Tab 4: History Vault ---
  708. with tab4:
  709. st.subheader("📜 Local History Vault")
  710. st.caption("Industrial-grade audit log of all past AI harvest scans.")
  711. if "selected_history_id" not in st.session_state:
  712. st.session_state.selected_history_id = None
  713. try:
  714. res = requests.get(f"{API_BASE_URL}/get_history")
  715. if res.status_code == 200:
  716. history_data = res.json().get("history", [])
  717. if not history_data:
  718. st.info("No saved records found in the vault.")
  719. else:
  720. if st.session_state.selected_history_id is None:
  721. # --- 1. ListView Mode (Management Dashboard) ---
  722. st.write("### 📋 Audit Log")
  723. # Prepare searchable dataframe
  724. df_history = pd.DataFrame(history_data)
  725. # Clean up for display
  726. display_df = df_history[['id', 'timestamp', 'engine', 'filename', 'inference_ms']].copy()
  727. display_df.columns = ['ID', 'Date/Time', 'Engine', 'Filename', 'Inference (ms)']
  728. st.dataframe(
  729. display_df,
  730. hide_index=True,
  731. width='stretch',
  732. column_config={
  733. "ID": st.column_config.NumberColumn(width="small"),
  734. "Inference (ms)": st.column_config.NumberColumn(format="%.1f ms")
  735. }
  736. )
  737. # Industrial Selection UI
  738. hist_col1, hist_col2 = st.columns([3, 1])
  739. with hist_col1:
  740. target_id = st.selectbox(
  741. "Select Record for Deep Dive Analysis",
  742. options=df_history['id'].tolist(),
  743. format_func=lambda x: f"Record #{x} - {df_history[df_history['id']==x]['filename'].values[0]}"
  744. )
  745. with hist_col2:
  746. st.write("##") # Alignment
  747. if st.button("🔬 Start Deep Dive", type="primary", width='stretch'):
  748. st.session_state.selected_history_id = target_id
  749. st.rerun()
  750. else:
  751. # --- 2. Detail View Mode (Technical Auditor) ---
  752. record = next((item for item in history_data if item["id"] == st.session_state.selected_history_id), None)
  753. if not record:
  754. st.error("Audit record not found.")
  755. if st.button("Back to List"):
  756. st.session_state.selected_history_id = None
  757. st.rerun()
  758. else:
  759. st.button("⬅️ Back to Audit Log", on_click=lambda: st.session_state.update({"selected_history_id": None}))
  760. st.divider()
  761. st.write(f"## 🔍 Deep Dive: Record #{record['id']}")
  762. engine_val = record.get('engine', 'Unknown')
  763. st.caption(f"Original Filename: `{record['filename']}` | Processed: `{record['timestamp']}` | Engine: `{engine_val.upper()}`")
  764. detections = json.loads(record['detections'])
  765. summary = json.loads(record['summary'])
  766. # Metrics Executive Summary
  767. h_col1, h_col2, h_col3, h_col4 = st.columns(4)
  768. with h_col1:
  769. st.metric("Total Bunches", sum(summary.values()))
  770. with h_col2:
  771. st.metric("Healthy (Ripe)", summary.get('Ripe', 0))
  772. with h_col3:
  773. st.metric("Engine Performance", f"{record.get('inference_ms', 0) or 0:.1f} ms")
  774. with h_col4:
  775. st.metric("Labeling Overhead", f"{record.get('processing_ms', 0) or 0:.1f} ms")
  776. # Re-Annotate Archived Image
  777. if os.path.exists(record['archive_path']):
  778. with open(record['archive_path'], "rb") as f:
  779. hist_img = Image.open(f).convert("RGB")
  780. # Side-by-Side: Interactive vs Static Plate
  781. v_tab1, v_tab2 = st.tabs(["Interactive Plotly View", "Static Annotated Evidence"])
  782. with v_tab1:
  783. display_interactive_results(hist_img, detections, key=f"hist_plotly_{record['id']}")
  784. with v_tab2:
  785. img_plate = annotate_image(hist_img.copy(), detections)
  786. st.image(img_plate, width='stretch', caption="Point-of-Harvest AI Interpretation")
  787. else:
  788. st.warning(f"Technical Error: Archive file missing at `{record['archive_path']}`")
  789. # Technical Evidence Expander (Mathematical Audit)
  790. st.divider()
  791. st.write("### 🛠️ Technical Audit Trail")
  792. with st.expander("🔬 View Raw Mathematical Tensor", expanded=False):
  793. st.info("This is the exact numerical output from the AI engine prior to human-readable transformation.")
  794. raw_data = record.get('raw_tensor')
  795. if raw_data:
  796. try:
  797. st.json(json.loads(raw_data))
  798. except:
  799. st.code(raw_data)
  800. else:
  801. st.warning("No raw tensor trace was archived for this legacy record.")
  802. else:
  803. st.error(f"Vault Connection Failed: {res.text}")
  804. except Exception as e:
  805. st.error(f"Audit System Error: {str(e)}")
  806. # --- Tab 5: Batch Reviewer ---
  807. with tab5:
  808. st.subheader("📦 Local Batch Reviewer")
  809. st.caption("Provide a local directory path to review the AI Data Contract and evidence.")
  810. # 1. Path Input
  811. batch_path = st.text_input(
  812. "Enter Batch Folder Path:",
  813. placeholder="e.g., batch_outputs/BATCH_2646CB27",
  814. help="The folder should contain 'manifest.json' and a 'raw' subfolder."
  815. )
  816. if batch_path:
  817. manifest_path = os.path.join(batch_path, "manifest.json")
  818. raw_dir = os.path.join(batch_path, "raw")
  819. # 2. Validation
  820. if not os.path.exists(manifest_path):
  821. st.error(f"❌ Could not find `manifest.json` at: `{manifest_path}`")
  822. elif not os.path.exists(raw_dir):
  823. st.error(f"❌ Could not find `raw` folder at: `{raw_dir}`")
  824. else:
  825. # 3. Load the Contract
  826. try:
  827. with open(manifest_path, 'r') as f:
  828. manifest = json.load(f)
  829. # --- Batch Header: Metadata Audit ---
  830. with st.container(border=True):
  831. c1, c2, c3 = st.columns(3)
  832. with c1:
  833. st.metric("Batch ID", manifest['job_id'])
  834. with c2:
  835. ctx = manifest.get('source_context', {})
  836. st.write(f"**Venue:** {ctx.get('estate', 'N/A')}")
  837. st.write(f"**Block:** {ctx.get('block', 'B12')}")
  838. with c3:
  839. eng = manifest.get('engine', {})
  840. st.write(f"**AI Engine:** {eng.get('name')} ({eng.get('type')})")
  841. st.write(f"**Threshold:** {eng.get('threshold')}")
  842. st.divider()
  843. # --- Inventory Review ---
  844. st.write("### 📂 Production Inventory")
  845. for item in manifest['inventory']:
  846. fname = item['filename']
  847. img_full_path = os.path.join(raw_dir, fname)
  848. if os.path.exists(img_full_path):
  849. with st.expander(f"🖼️ {fname}", expanded=False):
  850. img = Image.open(img_full_path).convert("RGB")
  851. width, height = img.size
  852. # --- Coordinate Remapping Engine ---
  853. # We use 'norm_box' to remain resolution-agnostic for the subscriber
  854. remapped_detections = []
  855. for d in item['detections']:
  856. nx1, ny1, nx2, ny2 = d['norm_box']
  857. remapped_detections.append({
  858. **d,
  859. # Map ratios back to absolute pixels of the loaded image
  860. "box": [nx1 * width, ny1 * height, nx2 * width, ny2 * height]
  861. })
  862. # --- Side-by-Side Review ---
  863. v_col1, v_col2 = st.columns([2, 1])
  864. with v_col1:
  865. # Reuse high-performance interactive viewer
  866. display_interactive_results(img, remapped_detections, key=f"rev_{item['image_id']}")
  867. with v_col2:
  868. st.write("#### 📡 Subscriber Payload")
  869. st.info("Clean metadata ready for hand-off to ERP or Vectorization.")
  870. # Extract non-geometric business data
  871. payload = [{
  872. "id": det['bunch_id'],
  873. "grade": det['class'],
  874. "score": det['confidence'],
  875. "alert": det['is_health_alert']
  876. } for det in remapped_detections]
  877. st.json(payload)
  878. if st.button(f"🚀 Vectorize Image {item['image_id']}", key=f"btn_{item['image_id']}"):
  879. st.toast(f"Broadcasting data for {fname} to remote subscribers...")
  880. else:
  881. st.warning(f"⚠️ Image missing from /raw folder: `{fname}`")
  882. except Exception as e:
  883. st.error(f"Failed to load batch: {e}")