demo_app.py
import streamlit as st
import requests
from ultralytics import YOLO
import numpy as np
from PIL import Image
import io
import base64
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import json
import os
from datetime import datetime
from fpdf import FPDF
@st.dialog("📘 AI Interpretation Guide")
def show_tech_guide():
    st.write("### 🎯 What does 'Confidence' mean?")
    st.write("""
This is a probability score from **0.0 to 1.0**.
- **0.90+**: The AI is nearly certain this is a bunch of this grade.
- **0.25 (Threshold)**: We ignore anything below this to filter out 'ghost' detections or background noise.
""")
    st.write("### 🛠️ The Raw Mathematical Tensor")
    st.write("The AI returns a raw array of shape `[1, 300, 6]`. Here is the key:")
    st.table({
        "Index": ["0-3", "4", "5"],
        "Meaning": ["Coordinates (x1, y1, x2, y2)", "Confidence Score", "Class ID (0-5)"],
        "Reality": ["The 'Box' in the image.", "The AI's certainty.", "The Ripeness Grade."]
    })
    st.write("### ⚡ Inference vs. Processing Time")
    st.write("""
- **Inference Speed**: The time the AI model took to 'think' about the pixels.
- **Total Time**: Includes image uploading and database saving overhead.
""")
    st.info("💡 **Engine Note**: ONNX is optimized for latency (~39ms), while PyTorch offers native indicator flexibility.")
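# Illustrative sketch only (not called anywhere in this app): how a single row of the
# raw [1, 300, 6] tensor described in the guide above could be decoded. The row layout
# (x1, y1, x2, y2, confidence, class_id) and the 0.25 default threshold come from the
# guide; the class-name ordering below is a hypothetical example -- the real ID-to-name
# mapping is owned by the backend/model.
def _decode_raw_row_example(row, threshold=0.25,
                            class_names=("Unripe", "Underripe", "Ripe", "Overripe", "Abnormal", "Empty_Bunch")):
    x1, y1, x2, y2, conf, cls_id = row
    if conf < threshold:
        return None  # below the threshold -> treated as background noise / 'ghost' detection
    return {"box": [x1, y1, x2, y2], "confidence": conf, "class": class_names[int(cls_id)]}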
# --- 1. Global Backend Check ---
API_BASE_URL = "http://localhost:8000"

def check_backend():
    try:
        res = requests.get(f"{API_BASE_URL}/get_confidence", timeout=2)
        return res.status_code == 200
    except requests.RequestException:
        return False

backend_active = check_backend()

# LOCAL MODEL LOADING REMOVED (YOLO26 Clean Sweep)
# UI now relies entirely on Backend API for NMS-Free inference.
if not backend_active:
    st.error("⚠️ Backend API is offline!")
    st.info("Please start the backend server first (e.g., `python main.py`) to unlock AI features.")
    if st.button("🔄 Retry Connection"):
        st.rerun()
    st.stop()  # Stops execution here, effectively disabling the app
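# NOTE: REST contract this UI assumes, inferred from the calls in this file rather than
# from an authoritative backend spec (see main.py for the actual routes):
#   GET  /get_confidence            -> {"current_confidence": <float>}
#   POST /set_confidence (JSON)     <- {"threshold": <float>}
#   POST /analyze (multipart)       -> detections, industrial_summary, total_count,
#                                      inference_ms, processing_ms, raw_array_sample, current_threshold
#   POST /process_batch (multipart) -> status, processed_count, record_ids,
#                                      industrial_summary, detailed_results
#   POST /vectorize_and_store, POST /search_hybrid, GET /get_image/{id}, GET /get_history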
# --- 2. Main Page Config (Only rendered if backend is active) ---
st.set_page_config(page_title="Palm Oil Ripeness AI (YOLO26)", layout="wide")
st.title("🌴 Palm Oil FFB Management System")
st.markdown("### Production-Ready AI Analysis & Archival")

# --- Sidebar ---
st.sidebar.header("Backend Controls")

def update_confidence():
    new_conf = st.session_state.conf_slider
    try:
        requests.post(f"{API_BASE_URL}/set_confidence", json={"threshold": new_conf})
        st.toast(f"Threshold updated to {new_conf}")
    except requests.RequestException:
        st.sidebar.error("Failed to update threshold")

# We already know backend is up here
response = requests.get(f"{API_BASE_URL}/get_confidence")
current_conf = response.json().get("current_confidence", 0.25)
st.sidebar.success("Connected to API")
st.sidebar.info("Engine: YOLO26 NMS-Free (Inference: ~39ms)")

# Synchronized Slider
st.sidebar.slider(
    "Confidence Threshold",
    0.1, 1.0,
    value=float(current_conf),
    key="conf_slider",
    on_change=update_confidence
)

st.sidebar.markdown("---")
st.sidebar.subheader("Inference Engine")
engine_choice = st.sidebar.selectbox(
    "Select Model Engine",
    ["YOLO26 (PyTorch - Native)", "YOLO26 (ONNX - High Speed)"],
    index=0,
    help="ONNX is optimized for latency. PyTorch provides native object handling."
)

st.sidebar.markdown("---")
st.sidebar.subheader("🛠️ Technical Controls")
show_trace = st.sidebar.toggle("🔬 Show Technical Trace", value=False, help="Enable to see raw mathematical tensor data alongside AI labels.")
st.session_state.tech_trace = show_trace

model_type = "onnx" if "ONNX" in engine_choice else "pytorch"
if model_type == "pytorch":
    st.sidebar.warning("PyTorch Engine: Higher Memory Usage")
else:
    st.sidebar.info("ONNX Engine: ~39ms Latency")

st.sidebar.markdown("---")
if st.sidebar.button("❓ How to read results?", icon="📘", width='stretch'):
    show_tech_guide()
# Helper to reset results when files change
def reset_single_results():
    st.session_state.last_detection = None

def reset_batch_results():
    st.session_state.last_batch_results = None

# MPOB Color Map for Overlays (Global for consistency)
overlay_colors = {
    'Ripe': '#22c55e',        # Industrial Green
    'Underripe': '#fbbf24',   # Industrial Orange
    'Unripe': '#3b82f6',      # Industrial Blue
    'Abnormal': '#dc2626',    # Critical Red
    'Empty_Bunch': '#64748b', # Waste Gray
    'Overripe': '#7c2d12'     # Dark Brown/Orange
}
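# The six grade names above line up with the six class IDs (0-5) mentioned in the
# interpretation guide; the exact ID -> name mapping is defined by the backend/model,
# this dict only maps grade names to overlay colours.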
def display_interactive_results(image, detections, key=None):
    """Renders image with interactive hover-boxes using Plotly."""
    img_width, img_height = image.size
    fig = go.Figure()
    # Add the palm image as the background
    fig.add_layout_image(
        dict(source=image, x=0, y=img_height, sizex=img_width, sizey=img_height,
             sizing="stretch", opacity=1, layer="below", xref="x", yref="y")
    )
    # Configure axes to match image dimensions
    fig.update_xaxes(showgrid=False, range=(0, img_width), zeroline=False, visible=False)
    fig.update_yaxes(showgrid=False, range=(0, img_height), zeroline=False, visible=False, scaleanchor="x")
    # Add interactive boxes
    for i, det in enumerate(detections):
        x1, y1, x2, y2 = det['box']
        # Plotly y-axis is inverted relative to PIL, so we flip y
        y_top, y_bottom = img_height - y1, img_height - y2
        color = overlay_colors.get(det['class'], "#ffeb3b")
        # The 'Hover' shape
        bunch_id = det.get('bunch_id', i + 1)
        fig.add_trace(go.Scatter(
            x=[x1, x2, x2, x1, x1],
            y=[y_top, y_top, y_bottom, y_bottom, y_top],
            fill="toself",
            fillcolor=color,
            opacity=0.3,  # Semi-transparent until hover
            mode='lines',
            line=dict(color=color, width=3),
            name=f"Bunch #{bunch_id}",
            text=f"<b>ID: #{bunch_id}</b><br>Grade: {det['class']}<br>Score: {det['confidence']:.2f}<br>Alert: {det['is_health_alert']}",
            hoverinfo="text"
        ))
    fig.update_layout(width=800, height=600, margin=dict(l=0, r=0, b=0, t=0), showlegend=False)
    st.plotly_chart(fig, width='stretch', key=key)
def annotate_image(image, detections):
    """Draws high-visibility boxes and background-shaded labels."""
    from PIL import ImageDraw, ImageFont
    draw = ImageDraw.Draw(image)
    # Dynamic font size based on image resolution
    font_size = max(20, image.width // 40)
    try:
        font_path = "C:\\Windows\\Fonts\\arial.ttf"
        if os.path.exists(font_path):
            font = ImageFont.truetype(font_path, font_size)
        else:
            font = ImageFont.load_default()
    except Exception:
        font = ImageFont.load_default()
    for det in detections:
        box = det['box']  # [x1, y1, x2, y2]
        cls = det['class']
        conf = det['confidence']
        bunch_id = det.get('bunch_id', '?')
        color = overlay_colors.get(cls, '#ffffff')
        # 1. Draw Bold Bounding Box
        draw.rectangle(box, outline=color, width=max(4, image.width // 200))
        # 2. Draw Label Background (High Contrast)
        label = f"#{bunch_id} {cls} {conf:.2f}"
        try:
            # textbbox provides precise coordinates for background rectangle
            l, t, r, b = draw.textbbox((box[0], box[1] - font_size - 10), label, font=font)
            draw.rectangle([l - 5, t - 5, r + 5, b + 5], fill=color)
            draw.text((l, t), label, fill="white", font=font)
        except Exception:
            # Fallback for basic text drawing
            draw.text((box[0], box[1] - 25), label, fill=color)
    return image
def generate_batch_report(data, uploaded_files_map=None):
    """Generates a professional PDF report for batch results with visual evidence."""
    from PIL import ImageDraw
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", "B", 16)
    pdf.cell(190, 10, "Palm Oil FFB Harvest Quality Report", ln=True, align="C")
    pdf.set_font("Arial", "", 12)
    pdf.cell(190, 10, f"Date: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}", ln=True, align="C")
    pdf.ln(10)
    # 1. Summary Table
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "1. Batch Summary", ln=True)
    pdf.set_font("Arial", "", 12)
    summary = data.get('industrial_summary', {})
    total_bunches = data.get('total_count', 0)
    pdf.cell(95, 10, "Metric", border=1)
    pdf.cell(95, 10, "Value", border=1, ln=True)
    pdf.cell(95, 10, "Total Bunches Detected", border=1)
    pdf.cell(95, 10, str(total_bunches), border=1, ln=True)
    for grade, count in summary.items():
        if count > 0:
            pdf.cell(95, 10, f"Grade: {grade}", border=1)
            pdf.cell(95, 10, str(count), border=1, ln=True)
    pdf.ln(10)
    # 2. Strategic Insights
    pdf.set_font("Arial", "B", 14)
    pdf.cell(190, 10, "2. Strategic Yield Insights", ln=True)
    pdf.set_font("Arial", "", 12)
    unripe = summary.get('Unripe', 0)
    underripe = summary.get('Underripe', 0)
    loss = unripe + underripe
    if loss > 0:
        pdf.multi_cell(190, 10, f"WARNING: {loss} bunches were harvested before peak ripeness. "
                       "This directly impacts the Oil Extraction Rate (OER) and results in potential yield loss.")
    else:
        pdf.multi_cell(190, 10, "EXCELLENT: All detected bunches meet prime ripeness standards. Harvest efficiency is 100%.")
    # Critical Alerts
    abnormal = summary.get('Abnormal', 0)
    empty = summary.get('Empty_Bunch', 0)
    if abnormal > 0 or empty > 0:
        pdf.ln(5)
        pdf.set_text_color(220, 0, 0)
        pdf.set_font("Arial", "B", 12)
        pdf.cell(190, 10, "CRITICAL HEALTH ALERTS:", ln=True)
        pdf.set_font("Arial", "", 12)
        if abnormal > 0:
            pdf.cell(190, 10, f"- {abnormal} Abnormal Bunches detected (Requires immediate field inspection).", ln=True)
        if empty > 0:
            pdf.cell(190, 10, f"- {empty} Empty Bunches detected (Waste reduction needed).", ln=True)
        pdf.set_text_color(0, 0, 0)
    # 3. Visual Evidence Section
    if 'detailed_results' in data and uploaded_files_map:
        pdf.add_page()
        pdf.set_font("Arial", "B", 14)
        pdf.cell(190, 10, "3. Visual Batch Evidence (AI Overlay)", ln=True)
        pdf.ln(5)
        # Group detections by filename
        results_by_file = {}
        for res in data['detailed_results']:
            fname = res['filename']
            if fname not in results_by_file:
                results_by_file[fname] = []
            results_by_file[fname].append(res['detection'])
        for fname, detections in results_by_file.items():
            if fname in uploaded_files_map:
                img_bytes = uploaded_files_map[fname]
                img = Image.open(io.BytesIO(img_bytes)).convert("RGB")
                draw = ImageDraw.Draw(img)
                # Drawing annotated boxes for PDF using high-visibility utility
                annotate_image(img, detections)
                # Save to temp file for PDF
                temp_img_path = f"temp_report_{fname}"
                img.save(temp_img_path)
                # Check if we need a new page based on image height (rough estimate)
                if pdf.get_y() > 200:
                    pdf.add_page()
                pdf.image(temp_img_path, x=10, w=150)
                pdf.set_font("Arial", "I", 10)
                pdf.cell(190, 10, f"Annotated: {fname}", ln=True)
                pdf.ln(5)
                os.remove(temp_img_path)
    # Footer
    pdf.set_y(-15)
    pdf.set_font("Arial", "I", 8)
    pdf.cell(190, 10, "Generated by Palm Oil AI Desktop PoC - YOLO26 Engine", align="C")
    # Return real bytes for st.download_button: classic PyFPDF returns a latin-1 str
    # from output(dest='S'), while fpdf2 returns a bytearray.
    out = pdf.output(dest='S')
    return out.encode('latin-1') if isinstance(out, str) else bytes(out)
# --- Tabs ---
tab1, tab2, tab3, tab4 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search", "History Vault"])

# --- Tab 1: Single Analysis ---
with tab1:
    st.subheader("Analyze Single Bunch")
    uploaded_file = st.file_uploader(
        "Upload a bunch image...",
        type=["jpg", "jpeg", "png"],
        key="single",
        on_change=reset_single_results
    )
    if uploaded_file:
        # State initialization
        if "last_detection" not in st.session_state:
            st.session_state.last_detection = None
        # 1. Auto-Detection Trigger
        if uploaded_file and st.session_state.last_detection is None:
            with st.spinner(f"Processing with {model_type.upper()} Engine..."):
                files = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
                payload = {"model_type": model_type}
                res = requests.post(f"{API_BASE_URL}/analyze", files=files, data=payload)
                if res.status_code == 200:
                    st.session_state.last_detection = res.json()
                    st.rerun()  # Refresh to show results immediately
                else:
                    st.error(f"Detection Failed: {res.text}")
        # 2. Results Layout
        if st.session_state.last_detection:
            data = st.session_state.last_detection
            st.divider()
            st.write("### 📈 Manager's Dashboard")
            m_col1, m_col2, m_col3, m_col4 = st.columns(4)
            with m_col1:
                st.metric("Total Bunches", data.get('total_count', 0))
            with m_col2:
                st.metric("Healthy (Ripe)", data['industrial_summary'].get('Ripe', 0))
            with m_col3:
                # Refined speed label based on engine
                speed_label = "Raw Speed (Unlabeled)" if model_type == "onnx" else "Wrapped Speed (Auto-Labeled)"
                st.metric("Inference Speed", f"{data.get('inference_ms', 0):.1f} ms", help=speed_label)
            with m_col4:
                st.metric("Post-Processing", f"{data.get('processing_ms', 0):.1f} ms", help="Labeling/Scaling overhead")
            st.divider()
            # Side-by-Side View (Technical Trace)
            img = Image.open(uploaded_file).convert("RGB")
            if st.session_state.get('tech_trace', False):
                t_col1, t_col2 = st.columns(2)
                with t_col1:
                    st.subheader("🔢 Raw Output Tensor (The Math)")
                    st.caption("First 5 rows of the 1x300x6 detection tensor.")
                    st.json(data.get('raw_array_sample', []))
                with t_col2:
                    st.subheader("🎨 AI Interpretation")
                    img_annotated = annotate_image(img.copy(), data['detections'])
                    st.image(img_annotated, width='stretch')
            else:
                # Regular View
                st.write("### 🔍 AI Analytical View")
                display_interactive_results(img, data['detections'], key="main_viewer")
            col1, col2 = st.columns([1.5, 1])  # Keep original col structure for summary below
            with col1:
                col_tech_h1, col_tech_h2 = st.columns([4, 1])
                with col_tech_h1:
                    st.write("#### 🛠️ Technical Evidence")
                with col_tech_h2:
                    if st.button("❓ Guide", key="guide_tab1"):
                        show_tech_guide()
                with st.expander("Raw Output Tensor (NMS-Free)", expanded=False):
                    st.caption("See the Interpretation Guide for a breakdown of these numbers.")
                    st.json(data.get('raw_array_sample', []))
            with col2:  # Summary and actions column
                with st.container(border=True):
                    st.write("### 🏷️ Detection Results")
                    if not data['detections']:
                        st.warning("No Fresh Fruit Bunches detected.")
                    else:
                        for det in data['detections']:
                            st.info(f"### Bunch #{det['bunch_id']}: {det['class']} ({det['confidence']:.2%})")
                    st.write("### 📊 Harvest Quality Mix")
                    # Convert industrial_summary dictionary to a DataFrame for charting
                    summary_df = pd.DataFrame(
                        list(data['industrial_summary'].items()),
                        columns=['Grade', 'Count']
                    )
                    # Filter out classes with 0 count for a cleaner chart
                    summary_df = summary_df[summary_df['Count'] > 0]
                    if not summary_df.empty:
                        # Create a Pie Chart to show the proportion of each grade
                        fig = px.pie(summary_df, values='Count', names='Grade',
                                     color='Grade',
                                     color_discrete_map={
                                         'Ripe': '#22c55e',        # Industrial Green
                                         'Underripe': '#fbbf24',   # Industrial Orange
                                         'Unripe': '#3b82f6',      # Industrial Blue
                                         'Abnormal': '#dc2626',    # Critical Red
                                         'Empty_Bunch': '#64748b'  # Waste Gray
                                     },
                                     hole=0.4)
                        fig.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=300)
                        st.plotly_chart(fig, width='stretch', key="single_pie")

                    # 💡 Strategic R&D Insight: Harvest Efficiency
                    st.write("---")
                    st.write("#### 💡 Strategic R&D Insight")
                    unripe_count = data['industrial_summary'].get('Unripe', 0)
                    underripe_count = data['industrial_summary'].get('Underripe', 0)
                    total_non_prime = unripe_count + underripe_count
                    st.write(f"🌑 **Unripe (Mentah):** {unripe_count}")
                    st.write(f"🌗 **Underripe (Kurang Masak):** {underripe_count}")
                    if total_non_prime > 0:
                        st.warning(f"🚨 **Potential Yield Loss:** {total_non_prime} bunches harvested too early. This will reduce OER (Oil Extraction Rate).")
                    else:
                        st.success("✅ **Harvest Efficiency:** 100% Prime Ripeness detected.")

                    # High-Priority Health Alert
                    if data['industrial_summary'].get('Abnormal', 0) > 0:
                        st.error(f"🚨 CRITICAL: {data['industrial_summary']['Abnormal']} Abnormal Bunches Detected!")
                    if data['industrial_summary'].get('Empty_Bunch', 0) > 0:
                        st.warning(f"⚠️ ALERT: {data['industrial_summary']['Empty_Bunch']} Empty Bunches Detected.")
                    # 3. Cloud Actions (Only if detections found)
                    st.write("---")
                    st.write("#### ✨ Cloud Archive")
                    if st.button("🚀 Save to Atlas (Vectorize)", width='stretch'):
                        with st.spinner("Archiving..."):
                            import json
                            primary_det = data['detections'][0]
                            payload = {"detection_data": json.dumps(primary_det)}
                            files_cloud = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
                            res_cloud = requests.post(f"{API_BASE_URL}/vectorize_and_store", files=files_cloud, data=payload)
                            if res_cloud.status_code == 200:
                                res_json = res_cloud.json()
                                if res_json["status"] == "success":
                                    st.success(f"Archived! ID: `{res_json['record_id'][:8]}...`")
                                else:
                                    st.error(f"Cloud Error: {res_json['message']}")
                            else:
                                st.error("Failed to connect to cloud service")
                    if st.button("🚩 Flag Misclassification", width='stretch', type="secondary"):
                        # Save to local feedback folder (create it if it does not exist yet)
                        os.makedirs("feedback", exist_ok=True)
                        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                        feedback_id = f"fb_{timestamp}"
                        img_path = f"feedback/{feedback_id}.jpg"
                        json_path = f"feedback/{feedback_id}.json"
                        # Save image (convert to RGB so PNG uploads can be written as JPEG)
                        Image.open(uploaded_file).convert("RGB").save(img_path)
                        # Save metadata
                        feedback_data = {
                            "original_filename": uploaded_file.name,
                            "timestamp": timestamp,
                            "detections": data['detections'],
                            "threshold_used": data['current_threshold']
                        }
                        with open(json_path, "w") as f:
                            json.dump(feedback_data, f, indent=4)
                        st.toast("✅ Feedback saved to local vault!", icon="🚩")
                    if st.button("💾 Local History Vault (Auto-Saved)", width='stretch', type="secondary", disabled=True):
                        pass
                    st.caption("✅ This analysis was automatically archived to the local vault.")
# --- Tab 2: Batch Processing ---
with tab2:
    st.subheader("Bulk Analysis")
    # 1. Initialize Session State
    if "batch_uploader_key" not in st.session_state:
        st.session_state.batch_uploader_key = 0
    if "last_batch_results" not in st.session_state:
        st.session_state.last_batch_results = None
    # 2. Display Persisted Results (if any)
    if st.session_state.last_batch_results:
        res_data = st.session_state.last_batch_results
        with st.container(border=True):
            st.success(f"✅ Successfully processed {res_data['processed_count']} images.")
            # Batch Summary Dashboard
            st.write("### 📈 Batch Quality Overview")
            batch_summary = res_data.get('industrial_summary', {})
            if batch_summary:
                sum_df = pd.DataFrame(list(batch_summary.items()), columns=['Grade', 'Count'])
                sum_df = sum_df[sum_df['Count'] > 0]
                b_col1, b_col2 = st.columns([1, 1])
                with b_col1:
                    st.dataframe(sum_df, hide_index=True, width='stretch')
                with b_col2:
                    if not sum_df.empty:
                        fig_batch = px.bar(sum_df, x='Grade', y='Count', color='Grade',
                                           color_discrete_map={
                                               'Ripe': '#22c55e',
                                               'Underripe': '#fbbf24',
                                               'Unripe': '#3b82f6',
                                               'Abnormal': '#dc2626',
                                               'Empty_Bunch': '#64748b'
                                           })
                        fig_batch.update_layout(margin=dict(t=0, b=0, l=0, r=0), height=200, showlegend=False)
                        st.plotly_chart(fig_batch, width='stretch', key="batch_bar")
            if batch_summary.get('Abnormal', 0) > 0:
                st.error(f"🚨 BATCH CRITICAL: {batch_summary['Abnormal']} Abnormal Bunches found in this batch!")
            st.write("Generated Record IDs:")
            st.code(res_data['record_ids'])
            # --- 4. Batch Evidence Gallery ---
            st.write("### 🖼️ Detailed Detection Evidence")
            # The uploader below is reset after a batch run, so the original image bytes
            # are read back from session state (stored when the batch was processed).
            files_map = st.session_state.get("batch_files_map", {})
            if 'detailed_results' in res_data:
                # Group results by filename for gallery
                gallery_map = {}
                for res in res_data['detailed_results']:
                    fname = res['filename']
                    if fname not in gallery_map:
                        gallery_map[fname] = []
                    gallery_map[fname].append(res['detection'])
                # Show images with overlays using consistent utility
                for fname, dets in gallery_map.items():
                    if fname in files_map:
                        with st.container(border=True):
                            g_img = Image.open(io.BytesIO(files_map[fname])).convert("RGB")
                            g_annotated = annotate_image(g_img, dets)
                            st.image(g_annotated, caption=f"Evidence: {fname}", width='stretch')
            # PDF Export Button (Pass images map)
            pdf_bytes = generate_batch_report(res_data, files_map)
            st.download_button(
                label="📄 Download Executive Batch Report (PDF)",
                data=pdf_bytes,
                file_name=f"PalmOil_BatchReport_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf",
                mime="application/pdf",
                width='stretch'
            )
            if st.button("Clear Results & Start New Batch", width='stretch'):
                st.session_state.last_batch_results = None
                st.rerun()
        st.divider()
    # 3. Uploader UI
    col_batch1, col_batch2 = st.columns([4, 1])
    with col_batch1:
        uploaded_files = st.file_uploader(
            "Upload multiple images...",
            type=["jpg", "jpeg", "png"],
            accept_multiple_files=True,
            key=f"batch_{st.session_state.batch_uploader_key}",
            on_change=reset_batch_results
        )
    with col_batch2:
        st.write("##")  # Alignment
        if st.session_state.last_batch_results is None and uploaded_files:
            if st.button("🔍 Process Batch", type="primary", width='stretch'):
                with st.spinner(f"Analyzing {len(uploaded_files)} images with {model_type.upper()}..."):
                    files = [("files", (f.name, f.getvalue(), f.type)) for f in uploaded_files]
                    payload = {"model_type": model_type}
                    res = requests.post(f"{API_BASE_URL}/process_batch", files=files, data=payload)
                    if res.status_code == 200:
                        data = res.json()
                        if data["status"] == "success":
                            st.session_state.last_batch_results = data
                            # Keep the raw image bytes so the evidence gallery and PDF report
                            # can still render them after the uploader is reset below.
                            st.session_state.batch_files_map = {f.name: f.getvalue() for f in uploaded_files}
                            st.session_state.batch_uploader_key += 1
                            st.rerun()
                        elif data["status"] == "partial_success":
                            st.warning(data["message"])
                            st.info(f"Successfully detected {data['detections_count']} bunches locally.")
                        else:
                            st.error(f"Batch Error: {data['message']}")
                    else:
                        st.error(f"Batch Processing Failed: {res.text}")
    if st.button("🗑️ Reset Uploader"):
        st.session_state.batch_uploader_key += 1
        st.session_state.last_batch_results = None
        st.rerun()
# --- Tab 3: Similarity Search ---
with tab3:
    st.subheader("Hybrid Semantic Search")
    st.markdown("Search records by either **Image Similarity** or **Natural Language Query**.")
    with st.form("hybrid_search_form"):
        col_input1, col_input2 = st.columns(2)
        with col_input1:
            search_file = st.file_uploader("Option A: Search Image...", type=["jpg", "jpeg", "png"], key="search")
        with col_input2:
            text_query = st.text_input("Option B: Natural Language Query", placeholder="e.g., 'ripe bunches with dark spots' or 'unripe fruit'")
        top_k = st.slider("Results Limit (Top K)", 1, 20, 3)
        submit_search = st.form_submit_button("Run Semantic Search")
    if submit_search:
        if not search_file and not text_query:
            st.warning("Please provide either an image or a text query.")
        else:
            with st.spinner("Searching Vector Index..."):
                payload = {"limit": top_k}
                # If an image is uploaded, it takes precedence for visual search
                if search_file:
                    files = {"file": (search_file.name, search_file.getvalue(), search_file.type)}
                    # Pass top_k as part of the data
                    res = requests.post(f"{API_BASE_URL}/search_hybrid", files=files, data=payload)
                # Otherwise, use text query
                elif text_query:
                    payload["text_query"] = text_query
                    # Send as form-data (data=) to match FastAPI's Form(None)
                    res = requests.post(f"{API_BASE_URL}/search_hybrid", data=payload)
                if res.status_code == 200:
                    results = res.json().get("results", [])
                    if not results:
                        st.warning("No similar records found.")
                    else:
                        st.success(f"Found {len(results)} matches.")
                        for item in results:
                            with st.container(border=True):
                                c1, c2 = st.columns([1, 2])
                                # Fetch the image for this result
                                rec_id = item["_id"]
                                img_res = requests.get(f"{API_BASE_URL}/get_image/{rec_id}")
                                with c1:
                                    if img_res.status_code == 200:
                                        img_b64 = img_res.json().get("image_data")
                                        if img_b64:
                                            st.image(base64.b64decode(img_b64), width=250)
                                        else:
                                            st.write("No image data found.")
                                    else:
                                        st.write("Failed to load image.")
                                with c2:
                                    st.write(f"**Class:** {item['ripeness_class']}")
                                    st.write(f"**Similarity Score:** {item['score']:.4f}")
                                    st.write(f"**Timestamp:** {item['timestamp']}")
                                    st.write(f"**ID:** `{rec_id}`")
                else:
                    st.error(f"Search failed: {res.text}")
# --- Tab 4: History Vault ---
with tab4:
    st.subheader("📜 Local History Vault")
    if "selected_history_id" not in st.session_state:
        st.session_state.selected_history_id = None
    try:
        res = requests.get(f"{API_BASE_URL}/get_history")
        if res.status_code == 200:
            history_data = res.json().get("history", [])
            if not history_data:
                st.info("No saved records found.")
            else:
                if st.session_state.selected_history_id is None:
                    # ListView Mode
                    st.write("### 📋 Record List")
                    df_history = pd.DataFrame(history_data)[['id', 'filename', 'timestamp', 'inference_ms']]
                    st.dataframe(df_history, hide_index=True, width='stretch')
                    id_to_select = st.number_input("Enter Record ID to view details:", min_value=int(df_history['id'].min()), max_value=int(df_history['id'].max()), step=1)
                    if st.button("Deep Dive Analysis", type="primary"):
                        st.session_state.selected_history_id = id_to_select
                        st.rerun()
                else:
                    # Detail View Mode
                    record = next((item for item in history_data if item["id"] == st.session_state.selected_history_id), None)
                    if not record:
                        st.error("Record not found.")
                        if st.button("Back to List"):
                            st.session_state.selected_history_id = None
                            st.rerun()
                    else:
                        if st.button("⬅️ Back to History List"):
                            st.session_state.selected_history_id = None
                            st.rerun()
                        st.divider()
                        st.write(f"## 🔍 Deep Dive: Record #{record['id']} ({record['filename']})")
                        detections = json.loads(record['detections'])
                        summary = json.loads(record['summary'])
                        # Metrics Row
                        h_col1, h_col2, h_col3, h_col4 = st.columns(4)
                        with h_col1:
                            st.metric("Total Bunches", sum(summary.values()))
                        with h_col2:
                            st.metric("Healthy (Ripe)", summary.get('Ripe', 0))
                        with h_col3:
                            st.metric("Inference Speed", f"{record.get('inference_ms', 0) or 0:.1f} ms", help="Raw model speed")
                        with h_col4:
                            st.metric("Post-Processing", f"{record.get('processing_ms', 0) or 0:.1f} ms", help="Labeling overhead")
                        # Image View
                        if os.path.exists(record['archive_path']):
                            with open(record['archive_path'], "rb") as f:
                                hist_img = Image.open(f).convert("RGB")
                            display_interactive_results(hist_img, detections, key=f"hist_{record['id']}")
                        else:
                            st.error(f"Archive file not found: {record['archive_path']}")
                        # Technical Evidence Expander
                        col_hist_tech1, col_hist_tech2 = st.columns([4, 1])
                        with col_hist_tech1:
                            st.write("#### 🛠️ Technical Evidence")
                        with col_hist_tech2:
                            if st.button("❓ Guide", key="guide_hist"):
                                show_tech_guide()
                        with st.expander("Raw Output Tensor (Archive)", expanded=False):
                            st.caption("See the Interpretation Guide for a breakdown of these numbers.")
                            raw_data = record.get('raw_tensor')
                            if raw_data:
                                try:
                                    st.json(json.loads(raw_data))
                                except (ValueError, TypeError):
                                    st.text(raw_data)
                            else:
                                st.info("No raw tensor data available for this record.")
        else:
            st.error(f"Failed to fetch history: {res.text}")
    except Exception as e:
        st.error(f"Error loading history: {str(e)}")