|
|
@@ -1,60 +1,120 @@
|
|
|
import streamlit as st
|
|
|
-from ultralytics import YOLO
|
|
|
+import requests
|
|
|
from PIL import Image
|
|
|
-import numpy as np
|
|
|
import io
|
|
|
+import base64
|
|
|
+
|
|
|
+# --- API Configuration ---
|
|
|
+API_BASE_URL = "http://localhost:8000"
|
|
|
|
|
|
# --- Page Config ---
# Wide layout so the two-column original/result views have room to breathe.
st.set_page_config(layout="wide", page_title="Palm Oil Ripeness AI")

# App header.
st.title("🌴 Palm Oil FFB Management System")
st.markdown("### Production-Ready AI Analysis & Archival")
|
|
|
|
|
|
-# --- Load Model (Cached for performance) ---
|
|
|
-@st.cache_resource
|
|
|
-def load_model():
|
|
|
- return YOLO("best.pt")
|
|
|
# --- Sidebar ---
st.sidebar.header("Backend Controls")


def update_confidence():
    """Push the slider's confidence threshold to the backend API.

    Reads the new value from ``st.session_state.conf_slider`` (populated by
    the sidebar slider's ``key``) and POSTs it to ``/set_confidence``.
    Shows a toast on success and a sidebar error on failure.
    """
    new_conf = st.session_state.conf_slider
    try:
        # Bounded timeout so a hung backend cannot freeze the UI callback.
        resp = requests.post(
            f"{API_BASE_URL}/set_confidence",
            json={"threshold": new_conf},
            timeout=5,
        )
        # Surface 4xx/5xx responses instead of silently reporting success.
        resp.raise_for_status()
        st.toast(f"Threshold updated to {new_conf}")
    except requests.RequestException:
        # Narrow except: catch only network/HTTP failures, not coding bugs.
        st.sidebar.error("Failed to update threshold")
|
|
|
|
|
|
# Fetch the backend's current threshold and render a slider kept in sync
# with it; user edits are pushed back via the on_change callback.
try:
    response = requests.get(f"{API_BASE_URL}/get_confidence", timeout=5)
    if response.status_code == 200:
        # 0.25 mirrors the backend's default when the key is absent.
        current_conf = response.json().get("current_confidence", 0.25)
        st.sidebar.success("Connected to API")

        # Synchronized slider: `key` stores the value in session state,
        # `on_change` propagates edits to the backend.
        st.sidebar.slider(
            "Confidence Threshold",
            0.1, 1.0,
            value=float(current_conf),
            key="conf_slider",
            on_change=update_confidence,
        )
    else:
        st.sidebar.error("API Error")
except requests.RequestException:
    # Narrow except: a bare `except:` would also hide programming errors.
    st.sidebar.error("Could not connect to Backend API. Please ensure it is running.")
|
|
|
|
|
|
# --- Tabs ---
# Three workflows: one-off analysis, bulk upload, and vector search.
_TAB_LABELS = ["Single Analysis", "Batch Processing", "Similarity Search"]
tab1, tab2, tab3 = st.tabs(_TAB_LABELS)
|
|
|
# --- Tab 1: Single Analysis ---
# Upload one image, POST it to /analyze, and show the archived record ID
# plus the per-detection class/confidence returned by the backend.
with tab1:
    st.subheader("Analyze Single Bunch")
    uploaded_file = st.file_uploader("Upload a bunch image...", type=["jpg", "jpeg", "png"], key="single")

    if uploaded_file:
        col1, col2 = st.columns(2)
        with col1:
            st.image(uploaded_file, caption="Input", use_container_width=True)

        with col2:
            if st.button("Run Full Analysis"):
                with st.spinner("Processing... (Detecting + Vectorizing + Archiving)"):
                    # Forward the raw upload; backend does decoding/inference.
                    files = {"file": (uploaded_file.name, uploaded_file.getvalue(), uploaded_file.type)}
                    try:
                        # Generous timeout: detection + vectorizing + archiving.
                        res = requests.post(f"{API_BASE_URL}/analyze", files=files, timeout=120)
                    except requests.RequestException as exc:
                        # Connection/timeout errors no longer crash the app mid-render.
                        st.error(f"Analysis Failed: {exc}")
                    else:
                        if res.status_code == 200:
                            data = res.json()
                            st.success(f"✅ Record Archived! ID: {data['record_id']}")

                            # .get() so a response with no detections doesn't KeyError.
                            for det in data.get('detections', []):
                                st.info(f"**{det['class']}** - {det['confidence']:.2%} confidence")
                        else:
                            st.error(f"Analysis Failed: {res.text}")
|
|
|
# --- Tab 2: Batch Processing ---
# Upload several images at once and POST them to /analyze_batch.
with tab2:
    st.subheader("Bulk Analysis")
    uploaded_files = st.file_uploader("Upload multiple images...", type=["jpg", "jpeg", "png"], accept_multiple_files=True, key="batch")

    if uploaded_files:
        if st.button(f"Process {len(uploaded_files)} Images"):
            with st.spinner("Batch Processing in progress..."):
                # Repeated "files" field name => multipart list upload.
                files = [("files", (f.name, f.getvalue(), f.type)) for f in uploaded_files]
                try:
                    # Timeout scales with batch size; 60s per image is generous.
                    res = requests.post(f"{API_BASE_URL}/analyze_batch", files=files,
                                        timeout=60 * len(uploaded_files))
                except requests.RequestException as exc:
                    st.error(f"Batch Failed: {exc}")
                else:
                    if res.status_code == 200:
                        data = res.json()
                        st.success(f"Successfully processed {data['processed_count']} images.")
                        st.write("Generated Record IDs:")
                        st.code(data['record_ids'])
                    else:
                        # Include the body, matching the single-analysis error style.
                        st.error(f"Batch Failed: {res.text}")
|
|
|
# --- Tab 3: Similarity Search ---
# Upload a query image and list the nearest historical records returned by
# the backend's /search vector-index endpoint.
with tab3:
    st.subheader("Atlas Vector Search")
    st.markdown("Upload an image to find the most similar historical records in the database.")
    search_file = st.file_uploader("Search Image...", type=["jpg", "jpeg", "png"], key="search")

    if search_file:
        st.image(search_file, width=300)
        if st.button("Find Similar Bunches"):
            with st.spinner("Searching Vector Index..."):
                files = {"file": (search_file.name, search_file.getvalue(), search_file.type)}
                try:
                    res = requests.post(f"{API_BASE_URL}/search", files=files, timeout=60)
                except requests.RequestException as exc:
                    # Network failure shown inline instead of crashing the page.
                    st.error(f"Search failed: {exc}")
                else:
                    if res.status_code == 200:
                        results = res.json().get("results", [])
                        if not results:
                            st.warning("No similar records found.")
                        else:
                            for item in results:
                                with st.container(border=True):
                                    # c1 is reserved for a thumbnail fetched by record
                                    # id in a full deployment; only metadata is shown here.
                                    c1, c2 = st.columns([1, 2])
                                    with c2:
                                        st.write(f"**Class:** {item['ripeness_class']}")
                                        st.write(f"**Similarity Score:** {item['score']:.4f}")
                                        st.write(f"**Timestamp:** {item['timestamp']}")
                    else:
                        st.error("Search failed")