|
|
@@ -4,10 +4,26 @@ from PIL import Image
|
|
|
import io
|
|
|
import base64
|
|
|
|
|
|
-# --- API Configuration ---
|
|
|
+# --- 1. Global Backend Check ---
|
|
|
API_BASE_URL = "http://localhost:8000"
|
|
|
|
|
|
-# --- Page Config ---
|
|
|
def check_backend():
    """Probe the backend API and report whether it is reachable.

    Uses the lightweight ``/get_confidence`` endpoint as a health check with
    a short timeout so a dead server cannot hang the UI on startup.

    Returns:
        bool: True if the endpoint answered with HTTP 200, False otherwise.
    """
    try:
        res = requests.get(f"{API_BASE_URL}/get_confidence", timeout=2)
        return res.status_code == 200
    except requests.RequestException:
        # A bare `except:` here would also swallow KeyboardInterrupt and
        # SystemExit; only network-level failures should mean "offline".
        return False
|
|
|
+
|
|
|
# Probe the backend once per script run; the whole app is gated on the result.
backend_active: bool = check_backend()

if not backend_active:
    # Offline path: render a notice instead of the app. Note this emits
    # Streamlit commands before set_page_config below ever runs — safe only
    # because st.stop() prevents reaching set_page_config in this branch.
    st.error("⚠️ Backend API is offline!")
    st.info("Please start the backend server first (e.g., `python main.py`) to unlock AI features.")
    # Retry triggers a rerun, which re-executes check_backend() above.
    if st.button("🔄 Retry Connection"):
        st.rerun()
    st.stop()  # Stops execution here, effectively disabling the app
|
|
|
+
|
|
|
# --- 2. Main Page Config (Only rendered if backend is active) ---
# First Streamlit page command on the online path (the offline branch above
# stops execution before reaching here).
st.set_page_config(page_title="Palm Oil Ripeness AI", layout="wide")
st.title("🌴 Palm Oil FFB Management System")
st.markdown("### Production-Ready AI Analysis & Archival")
|
|
|
@@ -23,24 +39,19 @@ def update_confidence():
|
|
|
except:
|
|
|
st.sidebar.error("Failed to update threshold")
|
|
|
|
|
|
-try:
|
|
|
- response = requests.get(f"{API_BASE_URL}/get_confidence")
|
|
|
- if response.status_code == 200:
|
|
|
- current_conf = response.json().get("current_confidence", 0.25)
|
|
|
- st.sidebar.success(f"Connected to API")
|
|
|
-
|
|
|
- # Synchronized Slider
|
|
|
- st.sidebar.slider(
|
|
|
- "Confidence Threshold",
|
|
|
- 0.1, 1.0,
|
|
|
- value=float(current_conf),
|
|
|
- key="conf_slider",
|
|
|
- on_change=update_confidence
|
|
|
- )
|
|
|
- else:
|
|
|
- st.sidebar.error("API Error")
|
|
|
-except:
|
|
|
- st.sidebar.error("Could not connect to Backend API. Please ensure it is running.")
|
|
|
# The startup probe succeeded, but the backend may have gone down since, so
# this call still guards against network/JSON errors instead of assuming
# success (an unguarded failure here would crash the entire page).
try:
    response = requests.get(f"{API_BASE_URL}/get_confidence", timeout=5)
    response.raise_for_status()
    current_conf = response.json().get("current_confidence", 0.25)
    st.sidebar.success("Connected to API")
except (requests.RequestException, ValueError):
    # ValueError covers a non-JSON response body; fall back to the default
    # threshold so the slider below can still render.
    current_conf = 0.25
    st.sidebar.error("Could not connect to Backend API. Please ensure it is running.")

# Synchronized Slider: seeded from the backend's current value; on_change
# pushes the user's new value back via update_confidence (defined above).
st.sidebar.slider(
    "Confidence Threshold",
    0.1, 1.0,
    value=float(current_conf),
    key="conf_slider",
    on_change=update_confidence
)
|
|
|
|
|
|
# --- Tabs ---
# Three workflows: one-off analysis, bulk ingestion, and vector search.
tab1, tab2, tab3 = st.tabs(["Single Analysis", "Batch Processing", "Similarity Search"])
|
|
|
@@ -53,7 +64,7 @@ with tab1:
|
|
|
if uploaded_file:
|
|
|
col1, col2 = st.columns(2)
|
|
|
with col1:
|
|
|
- st.image(uploaded_file, caption="Input", use_container_width=True)
|
|
|
+ st.image(uploaded_file, caption="Input", width=500)
|
|
|
|
|
|
with col2:
|
|
|
if st.button("Run Full Analysis"):
|
|
|
@@ -73,48 +84,117 @@ with tab1:
|
|
|
# --- Tab 2: Batch Processing ---
with tab2:
    st.subheader("Bulk Analysis")

    # 1. Initialize Session State
    # batch_uploader_key is embedded in the uploader's widget key below;
    # bumping it mounts a fresh (empty) uploader on the next rerun.
    # last_batch_results persists the API response across reruns.
    if "batch_uploader_key" not in st.session_state:
        st.session_state.batch_uploader_key = 0
    if "last_batch_results" not in st.session_state:
        st.session_state.last_batch_results = None

    # 2. Display Persisted Results (if any)
    # Shown after the post-success rerun because the data lives in
    # session state rather than in local variables.
    if st.session_state.last_batch_results:
        res_data = st.session_state.last_batch_results
        with st.container(border=True):
            st.success(f"✅ Successfully processed {res_data['processed_count']} images.")
            st.write("Generated Record IDs:")
            st.code(res_data['record_ids'])
            if st.button("Clear Results & Start New Batch"):
                st.session_state.last_batch_results = None
                st.rerun()
        st.divider()

    # 3. Uploader UI
    col_batch1, col_batch2 = st.columns([4, 1])
    with col_batch1:
        uploaded_files = st.file_uploader(
            "Upload multiple images...",
            type=["jpg", "jpeg", "png"],
            accept_multiple_files=True,
            # Key rotates with batch_uploader_key so the uploader can be reset.
            key=f"batch_{st.session_state.batch_uploader_key}"
        )

    with col_batch2:
        st.write("##")  # Alignment
        if st.button("🗑️ Reset Uploader"):
            # New key -> new empty uploader widget after the rerun.
            st.session_state.batch_uploader_key += 1
            st.rerun()

    if uploaded_files:
        if st.button(f"🚀 Process {len(uploaded_files)} Images"):
            with st.spinner("Batch Processing in progress..."):
                # Multipart form: one ("files", ...) part per uploaded image.
                files = [("files", (f.name, f.getvalue(), f.type)) for f in uploaded_files]
                # NOTE(review): no timeout on this request — a hung backend
                # blocks this rerun indefinitely; consider timeout= — confirm.
                res = requests.post(f"{API_BASE_URL}/analyze_batch", files=files)

                if res.status_code == 200:
                    # 4. Success: Store results and Clear Uploader automatically
                    st.session_state.last_batch_results = res.json()
                    st.session_state.batch_uploader_key += 1
                    st.rerun()
                else:
                    st.error(f"Batch Failed: {res.text}")
|
|
|
|
|
|
# --- Tab 3: Similarity Search ---
with tab3:
    st.subheader("Hybrid Semantic Search")
    st.markdown("Search records by either **Image Similarity** or **Natural Language Query**.")

    # A form batches the two inputs + slider so a single submit triggers
    # exactly one search request.
    with st.form("hybrid_search_form"):
        col_input1, col_input2 = st.columns(2)

        with col_input1:
            search_file = st.file_uploader("Option A: Search Image...", type=["jpg", "jpeg", "png"], key="search")

        with col_input2:
            text_query = st.text_input("Option B: Natural Language Query", placeholder="e.g., 'ripe bunches with dark spots' or 'unripe fruit'")
            top_k = st.slider("Results Limit (Top K)", 1, 20, 3)

        submit_search = st.form_submit_button("Run Semantic Search")

    if submit_search:
        if not search_file and not text_query:
            st.warning("Please provide either an image or a text query.")
        else:
            with st.spinner("Searching Vector Index..."):
                payload = {"limit": top_k}

                # If an image is uploaded, it takes precedence for visual search
                if search_file:
                    files = {"file": (search_file.name, search_file.getvalue(), search_file.type)}
                    # Pass top_k as part of the data
                    res = requests.post(f"{API_BASE_URL}/search_hybrid", files=files, data=payload)
                # Otherwise, use text query
                elif text_query:
                    payload["text_query"] = text_query
                    # Send as form-data (data=) to match FastAPI's Form(None)
                    res = requests.post(f"{API_BASE_URL}/search_hybrid", data=payload)

                # `res` is always bound here: the guard above guarantees at
                # least one of search_file / text_query is truthy.
                if res.status_code == 200:
                    results = res.json().get("results", [])
                    if not results:
                        st.warning("No similar records found.")
                    else:
                        st.success(f"Found {len(results)} matches.")
                        for item in results:
                            with st.container(border=True):
                                c1, c2 = st.columns([1, 2])
                                # Fetch the image for this result.
                                # NOTE(review): one GET per result (N+1) with
                                # no timeout — fine for top_k<=20, but confirm
                                # latency is acceptable.
                                rec_id = item["_id"]
                                img_res = requests.get(f"{API_BASE_URL}/get_image/{rec_id}")

                                with c1:
                                    if img_res.status_code == 200:
                                        # Backend returns base64-encoded bytes
                                        # under "image_data".
                                        img_b64 = img_res.json().get("image_data")
                                        if img_b64:
                                            st.image(base64.b64decode(img_b64), width=250)
                                        else:
                                            st.write("No image data found.")
                                    else:
                                        st.write("Failed to load image.")

                                with c2:
                                    st.write(f"**Class:** {item['ripeness_class']}")
                                    st.write(f"**Similarity Score:** {item['score']:.4f}")
                                    st.write(f"**Timestamp:** {item['timestamp']}")
                                    st.write(f"**ID:** `{rec_id}`")
                else:
                    st.error(f"Search failed: {res.text}")
|