- from fastapi import FastAPI, File, UploadFile
- from ultralytics import YOLO
- import io
- import torch
- from PIL import Image
# FastAPI application instance; served by uvicorn via the __main__ guard below.
app = FastAPI()
# Load the custom-trained YOLO weights once at import time so every request
# reuses the same model. NOTE(review): 'best.pt' is resolved relative to the
# working directory — confirm deployment places the weights file there.
model = YOLO('best.pt')
@app.post("/detect")
async def detect_ripeness(file: UploadFile = File(...)):
    """Run YOLO detection on an uploaded image of palm-oil fresh fruit bunches.

    Parameters:
        file: multipart image upload (any format Pillow can decode).

    Returns:
        dict with keys:
            status: "success", or "error" for an undecodable upload
            data: list of {"class", "confidence", "box"} per detection,
                  where "box" is [x1, y1, x2, y2] pixel coordinates
            message: human-readable summary
    """
    image_bytes = await file.read()

    # Decode defensively: a corrupt or non-image upload makes Image.open raise
    # UnidentifiedImageError (an OSError subclass); report it as a structured
    # error instead of letting the endpoint respond with a 500.
    try:
        # Normalize to RGB — RGBA/palette/grayscale uploads can otherwise
        # break or degrade inference on a model trained on 3-channel input.
        img = Image.open(io.BytesIO(image_bytes)).convert("RGB")
    except OSError:
        return {
            "status": "error",
            "data": [],
            "message": "Uploaded file is not a valid image",
        }

    # Run YOLO detection on the decoded image.
    results = model(img)

    # Flatten all boxes from all result objects into JSON-serializable dicts.
    detections = [
        {
            "class": model.names[int(box.cls)],
            "confidence": round(float(box.conf), 2),
            "box": box.xyxy.tolist()[0],
        }
        for r in results
        for box in r.boxes
    ]

    return {
        "status": "success",
        "data": detections,
        "message": "Model processed palm oil FFB successfully",
    }
# Development entry point: run the API directly with `python thisfile.py`.
# Binds to all interfaces on port 8000.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)