Ver Fonte

suboptimal ver

Dr-Swopt há 1 semana
pai
commit
e3737f3290

+ 195 - 80
palm_oil_mobile/lib/screens/live_analysis_screen.dart

@@ -1,5 +1,6 @@
 import 'dart:io';
 import 'dart:ui';
+import 'dart:async';
 import 'package:flutter/material.dart';
 import 'package:camera/camera.dart';
 import 'package:permission_handler/permission_handler.dart';
@@ -9,6 +10,8 @@ import '../services/tflite_service.dart';
 import '../services/database_helper.dart';
 import '../models/palm_record.dart';
 
+enum DetectionState { searching, locking, capturing, cooldown }
+
 class LiveAnalysisScreen extends StatefulWidget {
   const LiveAnalysisScreen({super.key});
 
@@ -27,12 +30,18 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
   List<DetectionResult>? _detections;
   
   // Detection Lock Logic
-  bool _isLocked = false;
+  DetectionState _state = DetectionState.searching;
   static const double _lockThreshold = 0.60;
   static const int _frameThrottle = 2; // Check frames more frequently
   
-  final List<bool> _detectionHistory = List.filled(10, false, growable: true);
-  static const int _requiredHits = 4; // 4 out of 10 for a lock
+  final List<bool> _detectionHistory = List.filled(20, false, growable: true); // 20 frames buffer
+  static const int _requiredHits = 4; // Threshold for momentum ticks
+  int _currentHits = 0;               // Track hits for the timer
+
+  Timer? _lockTimer;
+  Timer? _cooldownTimer;
+  double _lockProgress = 0.0;
+  bool _showFlash = false;
 
   @override
   void initState() {
@@ -49,7 +58,7 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
 
     _controller = CameraController(
       cameras[0],
-      ResolutionPreset.medium, // Restoring to a valid preset
+      ResolutionPreset.low, // Downgraded resolution for performance
       enableAudio: false,
       imageFormatGroup: Platform.isAndroid ? ImageFormatGroup.yuv420 : ImageFormatGroup.bgra8888,
     );
@@ -58,13 +67,7 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
       await _controller!.initialize();
       await _tfliteService.initModel();
 
-      _controller!.startImageStream((image) {
-        if (_isProcessing) return;
-        _frameCount++;
-        if (_frameCount % _frameThrottle != 0) return;
-
-        _processStreamFrame(image);
-      });
+      _controller!.startImageStream(_handleImageStream);
 
       if (mounted) {
         setState(() {
@@ -76,6 +79,14 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
     }
   }
 
+  void _handleImageStream(CameraImage image) {
+    if (_isProcessing || _state == DetectionState.capturing || _state == DetectionState.cooldown) return;
+    _frameCount++;
+    if (_frameCount % _frameThrottle != 0) return;
+
+    _processStreamFrame(image);
+  }
+
   Future<void> _processStreamFrame(CameraImage image) async {
     setState(() => _isProcessing = true);
     try {
@@ -90,30 +101,100 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
       _detectionHistory.removeAt(0);
       _detectionHistory.add(currentFrameHasFruit);
 
-      final hits = _detectionHistory.where((h) => h).length;
+      _currentHits = _detectionHistory.where((h) => h).length;
 
-      if (mounted) {
-        setState(() {
-          _detections = detections;
-          _isLocked = hits >= _requiredHits;
-        });
+      if (!mounted) return;
+
+      setState(() {
+        _detections = detections;
+      });
+
+      if (_state == DetectionState.searching) {
+        if (_currentHits >= _requiredHits) {
+          setState(() {
+            _state = DetectionState.locking;
+            _lockProgress = 0.0;
+          });
+          _startLockTimer();
+        }
       }
+      // Removed the old strict cancel logic.
+      // _startLockTimer now safely handles momentum drain.
     } catch (e) {
       print("Stream processing error: $e");
     } finally {
-      _isProcessing = false;
+      if (mounted) {
+        setState(() => _isProcessing = false);
+      }
     }
   }
 
+  void _startLockTimer() {
+    _lockTimer?.cancel();
+    const duration = Duration(milliseconds: 100);
+    int momentumTicks = 0;
+    _lockTimer = Timer.periodic(duration, (timer) {
+      if (!mounted) {
+        timer.cancel();
+        return;
+      }
+      
+      // Momentum logic: add or subtract
+      if (_currentHits >= _requiredHits) {
+        momentumTicks++;
+      } else {
+        momentumTicks--;
+      }
+      
+      if (momentumTicks < 0) momentumTicks = 0;
+      
+      setState(() {
+        _lockProgress = (momentumTicks / 3.0).clamp(0.0, 1.0); // 3 ticks target
+      });
+      
+      if (momentumTicks >= 3) {
+        timer.cancel();
+        if (_state == DetectionState.locking) {
+          _triggerCapture();
+        }
+      } else if (momentumTicks <= 0 && _state == DetectionState.locking) {
+        // Complete momentum loss -> Cancel lock
+        timer.cancel();
+        setState(() {
+          _state = DetectionState.searching;
+          _lockProgress = 0.0;
+        });
+      }
+    });
+  }
+
  /// Cancels and clears any pending lock-on timer.
  ///
  /// NOTE(review): nothing in this diff calls this helper — the
  /// momentum-drain path inside [_startLockTimer] cancels its own timer.
  /// Confirm whether an external abort path still needs it.
  void _cancelLockTimer() {
    _lockTimer?.cancel();
    _lockTimer = null;
  }
+
+  Future<void> _triggerCapture() async {
+    setState(() {
+      _state = DetectionState.capturing;
+      _lockProgress = 1.0;
+      _showFlash = true;
+    });
+    
+    // Quick 200ms white flash without blocking
+    Future.delayed(const Duration(milliseconds: 200), () {
+      if (mounted) setState(() => _showFlash = false);
+    });
+    
+    await _captureAndAnalyze();
+  }
+
   Future<void> _captureAndAnalyze() async {
     if (_controller == null || !_controller!.value.isInitialized) return;
 
     // 1. Stop stream to avoid resource conflict
     await _controller!.stopImageStream();
     
-    // Show loading dialog
     if (!mounted) return;
-    _showLoadingDialog();
 
     try {
       // 2. Take high-res picture
@@ -154,50 +235,44 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
 
         // 5. Show result and resume camera
         if (mounted) {
-          Navigator.of(context).pop(); // Close loading
-          _showResultSheet(record);
+          await _showResultSheet(record);
+          _startCooldown();
         }
       } else {
          if (mounted) {
-          Navigator.of(context).pop();
           ScaffoldMessenger.of(context).showSnackBar(
             const SnackBar(content: Text("No palm bunches detected in final snap."))
           );
+          _startCooldown();
         }
       }
     } catch (e) {
-      if (mounted) Navigator.of(context).pop();
       print("Capture error: $e");
-    } finally {
-      // Restart stream
-      _controller!.startImageStream((image) {
-        if (_isProcessing) return;
-        _frameCount++;
-        if (_frameCount % _frameThrottle != 0) return;
-        _processStreamFrame(image);
-      });
+      if (mounted) _startCooldown();
     }
   }
-
-  void _showLoadingDialog() {
-    showDialog(
-      context: context,
-      barrierDismissible: false,
-      builder: (context) => const Center(
-        child: Column(
-          mainAxisSize: MainAxisSize.min,
-          children: [
-            CircularProgressIndicator(color: Colors.white),
-            SizedBox(height: 16),
-            Text("Final Grading...", style: TextStyle(color: Colors.white)),
-          ],
-        ),
-      ),
-    );
+  
+  void _startCooldown() {
+    if (!mounted) return;
+    setState(() {
+      _state = DetectionState.cooldown;
+      _detections = null; // Clear boxes
+    });
+    
+    // Clear detection history to ignore old hits
+    _detectionHistory.fillRange(0, _detectionHistory.length, false);
+
+    _cooldownTimer?.cancel();
+    _cooldownTimer = Timer(const Duration(seconds: 3), () {
+      if (!mounted) return;
+      setState(() {
+        _state = DetectionState.searching;
+      });
+      _controller?.startImageStream(_handleImageStream);
+    });
   }
 
-  void _showResultSheet(PalmRecord record) {
-    // Determine color based on ripeness class
+  Future<void> _showResultSheet(PalmRecord record) async {
     Color statusColor = const Color(0xFFFF9800); // Default orange
     if (record.ripenessClass == 'Empty_Bunch' || record.ripenessClass == 'Abnormal') {
       statusColor = const Color(0xFFF44336);
@@ -205,9 +280,11 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
       statusColor = const Color(0xFF4CAF50);
     }
 
-    showModalBottomSheet(
+    await showModalBottomSheet(
       context: context,
       isScrollControlled: true,
+      isDismissible: false,
+      enableDrag: false,
       shape: const RoundedRectangleBorder(borderRadius: BorderRadius.vertical(top: Radius.circular(20))),
       builder: (context) => Container(
         padding: const EdgeInsets.all(24),
@@ -245,6 +322,8 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
       return const Scaffold(body: Center(child: CircularProgressIndicator()));
     }
 
+    final isLockedVisual = _state == DetectionState.locking || _state == DetectionState.capturing;
+
     return Scaffold(
       backgroundColor: Colors.black,
       body: Stack(
@@ -255,7 +334,7 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
           ),
           
           // Bounding Box Overlays
-          if (_detections != null)
+          if (_detections != null && _state != DetectionState.capturing && _state != DetectionState.cooldown)
             Positioned.fill(
               child: LayoutBuilder(
                 builder: (context, constraints) {
@@ -282,14 +361,18 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
               child: Row(
                 children: [
                   Icon(
-                    _isLocked ? Icons.lock : Icons.center_focus_weak,
-                    color: _isLocked ? Colors.green : Colors.yellow,
+                    _state == DetectionState.cooldown ? Icons.pause_circle_filled : 
+                    isLockedVisual ? Icons.lock : Icons.center_focus_weak,
+                    color: _state == DetectionState.cooldown ? Colors.blue : 
+                           isLockedVisual ? Colors.green : Colors.yellow,
                   ),
                   const SizedBox(width: 8),
                   Text(
-                    _isLocked ? "LOCKED" : "TARGETING...",
+                    _state == DetectionState.cooldown ? "COOLDOWN" : 
+                    isLockedVisual ? "LOCKING" : "SEARCHING...",
                     style: TextStyle(
-                      color: _isLocked ? Colors.green : Colors.yellow,
+                      color: _state == DetectionState.cooldown ? Colors.blue : 
+                             isLockedVisual ? Colors.green : Colors.yellow,
                       fontWeight: FontWeight.bold,
                     ),
                   ),
@@ -303,35 +386,65 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
             ),
           ),
 
-          // Bottom Controls
-          Positioned(
-            bottom: 40,
-            left: 0,
-            right: 0,
-            child: Center(
-              child: GestureDetector(
-                onTap: _isLocked ? _captureAndAnalyze : null,
-                child: Container(
-                  width: 80,
-                  height: 80,
-                  decoration: BoxDecoration(
-                    shape: BoxShape.circle,
-                    border: Border.all(color: Colors.white, width: 4),
-                    color: _isLocked ? Colors.green.withOpacity(0.8) : Colors.white24,
-                  ),
-                  child: Icon(
-                    _isLocked ? Icons.camera_alt : Icons.hourglass_empty,
-                    color: Colors.white,
-                    size: 32,
+          // Progress Overlay for Locking
+          if (_state == DetectionState.locking)
+            Positioned.fill(
+              child: Center(
+                child: SizedBox(
+                  width: 120,
+                  height: 120,
+                  child: TweenAnimationBuilder<double>(
+                    tween: Tween<double>(begin: 0.0, end: _lockProgress),
+                    duration: const Duration(milliseconds: 100),
+                    builder: (context, value, _) => CircularProgressIndicator(
+                      value: value,
+                      strokeWidth: 8,
+                      color: Colors.greenAccent,
+                      backgroundColor: Colors.white24,
+                    ),
                   ),
                 ),
               ),
             ),
+
+          // White flash overlay
+          Positioned.fill(
+            child: IgnorePointer(
+              child: AnimatedOpacity(
+                opacity: _showFlash ? 1.0 : 0.0,
+                duration: const Duration(milliseconds: 200),
+                child: Container(color: Colors.white),
+              ),
+            ),
           ),
-          
-          if (!_isLocked)
+            
+          if (_state == DetectionState.capturing && !_showFlash)
+            Positioned.fill(
+              child: Container(
+                color: Colors.black45,
+                child: const Center(
+                  child: CircularProgressIndicator(color: Colors.white),
+                ),
+              ),
+            ),
+            
+          if (_state == DetectionState.cooldown)
+             Positioned.fill(
+              child: Container(
+                color: Colors.black45,
+                child: const Center(
+                  child: Text(
+                    "Resuming scan...",
+                    style: TextStyle(color: Colors.white, fontSize: 18, fontWeight: FontWeight.bold),
+                  ),
+                ),
+              ),
+            ),
+
+          // Bottom Hint
+          if (_state == DetectionState.searching)
             const Positioned(
-              bottom: 130,
+              bottom: 40,
               left: 0,
               right: 0,
               child: Center(
@@ -349,7 +462,7 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
   Widget _buildOverlayBox(DetectionResult detection, BoxConstraints constraints) {
     final rect = detection.normalizedBox;
     // Show green only if the system is overall "Locked" and this detection is high confidence
-    final color = (_isLocked && detection.confidence > _lockThreshold) ? Colors.green : Colors.yellow;
+    final color = ((_state == DetectionState.locking || _state == DetectionState.capturing) && detection.confidence > _lockThreshold) ? Colors.green : Colors.yellow;
 
     return Positioned(
       left: rect.left * constraints.maxWidth,
@@ -378,6 +491,8 @@ class _LiveAnalysisScreenState extends State<LiveAnalysisScreen> {
 
   @override
   void dispose() {
+    _lockTimer?.cancel();
+    _cooldownTimer?.cancel();
     _controller?.dispose();
     _tfliteService.dispose();
     super.dispose();

+ 215 - 144
palm_oil_mobile/lib/services/tflite_service.dart

@@ -1,6 +1,8 @@
 import 'dart:io';
 import 'dart:math';
 import 'dart:ui';
+import 'dart:isolate';
+import 'dart:async';
 import 'package:flutter/services.dart';
 import 'package:flutter/foundation.dart';
 import 'package:image/image.dart' as img;
@@ -8,12 +10,10 @@ import 'package:image_picker/image_picker.dart';
 import 'package:tflite_flutter/tflite_flutter.dart';
 import 'package:camera/camera.dart';
 
-/// A detection result parsed from the model's end-to-end output.
 class DetectionResult {
   final String className;
   final int classIndex;
   final double confidence;
-  /// Normalized bounding box (0.0 - 1.0)
   final Rect normalizedBox;
 
   const DetectionResult({
@@ -24,44 +24,73 @@ class DetectionResult {
   });
 
   Color getStatusColor() {
-    if (className == 'Empty_Bunch' || className == 'Abnormal') return const Color(0xFFF44336); // Colors.red
-    if (className == 'Ripe' || className == 'Overripe') return const Color(0xFF4CAF50); // Colors.green
-    return const Color(0xFFFF9800); // Colors.orange
+    if (className == 'Empty_Bunch' || className == 'Abnormal') return const Color(0xFFF44336);
+    if (className == 'Ripe' || className == 'Overripe') return const Color(0xFF4CAF50);
+    return const Color(0xFFFF9800);
   }
 }
 
-/// Custom TFLite inference service that correctly decodes the end-to-end
-/// YOLO model output format [1, N, 6] = [batch, detections, (x1,y1,x2,y2,conf,class_id)].
 class TfliteService {
   static const _modelAsset = 'best.tflite';
   static const _labelsAsset = 'labels.txt';
   static const int _inputSize = 640;
   static const double _confidenceThreshold = 0.25;
 
-  Interpreter? _interpreter;
+  Isolate? _isolate;
+  SendPort? _sendPort;
+  ReceivePort? _receivePort;
+
   List<String> _labels = [];
   final ImagePicker _picker = ImagePicker();
   bool _isInitialized = false;
+  bool _isIsolateBusy = false;
 
   bool get isInitialized => _isInitialized;
 
   Future<void> initModel() async {
     try {
-      // Load labels
       final labelData = await rootBundle.loadString('assets/$_labelsAsset');
       _labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
 
-      // Load model
-      final interpreterOptions = InterpreterOptions()..threads = 4;
-      _interpreter = await Interpreter.fromAsset(
-        'assets/$_modelAsset',
-        options: interpreterOptions,
-      );
+      final modelData = await rootBundle.load('assets/$_modelAsset');
+      final modelBytes = modelData.buffer.asUint8List();
+
+      _receivePort = ReceivePort();
+      _isolate = await Isolate.spawn(_isolateEntry, _receivePort!.sendPort);
+      
+      final completer = Completer<SendPort>();
+      StreamSubscription? sub;
+      sub = _receivePort!.listen((message) {
+        if (message is SendPort) {
+          completer.complete(message);
+          sub?.cancel();
+        }
+      });
+      _sendPort = await completer.future;
+
+      final initCompleter = Completer<void>();
+      final initReplyPort = ReceivePort();
+      
+      _sendPort!.send({
+        'command': 'init',
+        'modelBytes': modelBytes,
+        'labelData': labelData,
+        'replyPort': initReplyPort.sendPort,
+      });
+
+      StreamSubscription? initSub;
+      initSub = initReplyPort.listen((message) {
+        if (message == 'init_done') {
+          initCompleter.complete();
+          initSub?.cancel();
+          initReplyPort.close();
+        }
+      });
+
+      await initCompleter.future;
 
       _isInitialized = true;
-      print('TfliteService: Model loaded. Labels: $_labels');
-      print('TfliteService: Input: ${_interpreter!.getInputTensors().map((t) => t.shape)}');
-      print('TfliteService: Output: ${_interpreter!.getOutputTensors().map((t) => t.shape)}');
+      print('TfliteService: Model loaded via persistent isolate.');
     } catch (e) {
       print('TfliteService init error: $e');
       rethrow;
@@ -76,26 +105,31 @@ class TfliteService {
     );
   }
 
-  /// Run inference on the image at [imagePath].
-  /// Returns a list of [DetectionResult] sorted by confidence descending.
-  /// Offloaded to a background isolate to keep UI smooth.
   Future<List<DetectionResult>> runInference(String imagePath) async {
     if (!_isInitialized) await initModel();
 
     final imageBytes = await File(imagePath).readAsBytes();
     
-    // We pass the raw bytes and asset paths to the isolate.
-    // The isolate will handle decoding, resizing, and inference.
-    return await _runInferenceInIsolate(imageBytes);
+    final replyPort = ReceivePort();
+    _sendPort!.send({
+      'command': 'inference_static',
+      'imageBytes': imageBytes,
+      'replyPort': replyPort.sendPort,
+    });
+
+    final detections = await replyPort.first;
+    replyPort.close();
+    return detections as List<DetectionResult>;
   }
 
-  /// Run inference on a [CameraImage] from the stream.
-  /// Throttled by the caller.
   Future<List<DetectionResult>> runInferenceOnStream(CameraImage image) async {
     if (!_isInitialized) await initModel();
+    if (_isIsolateBusy) return <DetectionResult>[];
 
-    // We pass the CameraImage planes to the isolate for conversion and inference.
-    return await compute(_inferenceStreamTaskWrapper, {
+    _isIsolateBusy = true;
+    final replyPort = ReceivePort();
+    _sendPort!.send({
+      'command': 'inference_stream',
       'planes': image.planes.map((p) => {
         'bytes': p.bytes,
         'bytesPerRow': p.bytesPerRow,
@@ -104,37 +138,119 @@ class TfliteService {
       'width': image.width,
       'height': image.height,
       'format': image.format.group,
-      'modelBytes': (await rootBundle.load('assets/$_modelAsset')).buffer.asUint8List(),
-      'labelData': await rootBundle.loadString('assets/$_labelsAsset'),
+      'replyPort': replyPort.sendPort,
     });
+
+    final detections = await replyPort.first;
+    replyPort.close();
+    _isIsolateBusy = false;
+    return detections as List<DetectionResult>;
   }
 
-  static List<DetectionResult> _inferenceStreamTaskWrapper(Map<String, dynamic> args) {
-    final modelBytes = args['modelBytes'] as Uint8List;
-    final labelData = args['labelData'] as String;
-    final planes = args['planes'] as List<dynamic>;
-    final width = args['width'] as int;
-    final height = args['height'] as int;
-    
-    final interpreter = Interpreter.fromBuffer(modelBytes);
-    final labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
  /// Entry point for the persistent inference isolate.
  ///
  /// Handshake: the isolate immediately sends its own command [SendPort]
  /// back over [sendPort]. Afterwards it serves `Map` messages of the form
  /// `{'command': ..., 'replyPort': SendPort, ...}`:
  ///
  ///  * `'init'` — builds the interpreter from raw model bytes, parses the
  ///    label list, and replies with the string `'init_done'`.
  ///  * `'inference_static'` — runs detection on encoded image bytes and
  ///    replies with a `List<DetectionResult>`.
  ///  * `'inference_stream'` — runs detection on raw camera planes and
  ///    replies with a `List<DetectionResult>`.
  ///
  /// Inference commands received before `'init'` reply with an empty list.
  static void _isolateEntry(SendPort sendPort) {
    final receivePort = ReceivePort();
    sendPort.send(receivePort.sendPort);

    // Isolate-lifetime state: the interpreter is created once during 'init'
    // and reused for every subsequent frame (the key perf win over spawning
    // a fresh isolate per frame via `compute`).
    Interpreter? interpreter;
    List<String> labels = [];

    receivePort.listen((message) {
      if (message is Map) {
        final command = message['command'];
        final replyPort = message['replyPort'] as SendPort;

        if (command == 'init') {
          final modelBytes = message['modelBytes'] as Uint8List;
          final labelData = message['labelData'] as String;

          final interpreterOptions = InterpreterOptions()..threads = 4;
          interpreter = Interpreter.fromBuffer(modelBytes, options: interpreterOptions);
          // One label per non-empty line, trimmed.
          labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
          
          replyPort.send('init_done');
        } else if (command == 'inference_static') {
          // Not initialized yet: reply empty so the caller never hangs.
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          final imageBytes = message['imageBytes'] as Uint8List;
          final results = _inferenceStaticTask(imageBytes, interpreter!, labels);
          replyPort.send(results);
        } else if (command == 'inference_stream') {
          // Not initialized yet: reply empty so the caller never hangs.
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          // Planes arrive as plain maps (bytes/strides) because plugin
          // objects can't cross the isolate boundary.
          final planes = message['planes'] as List<dynamic>;
          final width = message['width'] as int;
          final height = message['height'] as int;
          final format = message['format'];
          
          final results = _inferenceStreamTask(planes, width, height, format, interpreter!, labels);
          replyPort.send(results);
        }
      }
    });
  }
+
  /// Runs one full inference pass on encoded image bytes inside the isolate.
  ///
  /// Pipeline: decode → center-square crop → resize to
  /// [_inputSize]x[_inputSize] → normalize RGB to [0, 1] → run the
  /// interpreter → map raw output rows back to full-frame coordinates via
  /// [_decodeDetections]. Returns an empty list on any failure.
  static List<DetectionResult> _inferenceStaticTask(Uint8List imageBytes, Interpreter interpreter, List<String> labels) {
    try {
      final decoded = img.decodeImage(imageBytes);
      if (decoded == null) throw Exception('Could not decode image');

      // Center-square crop: largest square that fits the frame.
      final int width = decoded.width;
      final int height = decoded.height;
      final int size = width < height ? width : height;
      final int offsetX = (width - size) ~/ 2;
      final int offsetY = (height - size) ~/ 2;
      
      final cropped = img.copyCrop(decoded, x: offsetX, y: offsetY, width: size, height: size);
      final resized = img.copyResize(cropped, width: _inputSize, height: _inputSize, interpolation: img.Interpolation.linear);

      // Input tensor shape [1, H, W, 3], float RGB normalized to [0, 1].
      final inputTensor = List.generate(1, (_) =>
        List.generate(_inputSize, (y) =>
          List.generate(_inputSize, (x) {
            final pixel = resized.getPixel(x, y);
            return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
          })
        )
      );

      // Output buffer sized from the model's declared output shape
      // (expected [1, N, 6] for the end-to-end YOLO export).
      final outputShape = interpreter.getOutputTensors()[0].shape;
      final outputTensor = List.generate(1, (_) =>
        List.generate(outputShape[1], (_) =>
          List<double>.filled(outputShape[2], 0.0)
        )
      );

      interpreter.run(inputTensor, outputTensor);

      // Crop offsets are forwarded so boxes map back onto the full frame.
      return _decodeDetections(
        outputTensor[0], 
        labels, 
        cropSize: size, 
        offsetX: offsetX, 
        offsetY: offsetY, 
        fullWidth: width, 
        fullHeight: height
      );
    } catch (e) {
      // Best-effort: a single bad image should not crash the isolate.
      print('Isolate static inference error: $e');
      return <DetectionResult>[];
    }
  }
+
+  static List<DetectionResult> _inferenceStreamTask(
+    List<dynamic> planes, int width, int height, dynamic format, 
+    Interpreter interpreter, List<String> labels
+  ) {
     try {
       final size = width < height ? width : height;
       final offsetX = (width - size) ~/ 2;
       final offsetY = (height - size) ~/ 2;
 
       img.Image? image;
-      if (args['format'] == ImageFormatGroup.yuv420) {
-        image = _convertYUV420ToImage(
-          planes: planes,
-          width: width,
-          height: height,
-          cropSize: size,
-          offsetX: offsetX,
-          offsetY: offsetY,
-        );
-      } else if (args['format'] == ImageFormatGroup.bgra8888) {
+      if (format == ImageFormatGroup.bgra8888) {
         final fullImage = img.Image.fromBytes(
           width: width,
           height: height,
@@ -144,11 +260,20 @@ class TfliteService {
           order: img.ChannelOrder.bgra,
         );
         image = img.copyCrop(fullImage, x: offsetX, y: offsetY, width: size, height: size);
+      } else if (format == ImageFormatGroup.yuv420) {
+        image = _convertYUV420ToImage(
+          planes: planes,
+          width: width,
+          height: height,
+          cropSize: size,
+          offsetX: offsetX,
+          offsetY: offsetY,
+        );
+      } else {
+        print("TfliteService: Unsupported format: $format. Ensure platform correctly requests YUV420 or BGRA.");
+        return <DetectionResult>[];
       }
 
-      if (image == null) return [];
-
-      // Resize and Run
       final resized = img.copyResize(image, width: _inputSize, height: _inputSize);
       
       final inputTensor = List.generate(1, (_) =>
@@ -169,7 +294,6 @@ class TfliteService {
 
       interpreter.run(inputTensor, outputTensor);
       
-      // Map detections back to full frame
       return _decodeDetections(
         outputTensor[0], 
         labels, 
@@ -179,8 +303,9 @@ class TfliteService {
         fullWidth: width, 
         fullHeight: height
       );
-    } finally {
-      interpreter.close();
+    } catch (e) {
+      print('Isolate stream inference error: $e');
+      return <DetectionResult>[];
     }
   }
 
@@ -204,32 +329,52 @@ class TfliteService {
     final uvRowStride = uPlane['bytesPerRow'] as int;
     final uvPixelStride = uPlane['bytesPerPixel'] as int;
 
-    final image = img.Image(width: cropSize, height: cropSize);
+    // Use a flat Uint8List buffer for fast native-style memory writing
+    // 3 channels: R, G, B
+    final Uint8List rgbBytes = Uint8List(cropSize * cropSize * 3);
+    int bufferIndex = 0;
 
     for (int y = 0; y < cropSize; y++) {
       for (int x = 0; x < cropSize; x++) {
         final int actualX = x + offsetX;
         final int actualY = y + offsetY;
 
-        final int uvIndex = (uvRowStride * (actualY / 2).floor()) + (uvPixelStride * (actualX / 2).floor());
+        // Mathematical offset matching
+        final int uvIndex = (uvRowStride * (actualY >> 1)) + (uvPixelStride * (actualX >> 1));
         final int yIndex = (actualY * yRowStride) + actualX;
 
-        // Ensure we don't go out of bounds
-        if (yIndex >= yBytes.length || uvIndex >= uBytes.length || uvIndex >= vBytes.length) continue;
+        // Skip if out of bounds (should not happen mathematically if offsets are valid, 
+        // but kept as safety check for corrupted frames)
+        if (yIndex >= yBytes.length || uvIndex >= uBytes.length || uvIndex >= vBytes.length) {
+            bufferIndex += 3;
+            continue;
+        }
 
         final int yp = yBytes[yIndex];
         final int up = uBytes[uvIndex];
         final int vp = vBytes[uvIndex];
 
         // Standard YUV to RGB conversion
-        int r = (yp + (1.370705 * (vp - 128))).toInt().clamp(0, 255);
-        int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128))).toInt().clamp(0, 255);
-        int b = (yp + (1.732446 * (up - 128))).toInt().clamp(0, 255);
-
-        image.setPixelRgb(x, y, r, g, b);
+        int r = (yp + (1.370705 * (vp - 128))).toInt();
+        int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128))).toInt();
+        int b = (yp + (1.732446 * (up - 128))).toInt();
+
+        // Write directly to sequential memory with inline clamping
+        rgbBytes[bufferIndex++] = r < 0 ? 0 : (r > 255 ? 255 : r);
+        rgbBytes[bufferIndex++] = g < 0 ? 0 : (g > 255 ? 255 : g);
+        rgbBytes[bufferIndex++] = b < 0 ? 0 : (b > 255 ? 255 : b);
       }
     }
-    return image;
+    
+    // Construct image mapping directly from the fast buffer
+    return img.Image.fromBytes(
+      width: cropSize, 
+      height: cropSize, 
+      bytes: rgbBytes.buffer,
+      format: img.Format.uint8,
+      numChannels: 3,
+      order: img.ChannelOrder.rgb,
+    );
   }
 
   static List<DetectionResult> _decodeDetections(
@@ -277,86 +422,12 @@ class TfliteService {
     return detections;
   }
 
-  Future<List<DetectionResult>> _runInferenceInIsolate(Uint8List imageBytes) async {
-    // We need the model and labels passed as data
-    final modelData = await rootBundle.load('assets/$_modelAsset');
-    final labelData = await rootBundle.loadString('assets/$_labelsAsset');
-    
-    // Use compute to run in a real isolate
-    return await compute(_inferenceTaskWrapper, {
-      'imageBytes': imageBytes,
-      'modelBytes': modelData.buffer.asUint8List(),
-      'labelData': labelData,
-    });
-  }
-
-  static List<DetectionResult> _inferenceTaskWrapper(Map<String, dynamic> args) {
-    return _inferenceTask(
-      args['imageBytes'] as Uint8List,
-      args['modelBytes'] as Uint8List,
-      args['labelData'] as String,
-    );
-  }
-
-  /// The static task that runs in the background isolate
-  static List<DetectionResult> _inferenceTask(Uint8List imageBytes, Uint8List modelBytes, String labelData) {
-    // 1. Initialize Interpreter inside the isolate
-    final interpreter = Interpreter.fromBuffer(modelBytes);
-    final labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
-
-    try {
-      // 2. Preprocess image
-      final decoded = img.decodeImage(imageBytes);
-      if (decoded == null) throw Exception('Could not decode image');
-
-      // Center-Square Crop
-      final int width = decoded.width;
-      final int height = decoded.height;
-      final int size = width < height ? width : height;
-      final int offsetX = (width - size) ~/ 2;
-      final int offsetY = (height - size) ~/ 2;
-      
-      final cropped = img.copyCrop(decoded, x: offsetX, y: offsetY, width: size, height: size);
-      final resized = img.copyResize(cropped, width: _inputSize, height: _inputSize, interpolation: img.Interpolation.linear);
-
-      final inputTensor = List.generate(1, (_) =>
-        List.generate(_inputSize, (y) =>
-          List.generate(_inputSize, (x) {
-            final pixel = resized.getPixel(x, y);
-            return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
-          })
-        )
-      );
-
-      // 3. Prepare output
-      final outputShape = interpreter.getOutputTensors()[0].shape;
-      final outputTensor = List.generate(1, (_) =>
-        List.generate(outputShape[1], (_) =>
-          List<double>.filled(outputShape[2], 0.0)
-        )
-      );
-
-      // 4. Run
-      interpreter.run(inputTensor, outputTensor);
-
-      // Map detections back to full frame
-      return _decodeDetections(
-        outputTensor[0], 
-        labels, 
-        cropSize: size, 
-        offsetX: offsetX, 
-        offsetY: offsetY, 
-        fullWidth: width, 
-        fullHeight: height
-      );
-    } finally {
-      interpreter.close();
-    }
-  }
-
   void dispose() {
-    _interpreter?.close();
-    _interpreter = null;
+    _receivePort?.close();
+    if (_isolate != null) {
+      _isolate!.kill(priority: Isolate.immediate);
+      _isolate = null;
+    }
     _isInitialized = false;
   }
 }