import 'dart:async';
import 'dart:io';
import 'dart:isolate';
import 'dart:math';
import 'dart:typed_data';
import 'dart:ui';

import 'package:camera/camera.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'package:image/image.dart' as img;
import 'package:image_picker/image_picker.dart';
import 'package:tflite_flutter/tflite_flutter.dart';

/// A single object detection produced by the model.
class DetectionResult {
  /// Human-readable label resolved from labels.txt ('Unknown' if out of range).
  final String className;

  /// Index into the label list.
  final int classIndex;

  /// Model confidence score (already thresholded by the service).
  final double confidence;

  /// Bounding box in normalized (0..1) coordinates of the FULL camera frame
  /// (the service maps boxes from the center-crop back to the full frame).
  final Rect normalizedBox;

  const DetectionResult({
    required this.className,
    required this.classIndex,
    required this.confidence,
    required this.normalizedBox,
  });

  /// Traffic-light color for UI overlays based on the detected class.
  Color getStatusColor() {
    if (className == 'Empty_Bunch' || className == 'Abnormal') {
      return const Color(0xFFF44336); // red: reject
    }
    if (className == 'Ripe' || className == 'Overripe') {
      return const Color(0xFF4CAF50); // green: harvest-ready
    }
    return const Color(0xFFFF9800); // orange: everything else
  }
}

/// Runs a YOLO-style TFLite model inside a persistent background isolate so
/// that neither static-image nor camera-stream inference janks the UI thread.
///
/// Message protocol (main -> isolate): a Map with a 'command' key
/// ('init' | 'inference_static' | 'inference_stream') and a 'replyPort'.
class TfliteService {
  static const _modelAsset = 'best.tflite';
  static const _labelsAsset = 'labels.txt';

  /// Model input is a square 640x640 RGB tensor, values scaled to 0..1.
  static const int _inputSize = 640;
  static const double _confidenceThreshold = 0.25;

  Isolate? _isolate;
  SendPort? _sendPort;
  ReceivePort? _receivePort;
  List<String> _labels = [];
  final ImagePicker _picker = ImagePicker();

  bool _isInitialized = false;
  bool _isIsolateBusy = false;

  bool get isInitialized => _isInitialized;
  bool get isIsolateBusy => _isIsolateBusy;

  /// Spawns the inference isolate, ships it the model bytes + labels, and
  /// waits for the isolate to acknowledge interpreter creation.
  ///
  /// Safe to call more than once; rethrows on asset-load or spawn failure.
  Future<void> initModel() async {
    try {
      final labelData = await rootBundle.loadString('assets/$_labelsAsset');
      _labels = labelData
          .split('\n')
          .where((l) => l.trim().isNotEmpty)
          .map((l) => l.trim())
          .toList();

      final modelData = await rootBundle.load('assets/$_modelAsset');
      final modelBytes = modelData.buffer.asUint8List();

      // Handshake: the isolate's first (and only) message on this port is
      // its command SendPort.
      _receivePort = ReceivePort();
      _isolate = await Isolate.spawn(_isolateEntry, _receivePort!.sendPort);
      _sendPort = await _receivePort!.first as SendPort;

      // Ask the isolate to build the interpreter and wait for 'init_done'.
      final initReplyPort = ReceivePort();
      _sendPort!.send({
        'command': 'init',
        'modelBytes': modelBytes,
        'labelData': labelData,
        'replyPort': initReplyPort.sendPort,
      });
      await initReplyPort.firstWhere((message) => message == 'init_done');
      initReplyPort.close();

      _isInitialized = true;
      debugPrint('TfliteService: Model loaded via persistent isolate.');
    } catch (e) {
      debugPrint('TfliteService init error: $e');
      rethrow;
    }
  }

  /// Lets the user pick a gallery image, pre-scaled to the model input size.
  Future<XFile?> pickImage() async {
    return _picker.pickImage(
      source: ImageSource.gallery,
      maxWidth: _inputSize.toDouble(),
      maxHeight: _inputSize.toDouble(),
    );
  }

  /// Runs inference on a still image file and returns detections sorted by
  /// descending confidence.
  Future<List<DetectionResult>> runInference(String imagePath) async {
    if (!_isInitialized) await initModel();
    final imageBytes = await File(imagePath).readAsBytes();

    final replyPort = ReceivePort();
    try {
      _sendPort!.send({
        'command': 'inference_static',
        'imageBytes': imageBytes,
        'replyPort': replyPort.sendPort,
      });
      final detections = await replyPort.first;
      return (detections as List).cast<DetectionResult>();
    } finally {
      replyPort.close();
    }
  }

  /// Runs inference on one camera frame. Returns [] immediately if a frame
  /// is already in flight (frames are droppable; results are not queued).
  Future<List<DetectionResult>> runInferenceOnStream(CameraImage image) async {
    if (!_isInitialized) await initModel();
    // The gatekeeper logic has moved up to LiveAnalysisScreen (Atomic Lock)
    // but we keep the safety bypass here just in case.
    if (_isIsolateBusy) return [];
    _isIsolateBusy = true;

    final replyPort = ReceivePort();
    try {
      // Planes are flattened to plain maps because CameraImage itself is not
      // sendable across isolates.
      _sendPort!.send({
        'command': 'inference_stream',
        'planes': image.planes
            .map((p) => {
                  'bytes': p.bytes,
                  'bytesPerRow': p.bytesPerRow,
                  'bytesPerPixel': p.bytesPerPixel,
                })
            .toList(),
        'width': image.width,
        'height': image.height,
        'format': image.format.group,
        'replyPort': replyPort.sendPort,
      });
      final detections = await replyPort.first;
      return (detections as List).cast<DetectionResult>();
    } finally {
      // Always release the lock, even if send/receive throws; otherwise the
      // stream pipeline would be wedged forever.
      replyPort.close();
      _isIsolateBusy = false;
    }
  }

  /// Isolate entry point: owns the Interpreter and serves commands forever.
  static void _isolateEntry(SendPort sendPort) {
    final receivePort = ReceivePort();
    sendPort.send(receivePort.sendPort);

    Interpreter? interpreter;
    List<String> labels = [];

    receivePort.listen((message) {
      if (message is! Map) return;
      final command = message['command'];
      final replyPort = message['replyPort'] as SendPort;

      switch (command) {
        case 'init':
          final modelBytes = message['modelBytes'] as Uint8List;
          final labelData = message['labelData'] as String;
          final interpreterOptions = InterpreterOptions()..threads = 4;
          interpreter =
              Interpreter.fromBuffer(modelBytes, options: interpreterOptions);
          labels = labelData
              .split('\n')
              .where((l) => l.trim().isNotEmpty)
              .map((l) => l.trim())
              .toList();
          replyPort.send('init_done');
        case 'inference_static':
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          final imageBytes = message['imageBytes'] as Uint8List;
          replyPort.send(_inferenceStaticTask(imageBytes, interpreter!, labels));
        case 'inference_stream':
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          replyPort.send(_inferenceStreamTask(
            message['planes'] as List,
            message['width'] as int,
            message['height'] as int,
            message['format'],
            interpreter!,
            labels,
          ));
      }
    });
  }

  /// Decodes an encoded image, center-crops it square, resizes to the model
  /// input, and runs the interpreter. Returns [] on any failure.
  static List<DetectionResult> _inferenceStaticTask(
      Uint8List imageBytes, Interpreter interpreter, List<String> labels) {
    try {
      final decoded = img.decodeImage(imageBytes);
      if (decoded == null) throw Exception('Could not decode image');

      final int width = decoded.width;
      final int height = decoded.height;
      final int size = width < height ? width : height;
      final int offsetX = (width - size) ~/ 2;
      final int offsetY = (height - size) ~/ 2;

      final cropped = img.copyCrop(decoded,
          x: offsetX, y: offsetY, width: size, height: size);
      final resized = img.copyResize(cropped,
          width: _inputSize,
          height: _inputSize,
          interpolation: img.Interpolation.linear);

      return _runModel(resized, interpreter, labels,
          cropSize: size,
          offsetX: offsetX,
          offsetY: offsetY,
          fullWidth: width,
          fullHeight: height);
    } catch (e) {
      debugPrint('Isolate static inference error: $e');
      return [];
    }
  }

  /// Converts one raw camera frame (BGRA or YUV420) to a center-cropped RGB
  /// image and runs the interpreter. Returns [] on failure or unknown format.
  static List<DetectionResult> _inferenceStreamTask(
      List planes,
      int width,
      int height,
      dynamic format,
      Interpreter interpreter,
      List<String> labels) {
    try {
      final size = width < height ? width : height;
      final offsetX = (width - size) ~/ 2;
      final offsetY = (height - size) ~/ 2;

      img.Image? image;
      if (format == ImageFormatGroup.bgra8888) {
        // iOS path: single interleaved BGRA plane.
        // NOTE(review): `.buffer` assumes the Uint8List spans its whole
        // buffer from offset 0 (true for isolate-copied bytes) — confirm if
        // the transport ever changes to TransferableTypedData.
        final fullImage = img.Image.fromBytes(
          width: width,
          height: height,
          bytes: (planes[0]['bytes'] as Uint8List).buffer,
          format: img.Format.uint8,
          numChannels: 4,
          order: img.ChannelOrder.bgra,
        );
        image = img.copyCrop(fullImage,
            x: offsetX, y: offsetY, width: size, height: size);
      } else if (format == ImageFormatGroup.yuv420) {
        // Android path: planar YUV with per-plane strides.
        image = _convertYUV420ToImage(
          planes: planes,
          width: width,
          height: height,
          cropSize: size,
          offsetX: offsetX,
          offsetY: offsetY,
        );
      } else {
        debugPrint('TfliteService: Unsupported format: $format. '
            'Ensure platform correctly requests YUV420 or BGRA.');
        return [];
      }

      final resized =
          img.copyResize(image, width: _inputSize, height: _inputSize);

      return _runModel(resized, interpreter, labels,
          cropSize: size,
          offsetX: offsetX,
          offsetY: offsetY,
          fullWidth: width,
          fullHeight: height);
    } catch (e) {
      debugPrint('Isolate stream inference error: $e');
      return [];
    }
  }

  /// Shared tail of both inference paths: builds the normalized input tensor,
  /// invokes the interpreter, and decodes the raw output.
  static List<DetectionResult> _runModel(
      img.Image resized, Interpreter interpreter, List<String> labels,
      {required int cropSize,
      required int offsetX,
      required int offsetY,
      required int fullWidth,
      required int fullHeight}) {
    // [1, H, W, 3] float input, channel values scaled to 0..1.
    final inputTensor = List.generate(
        1,
        (_) => List.generate(
            _inputSize,
            (y) => List.generate(_inputSize, (x) {
                  final pixel = resized.getPixel(x, y);
                  return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
                })));

    final outputShape = interpreter.getOutputTensors()[0].shape;
    final outputTensor = List.generate(
        1,
        (_) => List.generate(
            outputShape[1], (_) => List.filled(outputShape[2], 0.0)));

    interpreter.run(inputTensor, outputTensor);

    return _decodeDetections(outputTensor[0], labels,
        cropSize: cropSize,
        offsetX: offsetX,
        offsetY: offsetY,
        fullWidth: fullWidth,
        fullHeight: fullHeight);
  }

  /// Converts the center-crop region of a YUV420 frame to a BGRA image.
  ///
  /// Writes packed 32-bit pixels into a [Uint32List] for speed; out-of-bounds
  /// source indices (stride padding at frame edges) are left black.
  static img.Image _convertYUV420ToImage({
    required List planes,
    required int width,
    required int height,
    required int cropSize,
    required int offsetX,
    required int offsetY,
  }) {
    final yPlane = planes[0];
    final uPlane = planes[1];
    final vPlane = planes[2];

    final yBytes = yPlane['bytes'] as Uint8List;
    final uBytes = uPlane['bytes'] as Uint8List;
    final vBytes = vPlane['bytes'] as Uint8List;

    final yRowStride = yPlane['bytesPerRow'] as int;
    final uvRowStride = uPlane['bytesPerRow'] as int;
    final uvPixelStride = uPlane['bytesPerPixel'] as int;

    // Fast 32-bit native memory buffer.
    final Uint32List bgraData = Uint32List(cropSize * cropSize);
    int bufferIndex = 0;

    for (int y = 0; y < cropSize; y++) {
      for (int x = 0; x < cropSize; x++) {
        final int actualX = x + offsetX;
        final int actualY = y + offsetY;

        // Chroma planes are subsampled 2x2, hence the >> 1.
        final int uvIndex =
            (uvRowStride * (actualY >> 1)) + (uvPixelStride * (actualX >> 1));
        final int yIndex = (actualY * yRowStride) + actualX;

        if (yIndex >= yBytes.length ||
            uvIndex >= uBytes.length ||
            uvIndex >= vBytes.length) {
          bufferIndex++; // leave pixel black rather than crash on padding
          continue;
        }

        final int yp = yBytes[yIndex];
        final int up = uBytes[uvIndex];
        final int vp = vBytes[uvIndex];

        // Standard YUV to RGB conversion (BT.601-style coefficients).
        int r = (yp + (1.370705 * (vp - 128))).toInt();
        int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128))).toInt();
        int b = (yp + (1.732446 * (up - 128))).toInt();

        // Clamp inline for max speed.
        r = r < 0 ? 0 : (r > 255 ? 255 : r);
        g = g < 0 ? 0 : (g > 255 ? 255 : g);
        b = b < 0 ? 0 : (b > 255 ? 255 : b);

        // Pack 0xAARRGGBB; little-endian memory order is B, G, R, A.
        bgraData[bufferIndex++] = (255 << 24) | (r << 16) | (g << 8) | b;
      }
    }

    return img.Image.fromBytes(
      width: cropSize,
      height: cropSize,
      bytes: bgraData.buffer,
      format: img.Format.uint8,
      numChannels: 4, // packed 4 channels (BGRA)
      order: img.ChannelOrder.bgra, // explicitly tell image package it's BGRA
    );
  }

  /// Decodes raw model rows into [DetectionResult]s sorted by confidence.
  ///
  /// NOTE(review): assumes each row is [x1, y1, x2, y2, conf, classId] with
  /// coordinates normalized to the crop — verify against the exported model.
  /// When crop metadata is given, boxes are mapped back into full-frame
  /// normalized coordinates.
  static List<DetectionResult> _decodeDetections(
      List<List<double>> rawDetections, List<String> labels,
      {int? cropSize,
      int? offsetX,
      int? offsetY,
      int? fullWidth,
      int? fullHeight}) {
    final detections = <DetectionResult>[];

    for (final det in rawDetections) {
      if (det.length < 6) continue;
      final conf = det[4];
      if (conf < _confidenceThreshold) continue;

      double x1 = det[0].clamp(0.0, 1.0);
      double y1 = det[1].clamp(0.0, 1.0);
      double x2 = det[2].clamp(0.0, 1.0);
      double y2 = det[3].clamp(0.0, 1.0);

      // If crop info is provided, map back to full frame.
      if (cropSize != null &&
          offsetX != null &&
          offsetY != null &&
          fullWidth != null &&
          fullHeight != null) {
        x1 = (x1 * cropSize + offsetX) / fullWidth;
        x2 = (x2 * cropSize + offsetX) / fullWidth;
        y1 = (y1 * cropSize + offsetY) / fullHeight;
        y2 = (y2 * cropSize + offsetY) / fullHeight;
      }

      final classId = det[5].round();
      if (x2 <= x1 || y2 <= y1) continue; // degenerate box

      final label =
          (classId >= 0 && classId < labels.length) ? labels[classId] : 'Unknown';

      detections.add(DetectionResult(
        className: label,
        classIndex: classId,
        confidence: conf,
        normalizedBox: Rect.fromLTRB(x1, y1, x2, y2),
      ));
    }

    detections.sort((a, b) => b.confidence.compareTo(a.confidence));
    return detections;
  }

  /// Tears down the isolate and releases all ports. The service can be
  /// re-initialized afterwards via [initModel].
  void dispose() {
    _receivePort?.close();
    _receivePort = null;
    _sendPort = null;
    _isolate?.kill(priority: Isolate.immediate);
    _isolate = null;
    _isInitialized = false;
    _isIsolateBusy = false;
  }
}