import 'dart:io';
import 'dart:math';
import 'dart:ui';

import 'package:camera/camera.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'package:image/image.dart' as img;
import 'package:image_picker/image_picker.dart';
import 'package:tflite_flutter/tflite_flutter.dart';

/// A detection result parsed from the model's end-to-end output.
class DetectionResult {
  final String className;
  final int classIndex;
  final double confidence;

  /// Normalized bounding box (0.0 - 1.0), relative to the full source frame.
  final Rect normalizedBox;

  const DetectionResult({
    required this.className,
    required this.classIndex,
    required this.confidence,
    required this.normalizedBox,
  });

  /// Returns a display color for this detection's class.
  ///
  /// Red for problem classes, green for harvest-ready classes,
  /// orange for everything else.
  Color getStatusColor() {
    if (className == 'Empty_Bunch' || className == 'Abnormal') {
      return const Color(0xFFF44336); // Colors.red
    }
    if (className == 'Ripe' || className == 'Overripe') {
      return const Color(0xFF4CAF50); // Colors.green
    }
    return const Color(0xFFFF9800); // Colors.orange
  }
}

/// Custom TFLite inference service that correctly decodes the end-to-end
/// YOLO model output format [1, N, 6] = [batch, detections, (x1,y1,x2,y2,conf,class_id)].
class TfliteService {
  static const _modelAsset = 'best.tflite';
  static const _labelsAsset = 'labels.txt';
  static const int _inputSize = 640;
  static const double _confidenceThreshold = 0.25;

  Interpreter? _interpreter;
  List<String> _labels = [];
  final ImagePicker _picker = ImagePicker();

  bool _isInitialized = false;
  bool get isInitialized => _isInitialized;

  /// Loads the labels file and the TFLite model from the asset bundle.
  ///
  /// Rethrows on failure so callers can surface the error to the UI.
  Future<void> initModel() async {
    try {
      // Load labels.
      final labelData = await rootBundle.loadString('assets/$_labelsAsset');
      _labels = _parseLabels(labelData);

      // Load model.
      final interpreterOptions = InterpreterOptions()..threads = 4;
      _interpreter = await Interpreter.fromAsset(
        'assets/$_modelAsset',
        options: interpreterOptions,
      );
      _isInitialized = true;
      debugPrint('TfliteService: Model loaded. Labels: $_labels');
      debugPrint(
          'TfliteService: Input: ${_interpreter!.getInputTensors().map((t) => t.shape)}');
      debugPrint(
          'TfliteService: Output: ${_interpreter!.getOutputTensors().map((t) => t.shape)}');
    } catch (e) {
      debugPrint('TfliteService init error: $e');
      rethrow;
    }
  }

  /// Opens the gallery picker, pre-scaled to the model's input size.
  ///
  /// Returns null if the user cancels.
  Future<XFile?> pickImage() async {
    return await _picker.pickImage(
      source: ImageSource.gallery,
      maxWidth: _inputSize.toDouble(),
      maxHeight: _inputSize.toDouble(),
    );
  }

  /// Runs inference on the image at [imagePath].
  ///
  /// Returns a list of [DetectionResult] sorted by confidence descending.
  /// Offloaded to a background isolate to keep the UI smooth.
  Future<List<DetectionResult>> runInference(String imagePath) async {
    if (!_isInitialized) await initModel();
    final imageBytes = await File(imagePath).readAsBytes();
    // The isolate handles decoding, resizing, and inference; we only pass
    // raw bytes because an Interpreter cannot cross the isolate boundary.
    return await _runInferenceInIsolate(imageBytes);
  }

  /// Runs inference on a [CameraImage] from the preview stream.
  ///
  /// Throttling is the caller's responsibility. Note that the model and
  /// labels are re-loaded from the bundle on every call, so the isolate
  /// spin-up cost is non-trivial per frame.
  Future<List<DetectionResult>> runInferenceOnStream(CameraImage image) async {
    if (!_isInitialized) await initModel();
    // Plane data is copied into plain maps so it can cross the isolate
    // boundary; bytesPerPixel is nullable on some platforms/formats.
    return await compute(_inferenceStreamTaskWrapper, {
      'planes': image.planes
          .map((p) => {
                'bytes': p.bytes,
                'bytesPerRow': p.bytesPerRow,
                'bytesPerPixel': p.bytesPerPixel,
              })
          .toList(),
      'width': image.width,
      'height': image.height,
      'format': image.format.group,
      'modelBytes':
          (await rootBundle.load('assets/$_modelAsset')).buffer.asUint8List(),
      'labelData': await rootBundle.loadString('assets/$_labelsAsset'),
    });
  }

  /// Splits the labels file into trimmed, non-empty lines.
  static List<String> _parseLabels(String labelData) => labelData
      .split('\n')
      .map((l) => l.trim())
      .where((l) => l.isNotEmpty)
      .toList();

  /// Builds the [1, H, W, 3] float32 input tensor (RGB, normalized 0..1).
  static List<List<List<List<double>>>> _buildInputTensor(img.Image resized) {
    return List.generate(
      1,
      (_) => List.generate(
        _inputSize,
        (y) => List.generate(_inputSize, (x) {
          final pixel = resized.getPixel(x, y);
          return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
        }),
      ),
    );
  }

  /// Allocates a zero-filled output buffer matching the model's first
  /// output tensor shape [1, N, 6].
  static List<List<List<double>>> _buildOutputTensor(Interpreter interpreter) {
    final outputShape = interpreter.getOutputTensors()[0].shape;
    return List.generate(
      1,
      (_) => List.generate(
        outputShape[1],
        (_) => List.filled(outputShape[2], 0.0),
      ),
    );
  }

  /// Isolate entry point for camera-stream frames: converts the frame to
  /// RGB, center-crops to a square, runs the model, and maps boxes back
  /// to the full frame.
  ///
  /// NOTE(review): sensor rotation is not applied here — detections on a
  /// rotated preview may be misaligned; confirm against the camera setup.
  static List<DetectionResult> _inferenceStreamTaskWrapper(
      Map<String, dynamic> args) {
    final modelBytes = args['modelBytes'] as Uint8List;
    final labelData = args['labelData'] as String;
    final planes = args['planes'] as List;
    final width = args['width'] as int;
    final height = args['height'] as int;

    // The Interpreter must be created inside the isolate.
    final interpreter = Interpreter.fromBuffer(modelBytes);
    final labels = _parseLabels(labelData);
    try {
      // Center-square crop geometry.
      final size = min(width, height);
      final offsetX = (width - size) ~/ 2;
      final offsetY = (height - size) ~/ 2;

      img.Image? image;
      if (args['format'] == ImageFormatGroup.yuv420) {
        image = _convertYUV420ToImage(
          planes: planes,
          width: width,
          height: height,
          cropSize: size,
          offsetX: offsetX,
          offsetY: offsetY,
        );
      } else if (args['format'] == ImageFormatGroup.bgra8888) {
        final fullImage = img.Image.fromBytes(
          width: width,
          height: height,
          bytes: planes[0]['bytes'].buffer,
          format: img.Format.uint8,
          numChannels: 4,
          order: img.ChannelOrder.bgra,
        );
        image = img.copyCrop(fullImage,
            x: offsetX, y: offsetY, width: size, height: size);
      }
      // Unsupported pixel formats yield no detections rather than crashing.
      if (image == null) return [];

      // Resize and run.
      final resized =
          img.copyResize(image, width: _inputSize, height: _inputSize);
      final inputTensor = _buildInputTensor(resized);
      final outputTensor = _buildOutputTensor(interpreter);
      interpreter.run(inputTensor, outputTensor);

      // Map detections back to full-frame coordinates.
      return _decodeDetections(
        outputTensor[0],
        labels,
        cropSize: size,
        offsetX: offsetX,
        offsetY: offsetY,
        fullWidth: width,
        fullHeight: height,
      );
    } finally {
      interpreter.close();
    }
  }

  /// Converts (and center-crops) a YUV420 camera frame to an RGB image.
  ///
  /// Plane maps must contain 'bytes', 'bytesPerRow', and (for the chroma
  /// planes) 'bytesPerPixel'. Out-of-bounds pixels are skipped, leaving
  /// them black in the output.
  static img.Image _convertYUV420ToImage({
    required List planes,
    required int width,
    required int height,
    required int cropSize,
    required int offsetX,
    required int offsetY,
  }) {
    final yPlane = planes[0];
    final uPlane = planes[1];
    final vPlane = planes[2];
    final yBytes = yPlane['bytes'] as Uint8List;
    final uBytes = uPlane['bytes'] as Uint8List;
    final vBytes = vPlane['bytes'] as Uint8List;
    final yRowStride = yPlane['bytesPerRow'] as int;
    final uvRowStride = uPlane['bytesPerRow'] as int;
    // bytesPerPixel is nullable in the camera API; 1 (planar layout) is a
    // sane fallback when the platform does not report it.
    final uvPixelStride = (uPlane['bytesPerPixel'] as int?) ?? 1;

    final image = img.Image(width: cropSize, height: cropSize);
    for (int y = 0; y < cropSize; y++) {
      for (int x = 0; x < cropSize; x++) {
        final int actualX = x + offsetX;
        final int actualY = y + offsetY;
        // Chroma is subsampled 2x2, hence the halved indices.
        final int uvIndex = (uvRowStride * (actualY ~/ 2)) +
            (uvPixelStride * (actualX ~/ 2));
        final int yIndex = (actualY * yRowStride) + actualX;
        // Guard against padded row strides pushing us past the buffers.
        if (yIndex >= yBytes.length ||
            uvIndex >= uBytes.length ||
            uvIndex >= vBytes.length) {
          continue;
        }
        final int yp = yBytes[yIndex];
        final int up = uBytes[uvIndex];
        final int vp = vBytes[uvIndex];
        // Standard YUV to RGB conversion.
        int r = (yp + (1.370705 * (vp - 128))).toInt().clamp(0, 255);
        int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128)))
            .toInt()
            .clamp(0, 255);
        int b = (yp + (1.732446 * (up - 128))).toInt().clamp(0, 255);
        image.setPixelRgb(x, y, r, g, b);
      }
    }
    return image;
  }

  /// Decodes raw [N, 6] rows (x1, y1, x2, y2, conf, class_id) into
  /// [DetectionResult]s, filtering by [_confidenceThreshold] and dropping
  /// degenerate boxes. When crop geometry is supplied, boxes are remapped
  /// from crop space back to the full frame.
  ///
  /// NOTE(review): assumes the model emits coordinates already normalized
  /// to 0..1 — confirm against the export settings; some exports emit
  /// pixel-space coordinates instead.
  static List<DetectionResult> _decodeDetections(
    List<List<double>> rawDetections,
    List<String> labels, {
    int? cropSize,
    int? offsetX,
    int? offsetY,
    int? fullWidth,
    int? fullHeight,
  }) {
    final detections = <DetectionResult>[];
    for (final det in rawDetections) {
      if (det.length < 6) continue;
      final conf = det[4];
      if (conf < _confidenceThreshold) continue;

      double x1 = det[0].clamp(0.0, 1.0);
      double y1 = det[1].clamp(0.0, 1.0);
      double x2 = det[2].clamp(0.0, 1.0);
      double y2 = det[3].clamp(0.0, 1.0);

      // If crop info is provided, map back to full-frame coordinates.
      if (cropSize != null &&
          offsetX != null &&
          offsetY != null &&
          fullWidth != null &&
          fullHeight != null) {
        x1 = (x1 * cropSize + offsetX) / fullWidth;
        x2 = (x2 * cropSize + offsetX) / fullWidth;
        y1 = (y1 * cropSize + offsetY) / fullHeight;
        y2 = (y2 * cropSize + offsetY) / fullHeight;
      }

      final classId = det[5].round();
      if (x2 <= x1 || y2 <= y1) continue;
      final label = (classId >= 0 && classId < labels.length)
          ? labels[classId]
          : 'Unknown';
      detections.add(DetectionResult(
        className: label,
        classIndex: classId,
        confidence: conf,
        normalizedBox: Rect.fromLTRB(x1, y1, x2, y2),
      ));
    }
    detections.sort((a, b) => b.confidence.compareTo(a.confidence));
    return detections;
  }

  /// Ships the image plus model/label bytes to a background isolate.
  Future<List<DetectionResult>> _runInferenceInIsolate(
      Uint8List imageBytes) async {
    // The model and labels must travel as plain bytes/strings: asset
    // bundles and interpreters cannot cross the isolate boundary.
    final modelData = await rootBundle.load('assets/$_modelAsset');
    final labelData = await rootBundle.loadString('assets/$_labelsAsset');
    return await compute(_inferenceTaskWrapper, {
      'imageBytes': imageBytes,
      'modelBytes': modelData.buffer.asUint8List(),
      'labelData': labelData,
    });
  }

  /// Unpacks the compute() argument map for [_inferenceTask].
  static List<DetectionResult> _inferenceTaskWrapper(
      Map<String, dynamic> args) {
    return _inferenceTask(
      args['imageBytes'] as Uint8List,
      args['modelBytes'] as Uint8List,
      args['labelData'] as String,
    );
  }

  /// Isolate entry point for still images: decode, center-square crop,
  /// resize, run the model, and map boxes back to the full image.
  static List<DetectionResult> _inferenceTask(
      Uint8List imageBytes, Uint8List modelBytes, String labelData) {
    // 1. Initialize the Interpreter inside the isolate.
    final interpreter = Interpreter.fromBuffer(modelBytes);
    final labels = _parseLabels(labelData);
    try {
      // 2. Preprocess image.
      final decoded = img.decodeImage(imageBytes);
      if (decoded == null) throw Exception('Could not decode image');

      // Center-square crop.
      final int width = decoded.width;
      final int height = decoded.height;
      final int size = min(width, height);
      final int offsetX = (width - size) ~/ 2;
      final int offsetY = (height - size) ~/ 2;
      final cropped = img.copyCrop(decoded,
          x: offsetX, y: offsetY, width: size, height: size);
      final resized = img.copyResize(cropped,
          width: _inputSize,
          height: _inputSize,
          interpolation: img.Interpolation.linear);

      // 3. Prepare tensors and 4. run.
      final inputTensor = _buildInputTensor(resized);
      final outputTensor = _buildOutputTensor(interpreter);
      interpreter.run(inputTensor, outputTensor);

      // Map detections back to full-image coordinates.
      return _decodeDetections(
        outputTensor[0],
        labels,
        cropSize: size,
        offsetX: offsetX,
        offsetY: offsetY,
        fullWidth: width,
        fullHeight: height,
      );
    } finally {
      interpreter.close();
    }
  }

  /// Releases the interpreter and resets the service to its unloaded state.
  void dispose() {
    _interpreter?.close();
    _interpreter = null;
    _labels = [];
    _isInitialized = false;
  }
}