- import 'dart:io';
- import 'dart:math';
- import 'dart:ui';
- import 'dart:typed_data';
- import 'dart:isolate';
- import 'dart:async';
- import 'package:flutter/services.dart';
- import 'package:flutter/foundation.dart';
- import 'package:image/image.dart' as img;
- import 'package:image_picker/image_picker.dart';
- import 'package:tflite_flutter/tflite_flutter.dart';
- import 'package:camera/camera.dart';
/// A single object detection produced by the TFLite model.
///
/// [normalizedBox] coordinates are normalized to the full camera/image
/// frame (0.0–1.0 on both axes).
class DetectionResult {
  final String className;
  final int classIndex;
  final double confidence;
  final Rect normalizedBox;

  const DetectionResult({
    required this.className,
    required this.classIndex,
    required this.confidence,
    required this.normalizedBox,
  });

  /// Maps the detection class to a traffic-light status color:
  /// red for problem classes, green for harvest-ready classes,
  /// orange for everything else.
  Color getStatusColor() {
    switch (className) {
      case 'Empty_Bunch':
      case 'Abnormal':
        return const Color(0xFFF44336); // red
      case 'Ripe':
      case 'Overripe':
        return const Color(0xFF4CAF50); // green
      default:
        return const Color(0xFFFF9800); // orange
    }
  }
}
/// Runs a YOLO TFLite model inside a persistent background isolate so that
/// heavy image preprocessing and inference never block the UI isolate.
class TfliteService {
  // Asset names, resolved under 'assets/'.
  static const _modelAsset = 'best.tflite';
  static const _labelsAsset = 'labels.txt';
  // Square model input edge in pixels.
  static const int _inputSize = 640;
  // Detections scoring below this are discarded by _decodeDetections.
  static const double _confidenceThreshold = 0.25;

  // Persistent worker isolate and its communication ports.
  Isolate? _isolate;
  SendPort? _sendPort;       // commands are pushed to the worker through this
  ReceivePort? _receivePort; // receives the worker's SendPort handshake
  // Labels parsed on the main isolate (the worker parses its own copy).
  List<String> _labels = [];
  final ImagePicker _picker = ImagePicker();

  bool _isInitialized = false;
  // Drops camera frames while a previous frame is still being processed.
  bool _isIsolateBusy = false;

  bool get isInitialized => _isInitialized;
  bool get isIsolateBusy => _isIsolateBusy;
- Future<void> initModel() async {
- try {
- final labelData = await rootBundle.loadString('assets/$_labelsAsset');
- _labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
- final modelData = await rootBundle.load('assets/$_modelAsset');
- final modelBytes = modelData.buffer.asUint8List();
- _receivePort = ReceivePort();
- _isolate = await Isolate.spawn(_isolateEntry, _receivePort!.sendPort);
-
- final completer = Completer<SendPort>();
- StreamSubscription? sub;
- sub = _receivePort!.listen((message) {
- if (message is SendPort) {
- completer.complete(message);
- sub?.cancel();
- }
- });
- _sendPort = await completer.future;
- final initCompleter = Completer<void>();
- final initReplyPort = ReceivePort();
-
- _sendPort!.send({
- 'command': 'init',
- 'modelBytes': modelBytes,
- 'labelData': labelData,
- 'replyPort': initReplyPort.sendPort,
- });
- StreamSubscription? initSub;
- initSub = initReplyPort.listen((message) {
- if (message == 'init_done') {
- initCompleter.complete();
- initSub?.cancel();
- initReplyPort.close();
- }
- });
- await initCompleter.future;
- _isInitialized = true;
- print('TfliteService: Model loaded via persistent isolate.');
- } catch (e) {
- print('TfliteService init error: $e');
- rethrow;
- }
- }
- Future<XFile?> pickImage() async {
- return await _picker.pickImage(
- source: ImageSource.gallery,
- maxWidth: _inputSize.toDouble(),
- maxHeight: _inputSize.toDouble(),
- );
- }
- Future<List<DetectionResult>> runInference(String imagePath) async {
- if (!_isInitialized) await initModel();
- final imageBytes = await File(imagePath).readAsBytes();
-
- final replyPort = ReceivePort();
- _sendPort!.send({
- 'command': 'inference_static',
- 'imageBytes': imageBytes,
- 'replyPort': replyPort.sendPort,
- });
- final detections = await replyPort.first;
- replyPort.close();
- return detections as List<DetectionResult>;
- }
- Future<List<DetectionResult>> runInferenceOnStream(CameraImage image) async {
- if (!_isInitialized) await initModel();
- // The gatekeeper logic has moved up to LiveAnalysisScreen (Atomic Lock)
- // but we keep the safety bypass here just in case.
- if (_isIsolateBusy) return <DetectionResult>[];
- _isIsolateBusy = true;
- final replyPort = ReceivePort();
-
- _sendPort!.send({
- 'command': 'inference_stream',
- 'planes': image.planes.map((p) => {
- 'bytes': p.bytes,
- 'bytesPerRow': p.bytesPerRow,
- 'bytesPerPixel': p.bytesPerPixel,
- }).toList(),
- 'width': image.width,
- 'height': image.height,
- 'format': image.format.group,
- 'replyPort': replyPort.sendPort,
- });
- final detections = await replyPort.first;
- replyPort.close();
- _isIsolateBusy = false;
- return detections as List<DetectionResult>;
- }
  /// Entry point of the worker isolate.
  ///
  /// Protocol: immediately sends its own command [SendPort] back on
  /// [sendPort], then services 'init' / 'inference_static' /
  /// 'inference_stream' messages, replying on each request's 'replyPort'.
  ///
  /// NOTE(review): if Interpreter.fromBuffer throws during 'init', no reply
  /// is ever sent and the caller's init completer hangs — confirm the model
  /// asset is always valid, or extend the protocol with an error reply.
  static void _isolateEntry(SendPort sendPort) {
    final receivePort = ReceivePort();
    sendPort.send(receivePort.sendPort);
    // The interpreter lives for the whole lifetime of the isolate so the
    // model is parsed only once.
    Interpreter? interpreter;
    List<String> labels = [];
    receivePort.listen((message) {
      if (message is Map) {
        final command = message['command'];
        final replyPort = message['replyPort'] as SendPort;
        if (command == 'init') {
          final modelBytes = message['modelBytes'] as Uint8List;
          final labelData = message['labelData'] as String;
          final interpreterOptions = InterpreterOptions()..threads = 4;
          interpreter = Interpreter.fromBuffer(modelBytes, options: interpreterOptions);
          labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
          replyPort.send('init_done');
        } else if (command == 'inference_static') {
          // Guard: inference requested before (or after a failed) init.
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          final imageBytes = message['imageBytes'] as Uint8List;
          final results = _inferenceStaticTask(imageBytes, interpreter!, labels);
          replyPort.send(results);
        } else if (command == 'inference_stream') {
          // Guard: inference requested before (or after a failed) init.
          if (interpreter == null) {
            replyPort.send(<DetectionResult>[]);
            return;
          }
          final planes = message['planes'] as List<dynamic>;
          final width = message['width'] as int;
          final height = message['height'] as int;
          final format = message['format'];
          final results = _inferenceStreamTask(planes, width, height, format, interpreter!, labels);
          replyPort.send(results);
        }
      }
    });
  }
- static List<DetectionResult> _inferenceStaticTask(Uint8List imageBytes, Interpreter interpreter, List<String> labels) {
- try {
- final decoded = img.decodeImage(imageBytes);
- if (decoded == null) throw Exception('Could not decode image');
- final int width = decoded.width;
- final int height = decoded.height;
- final int size = width < height ? width : height;
- final int offsetX = (width - size) ~/ 2;
- final int offsetY = (height - size) ~/ 2;
-
- final cropped = img.copyCrop(decoded, x: offsetX, y: offsetY, width: size, height: size);
- final resized = img.copyResize(cropped, width: _inputSize, height: _inputSize, interpolation: img.Interpolation.linear);
- final inputTensor = List.generate(1, (_) =>
- List.generate(_inputSize, (y) =>
- List.generate(_inputSize, (x) {
- final pixel = resized.getPixel(x, y);
- return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
- })
- )
- );
- final outputShape = interpreter.getOutputTensors()[0].shape;
- final outputTensor = List.generate(1, (_) =>
- List.generate(outputShape[1], (_) =>
- List<double>.filled(outputShape[2], 0.0)
- )
- );
- interpreter.run(inputTensor, outputTensor);
- return _decodeDetections(
- outputTensor[0],
- labels,
- cropSize: size,
- offsetX: offsetX,
- offsetY: offsetY,
- fullWidth: width,
- fullHeight: height
- );
- } catch (e) {
- print('Isolate static inference error: $e');
- return <DetectionResult>[];
- }
- }
- static List<DetectionResult> _inferenceStreamTask(
- List<dynamic> planes, int width, int height, dynamic format,
- Interpreter interpreter, List<String> labels
- ) {
- try {
- final size = width < height ? width : height;
- final offsetX = (width - size) ~/ 2;
- final offsetY = (height - size) ~/ 2;
- img.Image? image;
- if (format == ImageFormatGroup.bgra8888) {
- final fullImage = img.Image.fromBytes(
- width: width,
- height: height,
- bytes: planes[0]['bytes'].buffer,
- format: img.Format.uint8,
- numChannels: 4,
- order: img.ChannelOrder.bgra,
- );
- image = img.copyCrop(fullImage, x: offsetX, y: offsetY, width: size, height: size);
- } else if (format == ImageFormatGroup.yuv420) {
- image = _convertYUV420ToImage(
- planes: planes,
- width: width,
- height: height,
- cropSize: size,
- offsetX: offsetX,
- offsetY: offsetY,
- );
- } else {
- print("TfliteService: Unsupported format: $format. Ensure platform correctly requests YUV420 or BGRA.");
- return <DetectionResult>[];
- }
- final resized = img.copyResize(image, width: _inputSize, height: _inputSize);
-
- final inputTensor = List.generate(1, (_) =>
- List.generate(_inputSize, (y) =>
- List.generate(_inputSize, (x) {
- final pixel = resized.getPixel(x, y);
- return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
- })
- )
- );
- final outputShape = interpreter.getOutputTensors()[0].shape;
- final outputTensor = List.generate(1, (_) =>
- List.generate(outputShape[1], (_) =>
- List<double>.filled(outputShape[2], 0.0)
- )
- );
- interpreter.run(inputTensor, outputTensor);
-
- return _decodeDetections(
- outputTensor[0],
- labels,
- cropSize: size,
- offsetX: offsetX,
- offsetY: offsetY,
- fullWidth: width,
- fullHeight: height
- );
- } catch (e) {
- print('Isolate stream inference error: $e');
- return <DetectionResult>[];
- }
- }
  /// Converts a YUV420 camera frame (as flattened plane maps) into a
  /// center-cropped BGRA [img.Image] of size [cropSize] x [cropSize].
  ///
  /// Respects per-plane row strides and the U/V pixel stride, so it works
  /// with both planar (I420) and semi-planar (NV21-style) Android buffers.
  /// Out-of-range indices leave the corresponding pixel as 0 (transparent
  /// black) rather than throwing.
  ///
  /// NOTE(review): the 0xAARRGGBB packing relies on little-endian memory
  /// layout (bytes land as B, G, R, A) — true on all current Android/iOS
  /// targets, but not portable to big-endian hosts.
  static img.Image _convertYUV420ToImage({
    required List<dynamic> planes,
    required int width,
    required int height,
    required int cropSize,
    required int offsetX,
    required int offsetY,
  }) {
    final yPlane = planes[0];
    final uPlane = planes[1];
    final vPlane = planes[2];
    final yBytes = yPlane['bytes'] as Uint8List;
    final uBytes = uPlane['bytes'] as Uint8List;
    final vBytes = vPlane['bytes'] as Uint8List;
    final yRowStride = yPlane['bytesPerRow'] as int;
    final uvRowStride = uPlane['bytesPerRow'] as int;
    final uvPixelStride = uPlane['bytesPerPixel'] as int;
    // Fast 32-bit Native memory buffer
    final Uint32List bgraData = Uint32List(cropSize * cropSize);
    int bufferIndex = 0;
    for (int y = 0; y < cropSize; y++) {
      for (int x = 0; x < cropSize; x++) {
        // Translate crop-local coordinates to full-frame coordinates.
        final int actualX = x + offsetX;
        final int actualY = y + offsetY;
        // Chroma is subsampled 2x2, hence the >> 1 on both axes.
        final int uvIndex = (uvRowStride * (actualY >> 1)) + (uvPixelStride * (actualX >> 1));
        final int yIndex = (actualY * yRowStride) + actualX;
        if (yIndex >= yBytes.length || uvIndex >= uBytes.length || uvIndex >= vBytes.length) {
          bufferIndex++;
          continue;
        }
        final int yp = yBytes[yIndex];
        final int up = uBytes[uvIndex];
        final int vp = vBytes[uvIndex];
        // Standard YUV to RGB conversion
        int r = (yp + (1.370705 * (vp - 128))).toInt();
        int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128))).toInt();
        int b = (yp + (1.732446 * (up - 128))).toInt();
        // Clamp inline for max speed
        r = r < 0 ? 0 : (r > 255 ? 255 : r);
        g = g < 0 ? 0 : (g > 255 ? 255 : g);
        b = b < 0 ? 0 : (b > 255 ? 255 : b);
        // Pack into 32-bit integer: 0xAARRGGBB -> Memory writes it Little Endian: B, G, R, A.
        bgraData[bufferIndex++] = (255 << 24) | (r << 16) | (g << 8) | b;
      }
    }

    return img.Image.fromBytes(
      width: cropSize,
      height: cropSize,
      bytes: bgraData.buffer,
      format: img.Format.uint8,
      numChannels: 4, // Packed 4 channels (BGRA)
      order: img.ChannelOrder.bgra, // Explicitly tell image package it's BGRA
    );
  }
- /// Decodes YOLO26 NMS-Free detections.
- /// Unlike legacy YOLOv8, this model produces unique, final predictions
- /// directly in the output tensor, eliminating the need for a secondary
- /// Non-Max Suppression (NMS) loop in Dart.
- static List<DetectionResult> _decodeDetections(
- List<List<double>> rawDetections,
- List<String> labels, {
- int? cropSize,
- int? offsetX,
- int? offsetY,
- int? fullWidth,
- int? fullHeight,
- }) {
- // YOLO26 E2E models typically return a fixed number of detections (e.g., top 100)
- // We only need to filter by confidence and map back to the original frame.
- final detections = <DetectionResult>[];
- for (final det in rawDetections) {
- if (det.length < 6) continue;
- final conf = det[4];
- if (conf < _confidenceThreshold) continue;
- double x1 = det[0].clamp(0.0, 1.0);
- double y1 = det[1].clamp(0.0, 1.0);
- double x2 = det[2].clamp(0.0, 1.0);
- double y2 = det[3].clamp(0.0, 1.0);
-
- // If crop info is provided, map back to full frame
- if (cropSize != null && offsetX != null && offsetY != null && fullWidth != null && fullHeight != null) {
- x1 = (x1 * cropSize + offsetX) / fullWidth;
- x2 = (x2 * cropSize + offsetX) / fullWidth;
- y1 = (y1 * cropSize + offsetY) / fullHeight;
- y2 = (y2 * cropSize + offsetY) / fullHeight;
- }
- final classId = det[5].round();
- if (x2 <= x1 || y2 <= y1) continue;
- final label = (classId >= 0 && classId < labels.length) ? labels[classId] : 'Unknown';
- detections.add(DetectionResult(
- className: label,
- classIndex: classId,
- confidence: conf,
- normalizedBox: Rect.fromLTRB(x1, y1, x2, y2),
- ));
- }
- detections.sort((a, b) => b.confidence.compareTo(a.confidence));
- return detections;
- }
- void dispose() {
- _receivePort?.close();
- if (_isolate != null) {
- _isolate!.kill(priority: Isolate.immediate);
- _isolate = null;
- }
- _isInitialized = false;
- }
- }