// tflite_service.dart

// (removed: line-number paste artifact from the source viewer)
import 'dart:async';
import 'dart:io';
import 'dart:isolate';
import 'dart:math';
import 'dart:typed_data';
import 'dart:ui';

import 'package:camera/camera.dart';
import 'package:flutter/foundation.dart';
import 'package:flutter/services.dart';
import 'package:image/image.dart' as img;
import 'package:image_picker/image_picker.dart';
import 'package:tflite_flutter/tflite_flutter.dart';
  13. class DetectionResult {
  14. final String className;
  15. final int classIndex;
  16. final double confidence;
  17. final Rect normalizedBox;
  18. const DetectionResult({
  19. required this.className,
  20. required this.classIndex,
  21. required this.confidence,
  22. required this.normalizedBox,
  23. });
  24. Color getStatusColor() {
  25. if (className == 'Empty_Bunch' || className == 'Abnormal') return const Color(0xFFF44336);
  26. if (className == 'Ripe' || className == 'Overripe') return const Color(0xFF4CAF50);
  27. return const Color(0xFFFF9800);
  28. }
  29. }
  30. class TfliteService {
  31. static const _modelAsset = 'best.tflite';
  32. static const _labelsAsset = 'labels.txt';
  33. static const int _inputSize = 640;
  34. static const double _confidenceThreshold = 0.25;
  35. Isolate? _isolate;
  36. SendPort? _sendPort;
  37. ReceivePort? _receivePort;
  38. List<String> _labels = [];
  39. final ImagePicker _picker = ImagePicker();
  40. bool _isInitialized = false;
  41. bool _isIsolateBusy = false;
  42. bool get isInitialized => _isInitialized;
  43. bool get isIsolateBusy => _isIsolateBusy;
  44. Future<void> initModel() async {
  45. try {
  46. final labelData = await rootBundle.loadString('assets/$_labelsAsset');
  47. _labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
  48. final modelData = await rootBundle.load('assets/$_modelAsset');
  49. final modelBytes = modelData.buffer.asUint8List();
  50. _receivePort = ReceivePort();
  51. _isolate = await Isolate.spawn(_isolateEntry, _receivePort!.sendPort);
  52. final completer = Completer<SendPort>();
  53. StreamSubscription? sub;
  54. sub = _receivePort!.listen((message) {
  55. if (message is SendPort) {
  56. completer.complete(message);
  57. sub?.cancel();
  58. }
  59. });
  60. _sendPort = await completer.future;
  61. final initCompleter = Completer<void>();
  62. final initReplyPort = ReceivePort();
  63. _sendPort!.send({
  64. 'command': 'init',
  65. 'modelBytes': modelBytes,
  66. 'labelData': labelData,
  67. 'replyPort': initReplyPort.sendPort,
  68. });
  69. StreamSubscription? initSub;
  70. initSub = initReplyPort.listen((message) {
  71. if (message == 'init_done') {
  72. initCompleter.complete();
  73. initSub?.cancel();
  74. initReplyPort.close();
  75. }
  76. });
  77. await initCompleter.future;
  78. _isInitialized = true;
  79. print('TfliteService: Model loaded via persistent isolate.');
  80. } catch (e) {
  81. print('TfliteService init error: $e');
  82. rethrow;
  83. }
  84. }
  85. Future<XFile?> pickImage() async {
  86. return await _picker.pickImage(
  87. source: ImageSource.gallery,
  88. maxWidth: _inputSize.toDouble(),
  89. maxHeight: _inputSize.toDouble(),
  90. );
  91. }
  92. Future<List<DetectionResult>> runInference(String imagePath) async {
  93. if (!_isInitialized) await initModel();
  94. final imageBytes = await File(imagePath).readAsBytes();
  95. final replyPort = ReceivePort();
  96. _sendPort!.send({
  97. 'command': 'inference_static',
  98. 'imageBytes': imageBytes,
  99. 'replyPort': replyPort.sendPort,
  100. });
  101. final detections = await replyPort.first;
  102. replyPort.close();
  103. return detections as List<DetectionResult>;
  104. }
  105. Future<List<DetectionResult>> runInferenceOnStream(CameraImage image) async {
  106. if (!_isInitialized) await initModel();
  107. // The gatekeeper logic has moved up to LiveAnalysisScreen (Atomic Lock)
  108. // but we keep the safety bypass here just in case.
  109. if (_isIsolateBusy) return <DetectionResult>[];
  110. _isIsolateBusy = true;
  111. final replyPort = ReceivePort();
  112. _sendPort!.send({
  113. 'command': 'inference_stream',
  114. 'planes': image.planes.map((p) => {
  115. 'bytes': p.bytes,
  116. 'bytesPerRow': p.bytesPerRow,
  117. 'bytesPerPixel': p.bytesPerPixel,
  118. }).toList(),
  119. 'width': image.width,
  120. 'height': image.height,
  121. 'format': image.format.group,
  122. 'replyPort': replyPort.sendPort,
  123. });
  124. final detections = await replyPort.first;
  125. replyPort.close();
  126. _isIsolateBusy = false;
  127. return detections as List<DetectionResult>;
  128. }
  129. static void _isolateEntry(SendPort sendPort) {
  130. final receivePort = ReceivePort();
  131. sendPort.send(receivePort.sendPort);
  132. Interpreter? interpreter;
  133. List<String> labels = [];
  134. receivePort.listen((message) {
  135. if (message is Map) {
  136. final command = message['command'];
  137. final replyPort = message['replyPort'] as SendPort;
  138. if (command == 'init') {
  139. final modelBytes = message['modelBytes'] as Uint8List;
  140. final labelData = message['labelData'] as String;
  141. final interpreterOptions = InterpreterOptions()..threads = 4;
  142. interpreter = Interpreter.fromBuffer(modelBytes, options: interpreterOptions);
  143. labels = labelData.split('\n').where((l) => l.trim().isNotEmpty).map((l) => l.trim()).toList();
  144. replyPort.send('init_done');
  145. } else if (command == 'inference_static') {
  146. if (interpreter == null) {
  147. replyPort.send(<DetectionResult>[]);
  148. return;
  149. }
  150. final imageBytes = message['imageBytes'] as Uint8List;
  151. final results = _inferenceStaticTask(imageBytes, interpreter!, labels);
  152. replyPort.send(results);
  153. } else if (command == 'inference_stream') {
  154. if (interpreter == null) {
  155. replyPort.send(<DetectionResult>[]);
  156. return;
  157. }
  158. final planes = message['planes'] as List<dynamic>;
  159. final width = message['width'] as int;
  160. final height = message['height'] as int;
  161. final format = message['format'];
  162. final results = _inferenceStreamTask(planes, width, height, format, interpreter!, labels);
  163. replyPort.send(results);
  164. }
  165. }
  166. });
  167. }
  168. static List<DetectionResult> _inferenceStaticTask(Uint8List imageBytes, Interpreter interpreter, List<String> labels) {
  169. try {
  170. final decoded = img.decodeImage(imageBytes);
  171. if (decoded == null) throw Exception('Could not decode image');
  172. final int width = decoded.width;
  173. final int height = decoded.height;
  174. final int size = width < height ? width : height;
  175. final int offsetX = (width - size) ~/ 2;
  176. final int offsetY = (height - size) ~/ 2;
  177. final cropped = img.copyCrop(decoded, x: offsetX, y: offsetY, width: size, height: size);
  178. final resized = img.copyResize(cropped, width: _inputSize, height: _inputSize, interpolation: img.Interpolation.linear);
  179. final inputTensor = List.generate(1, (_) =>
  180. List.generate(_inputSize, (y) =>
  181. List.generate(_inputSize, (x) {
  182. final pixel = resized.getPixel(x, y);
  183. return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
  184. })
  185. )
  186. );
  187. final outputShape = interpreter.getOutputTensors()[0].shape;
  188. final outputTensor = List.generate(1, (_) =>
  189. List.generate(outputShape[1], (_) =>
  190. List<double>.filled(outputShape[2], 0.0)
  191. )
  192. );
  193. interpreter.run(inputTensor, outputTensor);
  194. return _decodeDetections(
  195. outputTensor[0],
  196. labels,
  197. cropSize: size,
  198. offsetX: offsetX,
  199. offsetY: offsetY,
  200. fullWidth: width,
  201. fullHeight: height
  202. );
  203. } catch (e) {
  204. print('Isolate static inference error: $e');
  205. return <DetectionResult>[];
  206. }
  207. }
  208. static List<DetectionResult> _inferenceStreamTask(
  209. List<dynamic> planes, int width, int height, dynamic format,
  210. Interpreter interpreter, List<String> labels
  211. ) {
  212. try {
  213. final size = width < height ? width : height;
  214. final offsetX = (width - size) ~/ 2;
  215. final offsetY = (height - size) ~/ 2;
  216. img.Image? image;
  217. if (format == ImageFormatGroup.bgra8888) {
  218. final fullImage = img.Image.fromBytes(
  219. width: width,
  220. height: height,
  221. bytes: planes[0]['bytes'].buffer,
  222. format: img.Format.uint8,
  223. numChannels: 4,
  224. order: img.ChannelOrder.bgra,
  225. );
  226. image = img.copyCrop(fullImage, x: offsetX, y: offsetY, width: size, height: size);
  227. } else if (format == ImageFormatGroup.yuv420) {
  228. image = _convertYUV420ToImage(
  229. planes: planes,
  230. width: width,
  231. height: height,
  232. cropSize: size,
  233. offsetX: offsetX,
  234. offsetY: offsetY,
  235. );
  236. } else {
  237. print("TfliteService: Unsupported format: $format. Ensure platform correctly requests YUV420 or BGRA.");
  238. return <DetectionResult>[];
  239. }
  240. final resized = img.copyResize(image, width: _inputSize, height: _inputSize);
  241. final inputTensor = List.generate(1, (_) =>
  242. List.generate(_inputSize, (y) =>
  243. List.generate(_inputSize, (x) {
  244. final pixel = resized.getPixel(x, y);
  245. return [pixel.r / 255.0, pixel.g / 255.0, pixel.b / 255.0];
  246. })
  247. )
  248. );
  249. final outputShape = interpreter.getOutputTensors()[0].shape;
  250. final outputTensor = List.generate(1, (_) =>
  251. List.generate(outputShape[1], (_) =>
  252. List<double>.filled(outputShape[2], 0.0)
  253. )
  254. );
  255. interpreter.run(inputTensor, outputTensor);
  256. return _decodeDetections(
  257. outputTensor[0],
  258. labels,
  259. cropSize: size,
  260. offsetX: offsetX,
  261. offsetY: offsetY,
  262. fullWidth: width,
  263. fullHeight: height
  264. );
  265. } catch (e) {
  266. print('Isolate stream inference error: $e');
  267. return <DetectionResult>[];
  268. }
  269. }
  270. static img.Image _convertYUV420ToImage({
  271. required List<dynamic> planes,
  272. required int width,
  273. required int height,
  274. required int cropSize,
  275. required int offsetX,
  276. required int offsetY,
  277. }) {
  278. final yPlane = planes[0];
  279. final uPlane = planes[1];
  280. final vPlane = planes[2];
  281. final yBytes = yPlane['bytes'] as Uint8List;
  282. final uBytes = uPlane['bytes'] as Uint8List;
  283. final vBytes = vPlane['bytes'] as Uint8List;
  284. final yRowStride = yPlane['bytesPerRow'] as int;
  285. final uvRowStride = uPlane['bytesPerRow'] as int;
  286. final uvPixelStride = uPlane['bytesPerPixel'] as int;
  287. // Fast 32-bit Native memory buffer
  288. final Uint32List bgraData = Uint32List(cropSize * cropSize);
  289. int bufferIndex = 0;
  290. for (int y = 0; y < cropSize; y++) {
  291. for (int x = 0; x < cropSize; x++) {
  292. final int actualX = x + offsetX;
  293. final int actualY = y + offsetY;
  294. final int uvIndex = (uvRowStride * (actualY >> 1)) + (uvPixelStride * (actualX >> 1));
  295. final int yIndex = (actualY * yRowStride) + actualX;
  296. if (yIndex >= yBytes.length || uvIndex >= uBytes.length || uvIndex >= vBytes.length) {
  297. bufferIndex++;
  298. continue;
  299. }
  300. final int yp = yBytes[yIndex];
  301. final int up = uBytes[uvIndex];
  302. final int vp = vBytes[uvIndex];
  303. // Standard YUV to RGB conversion
  304. int r = (yp + (1.370705 * (vp - 128))).toInt();
  305. int g = (yp - (0.337633 * (up - 128)) - (0.698001 * (vp - 128))).toInt();
  306. int b = (yp + (1.732446 * (up - 128))).toInt();
  307. // Clamp inline for max speed
  308. r = r < 0 ? 0 : (r > 255 ? 255 : r);
  309. g = g < 0 ? 0 : (g > 255 ? 255 : g);
  310. b = b < 0 ? 0 : (b > 255 ? 255 : b);
  311. // Pack into 32-bit integer: 0xAARRGGBB -> Memory writes it Little Endian: B, G, R, A.
  312. bgraData[bufferIndex++] = (255 << 24) | (r << 16) | (g << 8) | b;
  313. }
  314. }
  315. return img.Image.fromBytes(
  316. width: cropSize,
  317. height: cropSize,
  318. bytes: bgraData.buffer,
  319. format: img.Format.uint8,
  320. numChannels: 4, // Packed 4 channels (BGRA)
  321. order: img.ChannelOrder.bgra, // Explicitly tell image package it's BGRA
  322. );
  323. }
  324. /// Decodes YOLO26 NMS-Free detections.
  325. /// Unlike legacy YOLOv8, this model produces unique, final predictions
  326. /// directly in the output tensor, eliminating the need for a secondary
  327. /// Non-Max Suppression (NMS) loop in Dart.
  328. static List<DetectionResult> _decodeDetections(
  329. List<List<double>> rawDetections,
  330. List<String> labels, {
  331. int? cropSize,
  332. int? offsetX,
  333. int? offsetY,
  334. int? fullWidth,
  335. int? fullHeight,
  336. }) {
  337. // YOLO26 E2E models typically return a fixed number of detections (e.g., top 100)
  338. // We only need to filter by confidence and map back to the original frame.
  339. final detections = <DetectionResult>[];
  340. for (final det in rawDetections) {
  341. if (det.length < 6) continue;
  342. final conf = det[4];
  343. if (conf < _confidenceThreshold) continue;
  344. double x1 = det[0].clamp(0.0, 1.0);
  345. double y1 = det[1].clamp(0.0, 1.0);
  346. double x2 = det[2].clamp(0.0, 1.0);
  347. double y2 = det[3].clamp(0.0, 1.0);
  348. // If crop info is provided, map back to full frame
  349. if (cropSize != null && offsetX != null && offsetY != null && fullWidth != null && fullHeight != null) {
  350. x1 = (x1 * cropSize + offsetX) / fullWidth;
  351. x2 = (x2 * cropSize + offsetX) / fullWidth;
  352. y1 = (y1 * cropSize + offsetY) / fullHeight;
  353. y2 = (y2 * cropSize + offsetY) / fullHeight;
  354. }
  355. final classId = det[5].round();
  356. if (x2 <= x1 || y2 <= y1) continue;
  357. final label = (classId >= 0 && classId < labels.length) ? labels[classId] : 'Unknown';
  358. detections.add(DetectionResult(
  359. className: label,
  360. classIndex: classId,
  361. confidence: conf,
  362. normalizedBox: Rect.fromLTRB(x1, y1, x2, y2),
  363. ));
  364. }
  365. detections.sort((a, b) => b.confidence.compareTo(a.confidence));
  366. return detections;
  367. }
  368. void dispose() {
  369. _receivePort?.close();
  370. if (_isolate != null) {
  371. _isolate!.kill(priority: Isolate.immediate);
  372. _isolate = null;
  373. }
  374. _isInitialized = false;
  375. }
  376. }