import tensorflow as tf
import numpy as np
import os
import cv2
# 1. Path setup: locate the directory that actually contains saved_model.pb.
saved_model_path = 'best_saved_model'

# The SavedModel may sit at the top level or be nested one or more levels
# down (depending on how it was exported); search downward only when the
# direct path misses.
target_saved_model = saved_model_path
if not os.path.exists(os.path.join(saved_model_path, 'saved_model.pb')):
    located = next(
        (root for root, _dirs, files in os.walk(saved_model_path)
         if 'saved_model.pb' in files),
        None,
    )
    if located is not None:
        target_saved_model = located
print(f"Using SavedModel at: {target_saved_model}")
# 2. Representative Dataset
def representative_dataset(img_dir='unified_dataset/images/val', limit=50,
                           size=(640, 640)):
    """Yield up to ``limit`` preprocessed calibration images for int8 quantization.

    Each yielded item is a single-element list holding a float32 array of
    shape (1, H, W, 3), RGB, scaled to [0, 1] — the layout the TFLite
    converter feeds to the model during calibration.

    Args:
        img_dir: Directory scanned (non-recursively) for image files.
        limit: Maximum number of images to yield.
        size: (width, height) tuple passed to cv2.resize.
    """
    count = 0
    # Sort for a deterministic calibration set across runs; os.listdir
    # order is filesystem-dependent.
    for fname in sorted(os.listdir(img_dir)):
        if count >= limit:
            break  # stop scanning once enough samples have been yielded
        # Case-insensitive match so .JPG / .PNG exports are not skipped.
        if not fname.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue
        img = cv2.imread(os.path.join(img_dir, fname))
        if img is None:
            continue  # unreadable/corrupt file: skip without counting it
        img = cv2.resize(img, size)
        # OpenCV loads BGR; the model was trained on RGB input.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.0
        img = np.expand_dims(img, axis=0)  # add batch dimension
        yield [img]
        count += 1
# 3. Converter: full-integer (int8) post-training quantization.
converter = tf.lite.TFLiteConverter.from_saved_model(target_saved_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
# Calibration data is required so activation ranges can be measured.
converter.representative_dataset = representative_dataset
# Restrict to int8 builtin kernels (conversion fails if an op has none).
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
# Quantize the model's input/output tensors too, for a fully integer model.
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8

try:
    tflite_model = converter.convert()
except Exception as e:
    # Conversion can fail for many reasons (unsupported ops, bad calibration
    # data); report it and exit non-zero so callers/CI notice the failure
    # instead of seeing a successful exit with no .tflite produced.
    print(f"Conversion failed: {e}")
    raise SystemExit(1)

output_path = 'best_int8.tflite'
with open(output_path, 'wb') as out_file:
    out_file.write(tflite_model)
print(f"Success: {output_path} generated.")