Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/lite/python/optimize/calibrator.py: 26%

77 statements  

# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for post training quantization with calibration."""
import numpy as np

from tensorflow.lite.python.convert_phase import Component
from tensorflow.lite.python.convert_phase import convert_phase
from tensorflow.lite.python.convert_phase import SubComponent
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.framework import dtypes
from tensorflow.python.util.lazy_loader import LazyLoader

# Lazy load since some of the performance benchmark skylark rules
# break dependencies. Must use double quotes to match code internal rewrite
# rule.
_calibration_wrapper = LazyLoader(
    "_calibration_wrapper",
    globals(),
    (
        "tensorflow.lite.python.optimize."
        "_pywrap_tensorflow_lite_calibration_wrapper"
    ),
)


def add_intermediate_tensors(model_content):
  """Adds intermediate tensors to fused op if needed."""
  return _calibration_wrapper.AddIntermediateTensors(model_content)

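# Illustrative note (not part of the original module): `add_intermediate_tensors`
# accepts flatbuffer bytes and returns flatbuffer bytes, so it can be applied to
# `model_content` before constructing a `Calibrator`, e.g. (hypothetical path):
#
#   with open("/tmp/model.tflite", "rb") as f:
#     model_bytes = add_intermediate_tensors(f.read())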

class Calibrator:
  """Calibrates a floating point model and then quantizes it.

  This is an internal class, not a public interface.
  """

  def __init__(
      self,
      model_content,
      custom_op_registerers_by_name=None,
      custom_op_registerers_by_func=None,
  ):
    """Constructor.

    Args:
      model_content: Content of a TF-Lite Flatbuffer file.
      custom_op_registerers_by_name: List of str (symbol names) that take a
        pointer to a MutableOpResolver and register custom ops.
      custom_op_registerers_by_func: List of functions that take a pointer to a
        MutableOpResolver and register custom ops.

    Raises:
      ValueError: If the calibrator was unable to open the model.
    """
    if not model_content:
      raise ValueError("`model_content` must be specified.")
    if custom_op_registerers_by_name is None:
      custom_op_registerers_by_name = []
    if custom_op_registerers_by_func is None:
      custom_op_registerers_by_func = []
    try:
      self._calibrator = _calibration_wrapper.CalibrationWrapper(
          model_content,
          custom_op_registerers_by_name,
          custom_op_registerers_by_func,
      )
      self._model_content = model_content
    except Exception as e:
      raise ValueError("Failed to parse the model: %s." % e)
    if not self._calibrator:
      raise ValueError("Failed to parse the model.")
    self._interpreter = None

  def _create_input_array_from_dict(self, signature_key, inputs):
    input_array = []
    signature_runner = self._interpreter.get_signature_runner(signature_key)
    input_details = sorted(
        signature_runner.get_input_details().items(),
        key=lambda item: item[1]["index"],
    )
    for input_name, _ in input_details:
      input_array.append(inputs[input_name])
    return input_array

  def _feed_tensors(self, dataset_gen, resize_input):
    """Feed tensors to the calibrator."""
    initialized = {}

    for sample in dataset_gen():
      if isinstance(sample, tuple):
        if not isinstance(sample[1], dict):
          raise ValueError(
              "The second element of a (signature_key, inputs) tuple sample "
              "must be a dictionary mapping input names to values."
          )
        # Convert signature based inputs to the tensor index based data.
        if self._interpreter is None:
          self._interpreter = Interpreter(model_content=self._model_content)
        signature_key = sample[0]
        input_array = self._create_input_array_from_dict(
            signature_key, sample[1]
        )
      elif isinstance(sample, dict):
        # Convert signature based inputs to the tensor index based data.
        if self._interpreter is None:
          self._interpreter = Interpreter(model_content=self._model_content)
        signature_key = None
        input_array = self._create_input_array_from_dict(None, sample)
      elif isinstance(sample, list):
        signature_key = None
        input_array = sample
      else:
        raise ValueError(
            "You need to provide either a dictionary with input "
            "names and values, a tuple with signature key and a "
            "dictionary with input names and values, or an array "
            "with input values in the order of input tensors of "
            "the graph in the representative_dataset function. "
            "Unsupported value from dataset: {}.".format(sample)
        )

      if signature_key not in initialized:
        initialized[signature_key] = True
        if resize_input:
          if signature_key is not None:
            self._calibrator.Prepare(
                [list(s.shape) for s in input_array], signature_key
            )
          else:
            self._calibrator.Prepare([list(s.shape) for s in input_array])
        else:
          if signature_key is not None:
            self._calibrator.Prepare(signature_key)
          else:
            self._calibrator.Prepare()
      if signature_key is not None:
        self._calibrator.FeedTensor(input_array, signature_key)
      else:
        self._calibrator.FeedTensor(input_array)

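  # Illustrative note (not part of the original module): a `dataset_gen` passed
  # to the methods below may yield samples in any of the three forms handled by
  # `_feed_tensors` above; the shapes and names here are hypothetical:
  #
  #   [np.zeros((1, 224, 224, 3), np.float32)]                   # positional list
  #   {"serving_input": np.zeros((1, 224, 224, 3), np.float32)}  # dict keyed by input name
  #   ("serving_default",
  #    {"serving_input": np.zeros((1, 224, 224, 3), np.float32)})  # (signature key, dict)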

  @convert_phase(
      Component.OPTIMIZE_TFLITE_MODEL,
      SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER,
  )
  def calibrate_and_quantize(
      self,
      dataset_gen,
      input_type,
      output_type,
      allow_float,
      activations_type=dtypes.int8,
      bias_type=dtypes.int32,
      resize_input=True,
      disable_per_channel=False,
  ):
    """Calibrates the model with the specified generator and then quantizes it.

    The input shapes of the calibrator are resized with the calibration data if
    `resize_input` is set.

    Args:
      dataset_gen: A generator function that yields calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean. False if the resulting model cannot perform float
        computation, useful when targeting an integer-only backend. If False,
        an error is raised for any operation that cannot be quantized;
        otherwise the model falls back to float ops.
      activations_type: A tf.dtype representing the desired type for
        activations.
      bias_type: A tf.dtype representing the desired type for bias.
      resize_input: A boolean. True if the shape of the sample data differs
        from the model's input shape.
      disable_per_channel: A boolean. True to disable per-channel quantization.

    Returns:
      A quantized model.
    """
    self._feed_tensors(dataset_gen, resize_input)
    return self._calibrator.QuantizeModel(
        np.dtype(input_type.as_numpy_dtype()).num,
        np.dtype(output_type.as_numpy_dtype()).num,
        allow_float,
        np.dtype(activations_type.as_numpy_dtype()).num,
        np.dtype(bias_type.as_numpy_dtype()).num,
        disable_per_channel,
    )

  @convert_phase(
      Component.OPTIMIZE_TFLITE_MODEL,
      SubComponent.QUANTIZE_USING_DEPRECATED_QUANTIZER,
  )
  def calibrate_and_quantize_single(
      self,
      dataset_gen,
      input_type,
      output_type,
      allow_float,
      op_output_name,
      resize_input=True,
  ):
    """Calibrates the model with the specified generator and then quantizes it.

    Only the single op whose output is `op_output_name` will be quantized.
    The input shapes of the calibrator are resized with the calibration data.

    Args:
      dataset_gen: A generator function that yields calibration samples.
      input_type: A tf.dtype representing the desired real-value input type.
      output_type: A tf.dtype representing the desired real-value output type.
      allow_float: A boolean. False if the resulting model cannot perform float
        computation, useful when targeting an integer-only backend. If False,
        an error is raised for any operation that cannot be quantized;
        otherwise the model falls back to float ops.
      op_output_name: A string; only the op with this output name is quantized.
      resize_input: A boolean. True if the shape of the sample data differs
        from the model's input shape.

    Returns:
      A quantized model.
    """
    self._feed_tensors(dataset_gen, resize_input)
    return self._calibrator.QuantizeModel(
        np.dtype(input_type.as_numpy_dtype()).num,
        np.dtype(output_type.as_numpy_dtype()).num,
        allow_float,
        op_output_name,
    )

  @convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE)
  def calibrate(self, dataset_gen):
    """Calibrates the model with the specified generator.

    Args:
      dataset_gen: A generator function that yields calibration samples.

    Returns:
      A model with min and max calibration stats.
    """
    self._feed_tensors(dataset_gen, resize_input=True)
    return self._calibrator.Calibrate()
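

# ==============================================================================
# Illustrative usage sketch (not part of the original module). The model path,
# input shape, and sample count below are hypothetical placeholders; the calls
# themselves follow the `Calibrator` API defined above.
if __name__ == "__main__":
  with open("/tmp/model.tflite", "rb") as f:  # hypothetical flatbuffer path
    model_bytes = f.read()

  def representative_gen():
    # Yield positional-list samples in the order of the model's input tensors.
    for _ in range(8):
      yield [np.random.rand(1, 224, 224, 3).astype(np.float32)]

  calibrator = Calibrator(model_bytes)
  # Quantize with the default int8 activations and no float fallback.
  quantized_bytes = calibrator.calibrate_and_quantize(
      representative_gen,
      dtypes.float32,  # real-value input type
      dtypes.float32,  # real-value output type
      False,           # allow_float
  )
  with open("/tmp/model_quant.tflite", "wb") as f:
    f.write(quantized_bytes)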