Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/data/ops/options.py: 33%

294 statements (coverage.py v7.4.0, created at 2024-01-03 07:57 +0000)

# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API for specifying `tf.data` options."""

import enum
import platform

from absl import logging

from tensorflow.core.framework import dataset_options_pb2
from tensorflow.core.framework import model_pb2
from tensorflow.python.data.ops import test_mode
from tensorflow.python.data.util import options as options_lib
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export

@tf_export("data.experimental.AutotuneAlgorithm")
class AutotuneAlgorithm(enum.Enum):
  """Represents the type of autotuning algorithm to use.

  DEFAULT: The default behavior is implementation specific and may change over
  time.

  HILL_CLIMB: In each optimization step, this algorithm chooses the optimal
  parameter and increases its value by 1.

  GRADIENT_DESCENT: In each optimization step, this algorithm updates the
  parameter values in the optimal direction.

  MAX_PARALLELISM: Similar to HILL_CLIMB but uses a relaxed stopping condition,
  allowing the optimization to oversubscribe the CPU.

  STAGE_BASED: In each optimization step, this algorithm chooses the worst
  bottleneck parameter and increases its value by 1.
  """
  DEFAULT = 0
  HILL_CLIMB = 1
  GRADIENT_DESCENT = 2
  MAX_PARALLELISM = 3
  STAGE_BASED = 4

  @classmethod
  def _to_proto(cls, obj):
    if obj == cls.DEFAULT:
      return model_pb2.AutotuneAlgorithm.DEFAULT
    if obj == cls.HILL_CLIMB:
      return model_pb2.AutotuneAlgorithm.HILL_CLIMB
    if obj == cls.GRADIENT_DESCENT:
      return model_pb2.AutotuneAlgorithm.GRADIENT_DESCENT
    if obj == cls.MAX_PARALLELISM:
      return model_pb2.AutotuneAlgorithm.MAX_PARALLELISM
    if obj == cls.STAGE_BASED:
      return model_pb2.AutotuneAlgorithm.STAGE_BASED
    raise ValueError(
        f"Invalid `obj`. Supported values include `DEFAULT`, `HILL_CLIMB`, "
        f"`GRADIENT_DESCENT`, `MAX_PARALLELISM`, and `STAGE_BASED`. "
        f"Got {obj.name}.")

  @classmethod
  def _from_proto(cls, pb):
    if pb == model_pb2.AutotuneAlgorithm.DEFAULT:
      return cls.DEFAULT
    if pb == model_pb2.AutotuneAlgorithm.HILL_CLIMB:
      return cls.HILL_CLIMB
    if pb == model_pb2.AutotuneAlgorithm.GRADIENT_DESCENT:
      return cls.GRADIENT_DESCENT
    if pb == model_pb2.AutotuneAlgorithm.MAX_PARALLELISM:
      return cls.MAX_PARALLELISM
    if pb == model_pb2.AutotuneAlgorithm.STAGE_BASED:
      return cls.STAGE_BASED
    raise ValueError(
        f"Invalid `pb`. Supported values include `DEFAULT`, `HILL_CLIMB`, "
        f"`GRADIENT_DESCENT`, `MAX_PARALLELISM`, and `STAGE_BASED`. Got {pb}.")

@tf_export("data.experimental.AutoShardPolicy")
class AutoShardPolicy(enum.IntEnum):
  """Represents the type of auto-sharding to use.

  OFF: No sharding will be performed.

  AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.

  FILE: Shards by input files (i.e. each worker will get a set of files to
  process). When this option is selected, make sure that there are at least as
  many files as workers. If there are fewer input files than workers, a runtime
  error will be raised.

  DATA: Shards by elements produced by the dataset. Each worker will process
  the whole dataset and discard the portion that is not for itself. Note that
  for this mode to correctly partition the dataset elements, the dataset needs
  to produce elements in a deterministic order.

  HINT: Looks for the presence of `shard(SHARD_HINT, ...)`, which is treated as
  a placeholder to replace with `shard(num_workers, worker_index)`.
  """

  # LINT.IfChange
  OFF = -1
  AUTO = 0
  FILE = 1
  DATA = 2
  HINT = 3
  # LINT.ThenChange(//tensorflow/python/data/experimental/ops/data_service_ops.py:tf_data_service_sharding_policy)

  @classmethod
  def _to_proto(cls, obj):
    """Convert enum to proto."""
    if obj == cls.OFF:
      return dataset_options_pb2.AutoShardPolicy.OFF
    if obj == cls.FILE:
      return dataset_options_pb2.AutoShardPolicy.FILE
    if obj == cls.DATA:
      return dataset_options_pb2.AutoShardPolicy.DATA
    if obj == cls.AUTO:
      return dataset_options_pb2.AutoShardPolicy.AUTO
    if obj == cls.HINT:
      return dataset_options_pb2.AutoShardPolicy.HINT
    raise ValueError(
        f"Invalid `obj`. Supported values include `OFF`, `FILE`, `DATA`, "
        f"`AUTO`, and `HINT`. Got {obj.name}.")

  @classmethod
  def _from_proto(cls, pb):
    """Convert proto to enum."""
    if pb == dataset_options_pb2.AutoShardPolicy.OFF:
      return cls.OFF
    if pb == dataset_options_pb2.AutoShardPolicy.FILE:
      return cls.FILE
    if pb == dataset_options_pb2.AutoShardPolicy.DATA:
      return cls.DATA
    if pb == dataset_options_pb2.AutoShardPolicy.AUTO:
      return cls.AUTO
    if pb == dataset_options_pb2.AutoShardPolicy.HINT:
      return cls.HINT
    raise ValueError(
        f"Invalid `pb`. Supported values include `OFF`, `FILE`, `DATA`, "
        f"`AUTO`, and `HINT`. Got {pb}.")
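
# A minimal sketch (editor's addition, not upstream code): attaches a sharding
# policy to a dataset through the public API. Assumes an installed TensorFlow
# (`tf`) whose public API re-exports these symbols.
def _example_auto_shard_policy():
  import tensorflow as tf  # Local import; illustrative only.
  options = tf.data.Options()
  # Shard by data rather than files: each worker reads the whole dataset and
  # keeps only its own elements.
  options.experimental_distribute.auto_shard_policy = (
      tf.data.experimental.AutoShardPolicy.DATA)
  return tf.data.Dataset.range(8).with_options(options)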

@tf_export("data.experimental.ExternalStatePolicy")
class ExternalStatePolicy(enum.Enum):
  """Represents how to handle external state during serialization.

  See the `tf.data.Options.experimental_external_state_policy` documentation
  for more information.
  """
  WARN = 0
  IGNORE = 1
  FAIL = 2

  @classmethod
  def _to_proto(cls, obj):
    """Convert enum to proto."""
    if obj == cls.IGNORE:
      return dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE
    if obj == cls.FAIL:
      return dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL
    if obj == cls.WARN:
      return dataset_options_pb2.ExternalStatePolicy.POLICY_WARN
    raise ValueError(
        f"Invalid `obj`. Supported values include `POLICY_IGNORE`, "
        f"`POLICY_FAIL`, and `POLICY_WARN`. Got {obj.name}.")

  @classmethod
  def _from_proto(cls, pb):
    """Convert proto to enum."""
    if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_IGNORE:
      return cls.IGNORE
    if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_FAIL:
      return cls.FAIL
    if pb == dataset_options_pb2.ExternalStatePolicy.POLICY_WARN:
      return cls.WARN
    raise ValueError(
        f"Invalid `pb`. Supported values include `POLICY_IGNORE`, "
        f"`POLICY_FAIL`, and `POLICY_WARN`. Got {pb}.")

@tf_export("data.experimental.AutotuneOptions")
class AutotuneOptions(options_lib.OptionsBase):
  """Represents options for autotuning dataset performance.

  ```python
  options = tf.data.Options()
  options.autotune.enabled = False
  dataset = dataset.with_options(options)
  ```
  """

  enabled = options_lib.create_option(
      name="enabled",
      ty=bool,
      docstring="Whether to automatically tune performance knobs. If None, "
      "defaults to True.")

  cpu_budget = options_lib.create_option(
      name="cpu_budget",
      ty=int,
      docstring="When autotuning is enabled (through `autotune`), determines "
      "the CPU budget to use. Values greater than the number of schedulable "
      "CPU cores are allowed but may result in CPU contention. If None, "
      "defaults to the number of schedulable CPU cores.")

  ram_budget = options_lib.create_option(
      name="ram_budget",
      ty=int,
      docstring="When autotuning is enabled (through `autotune`), determines "
      "the RAM budget to use. Values greater than the available RAM in bytes "
      "may result in OOM. If None, defaults to half of the available RAM in "
      "bytes.")

  autotune_algorithm = options_lib.create_option(
      name="autotune_algorithm",
      ty=AutotuneAlgorithm,
      docstring="When autotuning is enabled (through `autotune`), determines "
      "the algorithm to use.")

  def _to_proto(self):
    pb = dataset_options_pb2.AutotuneOptions()
    if self.enabled is not None:
      pb.enabled = self.enabled
    if self.cpu_budget is not None:
      pb.cpu_budget = self.cpu_budget
    if self.ram_budget is not None:
      pb.ram_budget = self.ram_budget
    if self.autotune_algorithm is not None:
      pb.autotune_algorithm = AutotuneAlgorithm._to_proto(  # pylint: disable=protected-access
          self.autotune_algorithm)
    return pb

  def _from_proto(self, pb):
    if pb.WhichOneof("optional_enabled") is not None:
      self.enabled = pb.enabled
    if pb.WhichOneof("optional_cpu_budget") is not None:
      self.cpu_budget = pb.cpu_budget
    if pb.WhichOneof("optional_ram_budget") is not None:
      self.ram_budget = pb.ram_budget
    if pb.WhichOneof("optional_autotune_algorithm") is not None:
      self.autotune_algorithm = AutotuneAlgorithm._from_proto(  # pylint: disable=protected-access
          pb.autotune_algorithm)

  def _set_mutable(self, mutable):
    """Change the mutability value to `mutable` on this options and children."""
    # pylint: disable=protected-access
    object.__setattr__(self, "_mutable", mutable)
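
# A minimal sketch (editor's addition, not upstream code): tightens the
# autotuner's resource budgets. The budget values here are arbitrary
# illustrations, not recommendations.
def _example_autotune_budgets():
  import tensorflow as tf  # Local import; illustrative only.
  options = tf.data.Options()
  options.autotune.enabled = True
  options.autotune.cpu_budget = 4         # Use at most 4 schedulable cores.
  options.autotune.ram_budget = 1 << 30   # Use at most 1 GiB for buffers.
  return tf.data.Dataset.range(16).with_options(options)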

@tf_export("data.experimental.DistributeOptions")
class DistributeOptions(options_lib.OptionsBase):
  """Represents options for distributed data processing.

  You can set the distribution options of a dataset through the
  `experimental_distribute` property of `tf.data.Options`; the property is
  an instance of `tf.data.experimental.DistributeOptions`.

  ```python
  options = tf.data.Options()
  options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
  dataset = dataset.with_options(options)
  ```
  """

  auto_shard_policy = options_lib.create_option(
      name="auto_shard_policy",
      ty=AutoShardPolicy,
      docstring="The type of sharding to use. See "
      "`tf.data.experimental.AutoShardPolicy` for additional information.",
      default_factory=lambda: AutoShardPolicy.AUTO)

  num_devices = options_lib.create_option(
      name="num_devices",
      ty=int,
      docstring=
      "The number of devices attached to this input pipeline. This will be "
      "automatically set by `MultiDeviceIterator`.")

  def _to_proto(self):
    pb = dataset_options_pb2.DistributeOptions()
    pb.auto_shard_policy = AutoShardPolicy._to_proto(self.auto_shard_policy)  # pylint: disable=protected-access
    if self.num_devices is not None:
      pb.num_devices = self.num_devices
    return pb

  def _from_proto(self, pb):
    self.auto_shard_policy = AutoShardPolicy._from_proto(pb.auto_shard_policy)  # pylint: disable=protected-access
    if pb.WhichOneof("optional_num_devices") is not None:
      self.num_devices = pb.num_devices
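
# A minimal sketch (editor's addition, not upstream code): serializes
# `DistributeOptions` to its proto form and back via the private helpers,
# mirroring what tf.data does internally when options cross a graph boundary.
def _example_distribute_options_roundtrip():
  opts = DistributeOptions()
  opts.auto_shard_policy = AutoShardPolicy.FILE
  pb = opts._to_proto()  # pylint: disable=protected-access
  restored = DistributeOptions()
  restored._from_proto(pb)  # pylint: disable=protected-access
  assert restored.auto_shard_policy == AutoShardPolicy.FILE
  return restored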

@tf_export("data.experimental.OptimizationOptions")
class OptimizationOptions(options_lib.OptionsBase):
  """Represents options for dataset optimizations.

  You can set the optimization options of a dataset through the
  `experimental_optimization` property of `tf.data.Options`; the property is
  an instance of `tf.data.experimental.OptimizationOptions`.

  ```python
  options = tf.data.Options()
  options.experimental_optimization.noop_elimination = True
  options.experimental_optimization.apply_default_optimizations = False
  dataset = dataset.with_options(options)
  ```
  """
  apply_default_optimizations = options_lib.create_option(
      name="apply_default_optimizations",
      ty=bool,
      docstring=
      "Whether to apply default graph optimizations. If False, only graph "
      "optimizations that have been explicitly enabled will be applied.")

  filter_fusion = options_lib.create_option(
      name="filter_fusion",
      ty=bool,
      docstring=
      "Whether to fuse filter transformations. If None, defaults to False.")

  filter_parallelization = options_lib.create_option(
      name="filter_parallelization",
      ty=bool,
      docstring=
      "Whether to parallelize stateless filter transformations. If None, "
      "defaults to False.")

  inject_prefetch = options_lib.create_option(
      name="inject_prefetch",
      ty=bool,
      docstring=
      "Whether to inject a prefetch transformation as the last transformation "
      "when the last transformation is a synchronous transformation. If None, "
      "defaults to True.")

  map_and_batch_fusion = options_lib.create_option(
      name="map_and_batch_fusion",
      ty=bool,
      docstring=
      "Whether to fuse map and batch transformations. If None, defaults to "
      "True.")

  map_and_filter_fusion = options_lib.create_option(
      name="map_and_filter_fusion",
      ty=bool,
      docstring=
      "Whether to fuse map and filter transformations. If None, defaults to "
      "False.")

  map_fusion = options_lib.create_option(
      name="map_fusion",
      ty=bool,
      docstring="Whether to fuse map transformations. If None, defaults to "
      "False.")

  map_parallelization = options_lib.create_option(
      name="map_parallelization",
      ty=bool,
      docstring=
      "Whether to parallelize stateless map transformations. If None, defaults "
      "to True.")

  noop_elimination = options_lib.create_option(
      name="noop_elimination",
      ty=bool,
      docstring=
      "Whether to eliminate no-op transformations. If None, defaults to True.")

  parallel_batch = options_lib.create_option(
      name="parallel_batch",
      ty=bool,
      docstring="Whether to parallelize copying of batch elements. If None, "
      "defaults to True.")

  shuffle_and_repeat_fusion = options_lib.create_option(
      name="shuffle_and_repeat_fusion",
      ty=bool,
      docstring="Whether to fuse shuffle and repeat transformations. If None, "
      "defaults to True.")

  warm_start = options_lib.create_option(
      name="warm_start",
      ty=bool,
      docstring=(
          "Whether to start background threads of asynchronous transformations"
          " upon iterator creation (as opposed to upon first call to"
          " `GetNext`). If None, defaults to False. Note that this can improve"
          " the latency of the initial 'GetNext' call at the expense of"
          " requiring more memory to hold prefetched elements between the time"
          " of iterator construction and usage."
      ),
      default_factory=lambda: True if test_mode.TEST_MODE else False,
  )

  def _to_proto(self):
    pb = dataset_options_pb2.OptimizationOptions()
    if self.apply_default_optimizations is not None:
      pb.apply_default_optimizations = self.apply_default_optimizations
    if self.filter_fusion is not None:
      pb.filter_fusion = self.filter_fusion
    if self.filter_parallelization is not None:
      pb.filter_parallelization = self.filter_parallelization
    if self.inject_prefetch is not None:
      pb.inject_prefetch = self.inject_prefetch
    if self.map_and_batch_fusion is not None:
      pb.map_and_batch_fusion = self.map_and_batch_fusion
    if self.map_and_filter_fusion is not None:
      pb.map_and_filter_fusion = self.map_and_filter_fusion
    if self.map_fusion is not None:
      pb.map_fusion = self.map_fusion
    if self.map_parallelization is not None:
      pb.map_parallelization = self.map_parallelization
    if self.noop_elimination is not None:
      pb.noop_elimination = self.noop_elimination
    if self.parallel_batch is not None:
      pb.parallel_batch = self.parallel_batch
    if self.shuffle_and_repeat_fusion is not None:
      pb.shuffle_and_repeat_fusion = self.shuffle_and_repeat_fusion
    if self.warm_start is not None:
      pb.warm_start = self.warm_start
    return pb

  def _from_proto(self, pb):
    if pb.WhichOneof("optional_apply_default_optimizations") is not None:
      self.apply_default_optimizations = pb.apply_default_optimizations
    if pb.WhichOneof("optional_filter_fusion") is not None:
      self.filter_fusion = pb.filter_fusion
    if pb.WhichOneof("optional_filter_parallelization") is not None:
      self.filter_parallelization = pb.filter_parallelization
    if pb.WhichOneof("optional_inject_prefetch") is not None:
      self.inject_prefetch = pb.inject_prefetch
    if pb.WhichOneof("optional_map_and_batch_fusion") is not None:
      self.map_and_batch_fusion = pb.map_and_batch_fusion
    if pb.WhichOneof("optional_map_and_filter_fusion") is not None:
      self.map_and_filter_fusion = pb.map_and_filter_fusion
    if pb.WhichOneof("optional_map_fusion") is not None:
      self.map_fusion = pb.map_fusion
    if pb.WhichOneof("optional_map_parallelization") is not None:
      self.map_parallelization = pb.map_parallelization
    if pb.WhichOneof("optional_noop_elimination") is not None:
      self.noop_elimination = pb.noop_elimination
    if pb.WhichOneof("optional_parallel_batch") is not None:
      self.parallel_batch = pb.parallel_batch
    if pb.WhichOneof("optional_shuffle_and_repeat_fusion") is not None:
      self.shuffle_and_repeat_fusion = pb.shuffle_and_repeat_fusion
    if pb.WhichOneof("optional_warm_start") is not None:
      self.warm_start = pb.warm_start

  def _set_mutable(self, mutable):
    """Change the mutability value to `mutable` on this options and children."""
    # pylint: disable=protected-access
    object.__setattr__(self, "_mutable", mutable)
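
# A minimal sketch (editor's addition, not upstream code): disables the
# default optimizations and opts into a single explicit one, as the class
# docstring describes.
def _example_optimization_options():
  import tensorflow as tf  # Local import; illustrative only.
  options = tf.data.Options()
  options.experimental_optimization.apply_default_optimizations = False
  options.experimental_optimization.map_and_batch_fusion = True
  dataset = tf.data.Dataset.range(8).map(lambda x: x + 1).batch(2)
  return dataset.with_options(options)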

@deprecation.deprecated_endpoints("data.experimental.ThreadingOptions")
@tf_export("data.experimental.ThreadingOptions", "data.ThreadingOptions")
class ThreadingOptions(options_lib.OptionsBase):
  """Represents options for dataset threading.

  You can set the threading options of a dataset through the
  `threading` property of `tf.data.Options`; the property is
  an instance of `tf.data.ThreadingOptions`.

  ```python
  options = tf.data.Options()
  options.threading.private_threadpool_size = 10
  dataset = dataset.with_options(options)
  ```
  """

  max_intra_op_parallelism = options_lib.create_option(
      name="max_intra_op_parallelism",
      ty=int,
      docstring=
      "If set, it overrides the maximum degree of intra-op parallelism.")

  private_threadpool_size = options_lib.create_option(
      name="private_threadpool_size",
      ty=int,
      docstring=
      "If set, the dataset will use a private threadpool of the given size. "
      "The value 0 can be used to indicate that the threadpool size should be "
      "determined at runtime based on the number of available CPU cores.")

  def _to_proto(self):
    pb = dataset_options_pb2.ThreadingOptions()
    if self.max_intra_op_parallelism is not None:
      pb.max_intra_op_parallelism = self.max_intra_op_parallelism
    if self.private_threadpool_size is not None:
      pb.private_threadpool_size = self.private_threadpool_size
    return pb

  def _from_proto(self, pb):
    if pb.WhichOneof("optional_max_intra_op_parallelism") is not None:
      self.max_intra_op_parallelism = pb.max_intra_op_parallelism
    if pb.WhichOneof("optional_private_threadpool_size") is not None:
      self.private_threadpool_size = pb.private_threadpool_size
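
# A minimal sketch (editor's addition, not upstream code): gives a pipeline
# its own threadpool so it does not compete with other inter-op work in the
# process. The sizes shown are arbitrary illustrations.
def _example_threading_options():
  import tensorflow as tf  # Local import; illustrative only.
  options = tf.data.Options()
  options.threading.private_threadpool_size = 10
  options.threading.max_intra_op_parallelism = 1
  return tf.data.Dataset.range(8).with_options(options)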

@tf_export("data.Options")
class Options(options_lib.OptionsBase):
  """Represents options for `tf.data.Dataset`.

  A `tf.data.Options` object can be used, for instance, to control which static
  optimizations to apply to the input pipeline graph or whether to use
  performance modeling to dynamically tune the parallelism of operations such
  as `tf.data.Dataset.map` or `tf.data.Dataset.interleave`.

  The options are set for the entire dataset and are carried over to datasets
  created through tf.data transformations.

  The options can be set by constructing an `Options` object and using the
  `tf.data.Dataset.with_options(options)` transformation, which returns a
  dataset with the options set.

  >>> dataset = tf.data.Dataset.range(42)
  >>> options = tf.data.Options()
  >>> options.deterministic = False
  >>> dataset = dataset.with_options(options)
  >>> print(dataset.options().deterministic)
  False

  Note: A known limitation of the `tf.data.Options` implementation is that the
  options are not preserved across tf.function boundaries. In particular, to
  set options for a dataset that is iterated within a tf.function, the options
  need to be set within the same tf.function.
  """

  autotune = options_lib.create_option(
      name="autotune",
      ty=AutotuneOptions,
      docstring="The autotuning options associated with the dataset. See "
      "`tf.data.experimental.AutotuneOptions` for more details.",
      default_factory=AutotuneOptions)

  deterministic = options_lib.create_option(
      name="deterministic",
      ty=bool,
      docstring=
      "Whether the outputs need to be produced in deterministic order. If None,"
      " defaults to True.")

  experimental_deterministic = options_lib.create_option(
      name="experimental_deterministic",
      ty=bool,
      docstring="DEPRECATED. Use `deterministic` instead.")

  experimental_distribute = options_lib.create_option(
      name="experimental_distribute",
      ty=DistributeOptions,
      docstring=
      "The distribution strategy options associated with the dataset. See "
      "`tf.data.experimental.DistributeOptions` for more details.",
      default_factory=DistributeOptions)

  experimental_external_state_policy = options_lib.create_option(
      name="experimental_external_state_policy",
      ty=ExternalStatePolicy,
      docstring="This option can be used to override the default policy for "
      "how to handle external state when serializing a dataset or "
      "checkpointing its iterator. There are three settings available: "
      "IGNORE: External state is ignored without a warning; WARN: External "
      "state is ignored and a warning is logged; FAIL: External state results "
      "in an error.")

  experimental_optimization = options_lib.create_option(
      name="experimental_optimization",
      ty=OptimizationOptions,
      docstring=
      "The optimization options associated with the dataset. See "
      "`tf.data.experimental.OptimizationOptions` for more details.",
      default_factory=OptimizationOptions)

  experimental_slack = options_lib.create_option(
      name="experimental_slack",
      ty=bool,
      docstring="Whether to introduce 'slack' in the last `prefetch` of the "
      "input pipeline, if it exists. This may reduce CPU contention with "
      "accelerator host-side activity at the start of a step. The slack "
      "frequency is determined by the number of devices attached to this "
      "input pipeline. If None, defaults to False.")

  experimental_symbolic_checkpoint = options_lib.create_option(
      name="experimental_symbolic_checkpoint",
      ty=bool,
      docstring="Whether to checkpoint internal input pipeline state, "
      "maintaining cursors into data sources that identify the last "
      "element(s) produced as output to the tf.data consumer. This is an "
      "alternative to the default 'explicit' checkpointing, which stores the "
      "internal input pipeline state in the checkpoint. Note that symbolic "
      "checkpointing is not supported for transformations that can reorder "
      "elements.")

  experimental_threading = options_lib.create_option(
      name="experimental_threading",
      ty=ThreadingOptions,
      docstring="DEPRECATED. Use `threading` instead.")

  threading = options_lib.create_option(
      name="threading",
      ty=ThreadingOptions,
      docstring="The threading options associated with the dataset. See "
      "`tf.data.ThreadingOptions` for more details.",
      default_factory=ThreadingOptions)

  def __getattribute__(self, name):
    if name == "experimental_threading":
      logging.warning("options.experimental_threading is deprecated. "
                      "Use options.threading instead.")
      return getattr(self, "threading")
    if name == "experimental_deterministic":
      # TODO(aaudibert): Uncomment after internal uses have been updated.
      # logging.warning("options.experimental_deterministic is deprecated. "
      #                 "Use options.deterministic instead.")
      return getattr(self, "deterministic")
    return super(Options, self).__getattribute__(name)

  def __setattr__(self, name, value):
    if name == "experimental_threading":
      logging.warning("options.experimental_threading is deprecated. "
                      "Use options.threading instead.")
      super(Options, self).__setattr__("threading", value)
      return
    if name == "experimental_deterministic":
      # TODO(aaudibert): Uncomment after internal uses have been updated.
      # logging.warning("options.experimental_deterministic is deprecated. "
      #                 "Use options.deterministic instead.")
      super(Options, self).__setattr__("deterministic", value)
      return
    if name == "experimental_symbolic_checkpoint":
      # TODO(b/276269493): Add support for MacOS.
      if platform.system() == "Darwin":
        logging.warning("Symbolic checkpointing is not supported on MacOS.")
        return
    super(Options, self).__setattr__(name, value)

  def _to_proto(self):
    pb = dataset_options_pb2.Options()
    if self.deterministic is not None:
      pb.deterministic = self.deterministic
    pb.autotune_options.CopyFrom(self.autotune._to_proto())  # pylint: disable=protected-access
    pb.distribute_options.CopyFrom(self.experimental_distribute._to_proto())  # pylint: disable=protected-access
    if self.experimental_external_state_policy is not None:
      pb.external_state_policy = (
          ExternalStatePolicy._to_proto(  # pylint: disable=protected-access
              self.experimental_external_state_policy))
    pb.optimization_options.CopyFrom(self.experimental_optimization._to_proto())  # pylint: disable=protected-access
    if self.experimental_slack is not None:
      pb.slack = self.experimental_slack
    if self.experimental_symbolic_checkpoint is not None:
      pb.symbolic_checkpoint = self.experimental_symbolic_checkpoint
    pb.threading_options.CopyFrom(self.threading._to_proto())  # pylint: disable=protected-access
    return pb

  def _from_proto(self, pb):
    if pb.WhichOneof("optional_deterministic") is not None:
      self.deterministic = pb.deterministic
    self.autotune._from_proto(pb.autotune_options)  # pylint: disable=protected-access
    self.experimental_distribute._from_proto(pb.distribute_options)  # pylint: disable=protected-access
    if pb.WhichOneof("optional_external_state_policy") is not None:
      self.experimental_external_state_policy = (
          ExternalStatePolicy._from_proto(  # pylint: disable=protected-access
              pb.external_state_policy))
    self.experimental_optimization._from_proto(pb.optimization_options)  # pylint: disable=protected-access
    if pb.WhichOneof("optional_slack") is not None:
      self.experimental_slack = pb.slack
    if pb.WhichOneof("optional_symbolic_checkpoint") is not None:
      self.experimental_symbolic_checkpoint = pb.symbolic_checkpoint
    self.threading._from_proto(pb.threading_options)  # pylint: disable=protected-access

  def _set_mutable(self, mutable):
    """Change the mutability value to `mutable` on this options and children."""
    # pylint: disable=protected-access
    object.__setattr__(self, "_mutable", mutable)
    self.autotune._set_mutable(mutable)
    self.experimental_distribute._set_mutable(mutable)
    self.experimental_optimization._set_mutable(mutable)
    self.threading._set_mutable(mutable)

  def merge(self, options):
    """Merges itself with the given `tf.data.Options`.

    If this object and the `options` to merge set an option differently, a
    warning is generated and this object's value is updated with the `options`
    object's value.

    Args:
      options: The `tf.data.Options` to merge with.

    Returns:
      A new `tf.data.Options` object which is the result of merging self with
      the input `tf.data.Options`.
    """
    return options_lib.merge_options(self, options)
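
# A minimal sketch (editor's addition, not upstream code): merging two
# `Options` objects. Per the `merge` docstring above, when both objects set
# the same option differently, the argument's value wins and a warning is
# logged.
def _example_merge_options():
  base = Options()
  base.deterministic = True
  override = Options()
  override.deterministic = False
  merged = base.merge(override)  # Warns about the conflict; override wins.
  assert merged.deterministic is False
  return merged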