Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_candidate_sampling_ops.py: 10% of 432 statements

coverage.py v7.4.0, report created at 2024-01-03 07:57 +0000

1"""Python wrappers around TensorFlow ops. 

2 

3This file is MACHINE GENERATED! Do not edit. 

4""" 

5 

6import collections 

7 

8from tensorflow.python import pywrap_tfe as pywrap_tfe 

9from tensorflow.python.eager import context as _context 

10from tensorflow.python.eager import core as _core 

11from tensorflow.python.eager import execute as _execute 

12from tensorflow.python.framework import dtypes as _dtypes 

13from tensorflow.security.fuzzing.py import annotation_types as _atypes 

14 

15from tensorflow.python.framework import op_def_registry as _op_def_registry 

16from tensorflow.python.framework import ops as _ops 

17from tensorflow.python.framework import op_def_library as _op_def_library 

18from tensorflow.python.util.deprecation import deprecated_endpoints 

19from tensorflow.python.util import dispatch as _dispatch 

20from tensorflow.python.util.tf_export import tf_export 

21 

22from typing import TypeVar 

23_AllCandidateSamplerOutput = collections.namedtuple( 

24 "AllCandidateSampler", 

25 ["sampled_candidates", "true_expected_count", "sampled_expected_count"]) 

26 

27 

28def all_candidate_sampler(true_classes, num_true, num_sampled, unique, seed=0, seed2=0, name=None): 

29 r"""Generates labels for candidate sampling with a learned unigram distribution. 

30 

31 See explanations of candidate sampling and the data formats at 

32 go/candidate-sampling. 

33 

34 For each batch, this op picks a single set of sampled candidate labels. 

35 

36 The advantages of sampling candidates per-batch are simplicity and the 

37 possibility of efficient dense matrix multiplication. The disadvantage is that 

38 the sampled candidates must be chosen independently of the context and of the 

39 true labels. 

40 

41 Args: 

42 true_classes: A `Tensor` of type `int64`. 

43 A batch_size * num_true matrix, in which each row contains the 

44 IDs of the num_true target_classes in the corresponding original label. 

45 num_true: An `int` that is `>= 1`. Number of true labels per context. 

46 num_sampled: An `int` that is `>= 1`. Number of candidates to produce. 

47 unique: A `bool`. 

48 If unique is true, we sample with rejection, so that all sampled 

49 candidates in a batch are unique. This requires some approximation to 

50 estimate the post-rejection sampling probabilities. 

51 seed: An optional `int`. Defaults to `0`. 

52 If either seed or seed2 are set to be non-zero, the random number 

53 generator is seeded by the given seed. Otherwise, it is seeded by a 

54 random seed. 

55 seed2: An optional `int`. Defaults to `0`. 

56 An second seed to avoid seed collision. 

57 name: A name for the operation (optional). 

58 

59 Returns: 

60 A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count). 

61 

62 sampled_candidates: A `Tensor` of type `int64`. 

63 true_expected_count: A `Tensor` of type `float32`. 

64 sampled_expected_count: A `Tensor` of type `float32`. 

65 """ 

66 _ctx = _context._context or _context.context() 

67 tld = _ctx._thread_local_data 

68 if tld.is_eager: 

69 try: 

70 _result = pywrap_tfe.TFE_Py_FastPathExecute( 

71 _ctx, "AllCandidateSampler", name, true_classes, "num_true", num_true, 

72 "num_sampled", num_sampled, "unique", unique, "seed", seed, "seed2", 

73 seed2) 

74 _result = _AllCandidateSamplerOutput._make(_result) 

75 return _result 

76 except _core._NotOkStatusException as e: 

77 _ops.raise_from_not_ok_status(e, name) 

78 except _core._FallbackException: 

79 pass 

80 try: 

81 return all_candidate_sampler_eager_fallback( 

82 true_classes, num_true=num_true, num_sampled=num_sampled, 

83 unique=unique, seed=seed, seed2=seed2, name=name, ctx=_ctx) 

84 except _core._SymbolicException: 

85 pass # Add nodes to the TensorFlow graph. 

86 # Add nodes to the TensorFlow graph. 

87 num_true = _execute.make_int(num_true, "num_true") 

88 num_sampled = _execute.make_int(num_sampled, "num_sampled") 

89 unique = _execute.make_bool(unique, "unique") 

90 if seed is None: 

91 seed = 0 

92 seed = _execute.make_int(seed, "seed") 

93 if seed2 is None: 

94 seed2 = 0 

95 seed2 = _execute.make_int(seed2, "seed2") 

96 _, _, _op, _outputs = _op_def_library._apply_op_helper( 

97 "AllCandidateSampler", true_classes=true_classes, num_true=num_true, 

98 num_sampled=num_sampled, unique=unique, 

99 seed=seed, seed2=seed2, name=name) 

100 _result = _outputs[:] 

101 if _execute.must_record_gradient(): 

102 _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled", 

103 _op._get_attr_int("num_sampled"), "unique", 

104 _op._get_attr_bool("unique"), "seed", _op._get_attr_int("seed"), 

105 "seed2", _op._get_attr_int("seed2")) 

106 _inputs_flat = _op.inputs 

107 _execute.record_gradient( 

108 "AllCandidateSampler", _inputs_flat, _attrs, _result) 

109 _result = _AllCandidateSamplerOutput._make(_result) 

110 return _result 

111 

112AllCandidateSampler = tf_export("raw_ops.AllCandidateSampler")(_ops.to_raw_op(all_candidate_sampler)) 

def all_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"AllCandidateSampler", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AllCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _AllCandidateSamplerOutput._make(_result)
  return _result
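
# --- Example (editor's addition, not part of the generated file) ----------
# A minimal sketch of calling the wrapper above in eager mode. It assumes a
# working TensorFlow 2.x install; the helper name
# `_example_all_candidate_sampler` is illustrative only.
def _example_all_candidate_sampler():
  import tensorflow as tf  # local import keeps the sketch self-contained

  # Two rows ("contexts"), each with two true class IDs.
  true_classes = tf.constant([[0, 1], [2, 3]], dtype=tf.int64)
  out = all_candidate_sampler(
      true_classes, num_true=2, num_sampled=4, unique=True, seed=42)
  # The result is a namedtuple of three tensors.
  return out.sampled_candidates, out.true_expected_count, out.sampled_expected_count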

_ComputeAccidentalHitsOutput = collections.namedtuple(
    "ComputeAccidentalHits",
    ["indices", "ids", "weights"])


def compute_accidental_hits(true_classes, sampled_candidates, num_true, seed=0, seed2=0, name=None):
  r"""Computes the ids of the positions in sampled_candidates that match true_labels.

  When doing log-odds NCE, the result of this op should be passed through a
  SparseToDense op, then added to the logits of the sampled candidates. This has
  the effect of 'removing' the sampled labels that match the true labels by
  making the classifier sure that they are sampled labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      The true_classes output of UnpackSparseLabels.
    sampled_candidates: A `Tensor` of type `int64`.
      The sampled_candidates output of CandidateSampler.
    num_true: An `int`. Number of true labels per context.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (indices, ids, weights).

    indices: A `Tensor` of type `int32`.
    ids: A `Tensor` of type `int64`.
    weights: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "ComputeAccidentalHits", name, true_classes,
          sampled_candidates, "num_true", num_true, "seed", seed, "seed2",
          seed2)
      _result = _ComputeAccidentalHitsOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return compute_accidental_hits_eager_fallback(
          true_classes, sampled_candidates, num_true=num_true, seed=seed,
          seed2=seed2, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "ComputeAccidentalHits", true_classes=true_classes,
      sampled_candidates=sampled_candidates, num_true=num_true, seed=seed,
      seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ComputeAccidentalHits", _inputs_flat, _attrs, _result)
  _result = _ComputeAccidentalHitsOutput._make(_result)
  return _result

ComputeAccidentalHits = tf_export("raw_ops.ComputeAccidentalHits")(_ops.to_raw_op(compute_accidental_hits))

def compute_accidental_hits_eager_fallback(true_classes, sampled_candidates, num_true, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  sampled_candidates = _ops.convert_to_tensor(sampled_candidates, _dtypes.int64)
  _inputs_flat = [true_classes, sampled_candidates]
  _attrs = ("num_true", num_true, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"ComputeAccidentalHits", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ComputeAccidentalHits", _inputs_flat, _attrs, _result)
  _result = _ComputeAccidentalHitsOutput._make(_result)
  return _result
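
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of how the accidental-hits output is typically consumed: the
# (indices, ids, weights) triple marks sampled candidates that collide with a
# true label so their logits can be masked out. Assumes TensorFlow 2.x eager;
# the helper name is illustrative only.
def _example_compute_accidental_hits():
  import tensorflow as tf

  true_classes = tf.constant([[1, 2], [0, 4]], dtype=tf.int64)  # batch of 2
  sampled = tf.constant([1, 3, 4], dtype=tf.int64)              # 3 candidates
  hits = compute_accidental_hits(true_classes, sampled, num_true=2)
  # hits.indices -> row in the batch, hits.ids -> position in `sampled`,
  # hits.weights -> values to add to the colliding sampled logits.
  return hits.indices, hits.ids, hits.weights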

_FixedUnigramCandidateSamplerOutput = collections.namedtuple(
    "FixedUnigramCandidateSampler",
    ["sampled_candidates", "true_expected_count", "sampled_expected_count"])


def fixed_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, vocab_file="", distortion=1, num_reserved_ids=0, num_shards=1, shard=0, unigrams=[], seed=0, seed2=0, name=None):
  r"""Generates labels for candidate sampling with a learned unigram distribution.

  A unigram sampler could use a fixed unigram distribution read from a
  file or passed in as an in-memory array instead of building up the distribution
  from data on the fly. There is also an option to skew the distribution by
  applying a distortion power to the weights.

  The vocabulary file should be in CSV-like format, with the last field
  being the weight associated with the word.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    vocab_file: An optional `string`. Defaults to `""`.
      Each valid line in this file (which should have a CSV-like format)
      corresponds to a valid word ID. IDs are in sequential order, starting from
      num_reserved_ids. The last entry in each line is expected to be a value
      corresponding to the count or relative probability. Exactly one of vocab_file
      and unigrams needs to be passed to this op.
    distortion: An optional `float`. Defaults to `1`.
      The distortion is used to skew the unigram probability distribution.
      Each weight is first raised to the distortion's power before adding to the
      internal unigram distribution. As a result, distortion = 1.0 gives regular
      unigram sampling (as defined by the vocab file), and distortion = 0.0 gives
      a uniform distribution.
    num_reserved_ids: An optional `int`. Defaults to `0`.
      Optionally some reserved IDs can be added in the range [0,
      ..., num_reserved_ids) by the users. One use case is that a special unknown
      word token is used as ID 0. These IDs will have a sampling probability of 0.
    num_shards: An optional `int` that is `>= 1`. Defaults to `1`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'shard') indicates the number of partitions that are being
      used in the overall computation.
    shard: An optional `int` that is `>= 0`. Defaults to `0`.
      A sampler can be used to sample from a subset of the original range
      in order to speed up the whole computation through parallelism. This parameter
      (together with 'num_shards') indicates the particular partition number of a
      sampler op, when partitioning is being used.
    unigrams: An optional list of `floats`. Defaults to `[]`.
      A list of unigram counts or probabilities, one per ID in sequential
      order. Exactly one of vocab_file and unigrams should be passed to this op.
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`.
    true_expected_count: A `Tensor` of type `float32`.
    sampled_expected_count: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "FixedUnigramCandidateSampler", name, true_classes,
          "num_true", num_true, "num_sampled", num_sampled, "unique", unique,
          "range_max", range_max, "vocab_file", vocab_file, "distortion",
          distortion, "num_reserved_ids", num_reserved_ids, "num_shards",
          num_shards, "shard", shard, "unigrams", unigrams, "seed", seed,
          "seed2", seed2)
      _result = _FixedUnigramCandidateSamplerOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return fixed_unigram_candidate_sampler_eager_fallback(
          true_classes, num_true=num_true, num_sampled=num_sampled,
          unique=unique, range_max=range_max, vocab_file=vocab_file,
          distortion=distortion, num_reserved_ids=num_reserved_ids,
          num_shards=num_shards, shard=shard, unigrams=unigrams, seed=seed,
          seed2=seed2, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if vocab_file is None:
    vocab_file = ""
  vocab_file = _execute.make_str(vocab_file, "vocab_file")
  if distortion is None:
    distortion = 1
  distortion = _execute.make_float(distortion, "distortion")
  if num_reserved_ids is None:
    num_reserved_ids = 0
  num_reserved_ids = _execute.make_int(num_reserved_ids, "num_reserved_ids")
  if num_shards is None:
    num_shards = 1
  num_shards = _execute.make_int(num_shards, "num_shards")
  if shard is None:
    shard = 0
  shard = _execute.make_int(shard, "shard")
  if unigrams is None:
    unigrams = []
  if not isinstance(unigrams, (list, tuple)):
    raise TypeError(
        "Expected list for 'unigrams' argument to "
        "'fixed_unigram_candidate_sampler' Op, not %r." % unigrams)
  unigrams = [_execute.make_float(_f, "unigrams") for _f in unigrams]
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "FixedUnigramCandidateSampler", true_classes=true_classes,
      num_true=num_true, num_sampled=num_sampled, unique=unique,
      range_max=range_max, vocab_file=vocab_file, distortion=distortion,
      num_reserved_ids=num_reserved_ids, num_shards=num_shards, shard=shard,
      unigrams=unigrams, seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled",
              _op._get_attr_int("num_sampled"), "unique",
              _op._get_attr_bool("unique"), "range_max",
              _op._get_attr_int("range_max"), "vocab_file",
              _op.get_attr("vocab_file"), "distortion",
              _op.get_attr("distortion"), "num_reserved_ids",
              _op._get_attr_int("num_reserved_ids"), "num_shards",
              _op._get_attr_int("num_shards"), "shard",
              _op._get_attr_int("shard"), "unigrams",
              _op.get_attr("unigrams"), "seed", _op._get_attr_int("seed"),
              "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "FixedUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _FixedUnigramCandidateSamplerOutput._make(_result)
  return _result

FixedUnigramCandidateSampler = tf_export("raw_ops.FixedUnigramCandidateSampler")(_ops.to_raw_op(fixed_unigram_candidate_sampler))

def fixed_unigram_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, range_max, vocab_file, distortion, num_reserved_ids, num_shards, shard, unigrams, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if vocab_file is None:
    vocab_file = ""
  vocab_file = _execute.make_str(vocab_file, "vocab_file")
  if distortion is None:
    distortion = 1
  distortion = _execute.make_float(distortion, "distortion")
  if num_reserved_ids is None:
    num_reserved_ids = 0
  num_reserved_ids = _execute.make_int(num_reserved_ids, "num_reserved_ids")
  if num_shards is None:
    num_shards = 1
  num_shards = _execute.make_int(num_shards, "num_shards")
  if shard is None:
    shard = 0
  shard = _execute.make_int(shard, "shard")
  if unigrams is None:
    unigrams = []
  if not isinstance(unigrams, (list, tuple)):
    raise TypeError(
        "Expected list for 'unigrams' argument to "
        "'fixed_unigram_candidate_sampler' Op, not %r." % unigrams)
  unigrams = [_execute.make_float(_f, "unigrams") for _f in unigrams]
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "range_max", range_max, "vocab_file", vocab_file,
            "distortion", distortion, "num_reserved_ids", num_reserved_ids,
            "num_shards", num_shards, "shard", shard, "unigrams", unigrams,
            "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"FixedUnigramCandidateSampler", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "FixedUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _FixedUnigramCandidateSamplerOutput._make(_result)
  return _result
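
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of sampling from a fixed, in-memory unigram distribution via the
# wrapper above. `unigrams` needs one weight per ID in [0, range_max), and
# exactly one of `unigrams` / `vocab_file` may be supplied. Assumes TF 2.x
# eager; the helper name is illustrative only.
def _example_fixed_unigram_candidate_sampler():
  import tensorflow as tf

  true_classes = tf.constant([[0], [3]], dtype=tf.int64)
  out = fixed_unigram_candidate_sampler(
      true_classes, num_true=1, num_sampled=2, unique=True, range_max=5,
      unigrams=[10.0, 5.0, 2.0, 1.0, 1.0], distortion=0.75, seed=7)
  return out.sampled_candidates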

_LearnedUnigramCandidateSamplerOutput = collections.namedtuple(
    "LearnedUnigramCandidateSampler",
    ["sampled_candidates", "true_expected_count", "sampled_expected_count"])


def learned_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=0, seed2=0, name=None):
  r"""Generates labels for candidate sampling with a learned unigram distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`.
    true_expected_count: A `Tensor` of type `float32`.
    sampled_expected_count: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "LearnedUnigramCandidateSampler", name, true_classes,
          "num_true", num_true, "num_sampled", num_sampled, "unique", unique,
          "range_max", range_max, "seed", seed, "seed2", seed2)
      _result = _LearnedUnigramCandidateSamplerOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return learned_unigram_candidate_sampler_eager_fallback(
          true_classes, num_true=num_true, num_sampled=num_sampled,
          unique=unique, range_max=range_max, seed=seed, seed2=seed2,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "LearnedUnigramCandidateSampler", true_classes=true_classes,
      num_true=num_true, num_sampled=num_sampled, unique=unique,
      range_max=range_max, seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled",
              _op._get_attr_int("num_sampled"), "unique",
              _op._get_attr_bool("unique"), "range_max",
              _op._get_attr_int("range_max"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LearnedUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _LearnedUnigramCandidateSamplerOutput._make(_result)
  return _result

LearnedUnigramCandidateSampler = tf_export("raw_ops.LearnedUnigramCandidateSampler")(_ops.to_raw_op(learned_unigram_candidate_sampler))

def learned_unigram_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, range_max, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "range_max", range_max, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"LearnedUnigramCandidateSampler", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "LearnedUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _LearnedUnigramCandidateSamplerOutput._make(_result)
  return _result
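
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of the learned-unigram sampler: the underlying op maintains an
# internal count of the true labels it has seen, so the sampling distribution
# adapts over repeated calls. Assumes TF 2.x eager; the helper name is
# illustrative only.
def _example_learned_unigram_candidate_sampler():
  import tensorflow as tf

  true_classes = tf.constant([[2], [7]], dtype=tf.int64)
  out = learned_unigram_candidate_sampler(
      true_classes, num_true=1, num_sampled=4, unique=True, range_max=10,
      seed=1, seed2=2)
  return out.sampled_candidates, out.sampled_expected_count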

_LogUniformCandidateSamplerOutput = collections.namedtuple(
    "LogUniformCandidateSampler",
    ["sampled_candidates", "true_expected_count", "sampled_expected_count"])


def log_uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=0, seed2=0, name=None):
  r"""Generates labels for candidate sampling with a log-uniform distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`.
    true_expected_count: A `Tensor` of type `float32`.
    sampled_expected_count: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "LogUniformCandidateSampler", name, true_classes, "num_true",
          num_true, "num_sampled", num_sampled, "unique", unique, "range_max",
          range_max, "seed", seed, "seed2", seed2)
      _result = _LogUniformCandidateSamplerOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return log_uniform_candidate_sampler_eager_fallback(
          true_classes, num_true=num_true, num_sampled=num_sampled,
          unique=unique, range_max=range_max, seed=seed, seed2=seed2,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "LogUniformCandidateSampler", true_classes=true_classes,
      num_true=num_true, num_sampled=num_sampled, unique=unique,
      range_max=range_max, seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled",
              _op._get_attr_int("num_sampled"), "unique",
              _op._get_attr_bool("unique"), "range_max",
              _op._get_attr_int("range_max"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LogUniformCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _LogUniformCandidateSamplerOutput._make(_result)
  return _result

LogUniformCandidateSampler = tf_export("raw_ops.LogUniformCandidateSampler")(_ops.to_raw_op(log_uniform_candidate_sampler))

def log_uniform_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, range_max, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "range_max", range_max, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"LogUniformCandidateSampler", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "LogUniformCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _LogUniformCandidateSamplerOutput._make(_result)
  return _result
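
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of the log-uniform (Zipf-like) sampler, called here through the
# exported `tf.raw_ops` endpoint, which takes keyword arguments only. Lower
# IDs are sampled more often, which suits frequency-sorted vocabularies.
# Assumes TF 2.x eager; the helper name is illustrative only.
def _example_log_uniform_candidate_sampler():
  import tensorflow as tf

  true_classes = tf.constant([[0], [5]], dtype=tf.int64)
  out = tf.raw_ops.LogUniformCandidateSampler(
      true_classes=true_classes, num_true=1, num_sampled=3, unique=True,
      range_max=100)
  return out.sampled_candidates, out.true_expected_count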

_ThreadUnsafeUnigramCandidateSamplerOutput = collections.namedtuple(
    "ThreadUnsafeUnigramCandidateSampler",
    ["sampled_candidates", "true_expected_count", "sampled_expected_count"])


def thread_unsafe_unigram_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=0, seed2=0, name=None):
  r"""Generates labels for candidate sampling with a learned unigram distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`.
    true_expected_count: A `Tensor` of type `float32`.
    sampled_expected_count: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "ThreadUnsafeUnigramCandidateSampler", name, true_classes,
          "num_true", num_true, "num_sampled", num_sampled, "unique", unique,
          "range_max", range_max, "seed", seed, "seed2", seed2)
      _result = _ThreadUnsafeUnigramCandidateSamplerOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return thread_unsafe_unigram_candidate_sampler_eager_fallback(
          true_classes, num_true=num_true, num_sampled=num_sampled,
          unique=unique, range_max=range_max, seed=seed, seed2=seed2,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "ThreadUnsafeUnigramCandidateSampler", true_classes=true_classes,
      num_true=num_true, num_sampled=num_sampled, unique=unique,
      range_max=range_max, seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled",
              _op._get_attr_int("num_sampled"), "unique",
              _op._get_attr_bool("unique"), "range_max",
              _op._get_attr_int("range_max"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ThreadUnsafeUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _ThreadUnsafeUnigramCandidateSamplerOutput._make(_result)
  return _result

ThreadUnsafeUnigramCandidateSampler = tf_export("raw_ops.ThreadUnsafeUnigramCandidateSampler")(_ops.to_raw_op(thread_unsafe_unigram_candidate_sampler))

def thread_unsafe_unigram_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, range_max, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "range_max", range_max, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"ThreadUnsafeUnigramCandidateSampler", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ThreadUnsafeUnigramCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _ThreadUnsafeUnigramCandidateSamplerOutput._make(_result)
  return _result
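
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of the thread-unsafe variant, which follows the same learned-unigram
# semantics as above but, as its name suggests, should only be driven from a
# single thread. Assumes TF 2.x eager; the helper name is illustrative only.
def _example_thread_unsafe_unigram_candidate_sampler():
  import tensorflow as tf

  true_classes = tf.constant([[1]], dtype=tf.int64)
  out = thread_unsafe_unigram_candidate_sampler(
      true_classes, num_true=1, num_sampled=2, unique=False, range_max=20)
  return out.sampled_candidates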

_UniformCandidateSamplerOutput = collections.namedtuple(
    "UniformCandidateSampler",
    ["sampled_candidates", "true_expected_count", "sampled_expected_count"])


def uniform_candidate_sampler(true_classes, num_true, num_sampled, unique, range_max, seed=0, seed2=0, name=None):
  r"""Generates labels for candidate sampling with a uniform distribution.

  See explanations of candidate sampling and the data formats at
  go/candidate-sampling.

  For each batch, this op picks a single set of sampled candidate labels.

  The advantages of sampling candidates per-batch are simplicity and the
  possibility of efficient dense matrix multiplication. The disadvantage is that
  the sampled candidates must be chosen independently of the context and of the
  true labels.

  Args:
    true_classes: A `Tensor` of type `int64`.
      A batch_size * num_true matrix, in which each row contains the
      IDs of the num_true target_classes in the corresponding original label.
    num_true: An `int` that is `>= 1`. Number of true labels per context.
    num_sampled: An `int` that is `>= 1`.
      Number of candidates to randomly sample.
    unique: A `bool`.
      If unique is true, we sample with rejection, so that all sampled
      candidates in a batch are unique. This requires some approximation to
      estimate the post-rejection sampling probabilities.
    range_max: An `int` that is `>= 1`.
      The sampler will sample integers from the interval [0, range_max).
    seed: An optional `int`. Defaults to `0`.
      If either seed or seed2 are set to be non-zero, the random number
      generator is seeded by the given seed. Otherwise, it is seeded by a
      random seed.
    seed2: An optional `int`. Defaults to `0`.
      A second seed to avoid seed collision.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sampled_candidates, true_expected_count, sampled_expected_count).

    sampled_candidates: A `Tensor` of type `int64`.
    true_expected_count: A `Tensor` of type `float32`.
    sampled_expected_count: A `Tensor` of type `float32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "UniformCandidateSampler", name, true_classes, "num_true",
          num_true, "num_sampled", num_sampled, "unique", unique, "range_max",
          range_max, "seed", seed, "seed2", seed2)
      _result = _UniformCandidateSamplerOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return uniform_candidate_sampler_eager_fallback(
          true_classes, num_true=num_true, num_sampled=num_sampled,
          unique=unique, range_max=range_max, seed=seed, seed2=seed2,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
      "UniformCandidateSampler", true_classes=true_classes,
      num_true=num_true, num_sampled=num_sampled, unique=unique,
      range_max=range_max, seed=seed, seed2=seed2, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("num_true", _op._get_attr_int("num_true"), "num_sampled",
              _op._get_attr_int("num_sampled"), "unique",
              _op._get_attr_bool("unique"), "range_max",
              _op._get_attr_int("range_max"), "seed",
              _op._get_attr_int("seed"), "seed2", _op._get_attr_int("seed2"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "UniformCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _UniformCandidateSamplerOutput._make(_result)
  return _result

UniformCandidateSampler = tf_export("raw_ops.UniformCandidateSampler")(_ops.to_raw_op(uniform_candidate_sampler))

def uniform_candidate_sampler_eager_fallback(true_classes, num_true, num_sampled, unique, range_max, seed, seed2, name, ctx):
  num_true = _execute.make_int(num_true, "num_true")
  num_sampled = _execute.make_int(num_sampled, "num_sampled")
  unique = _execute.make_bool(unique, "unique")
  range_max = _execute.make_int(range_max, "range_max")
  if seed is None:
    seed = 0
  seed = _execute.make_int(seed, "seed")
  if seed2 is None:
    seed2 = 0
  seed2 = _execute.make_int(seed2, "seed2")
  true_classes = _ops.convert_to_tensor(true_classes, _dtypes.int64)
  _inputs_flat = [true_classes]
  _attrs = ("num_true", num_true, "num_sampled", num_sampled, "unique",
            unique, "range_max", range_max, "seed", seed, "seed2", seed2)
  _result = _execute.execute(b"UniformCandidateSampler", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "UniformCandidateSampler", _inputs_flat, _attrs, _result)
  _result = _UniformCandidateSamplerOutput._make(_result)
  return _result
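
# --- Example (editor's addition, not part of the generated file) ----------
# A sketch of the uniform sampler together with its expected-count outputs,
# which are the usual inputs to sampled-softmax / NCE logit corrections.
# Assumes TF 2.x eager; the helper name is illustrative only.
def _example_uniform_candidate_sampler():
  import tensorflow as tf

  true_classes = tf.constant([[3, 4]], dtype=tf.int64)
  out = uniform_candidate_sampler(
      true_classes, num_true=2, num_sampled=5, unique=True, range_max=10,
      seed=7)
  # Each expected count approximates how often an ID appears among the
  # sampled candidates; subtracting log(expected_count) from the matching
  # logits gives the standard sampled-softmax correction.
  return out.sampled_candidates, out.true_expected_count, out.sampled_expected_count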
