Coverage for /pythoncovmergedfiles/medio/medio/src/fuzz_text_ops.py: 54%
48 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
###### Coverage stub
# Starts coverage.py measurement as soon as the module is imported and
# registers a shutdown hook so the collected data is written to .coverage
# when the fuzzing process exits.
import atexit

import coverage

cov = coverage.coverage(data_file='.coverage', cover_pylib=True)
cov.start()


# Register an exit handler that will save coverage data on shutdown.
def exit_handler():
    cov.stop()
    cov.save()


atexit.register(exit_handler)
####### End of coverage stub
12#!/usr/bin/python3
13# Copyright 2023 Google LLC
14#
15# Licensed under the Apache License, Version 2.0 (the "License");
16# you may not use this file except in compliance with the License.
17# You may obtain a copy of the License at
18#
19# http://www.apache.org/licenses/LICENSE-2.0
20#
21# Unless required by applicable law or agreed to in writing, software
22# distributed under the License is distributed on an "AS IS" BASIS,
23# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
24# See the License for the specific language governing permissions and
25# limitations under the License.
26#
27##########################################################################
28"""Module for hitting the native text modules"""
29import sys
30import atheris
33# Instrument selected imports rather than atheris.instrument_all() since the
34# python package will have a huge amount of code and instrument_all() will
35# take many minutes to complete.
36with atheris.instrument_imports():
37 import numpy as np
38 import tensorflow as tf
39 from tensorflow_addons import text
@atheris.instrument_func
def fuzz_parse_time(data):
    """Drive text.parse_time with fuzz-derived strings and output units."""
    provider = atheris.FuzzedDataProvider(data)

    # The four valid output units plus one fuzz-generated candidate, so the
    # fuzzer also exercises the invalid-unit error path.
    output_units = [
        'SECOND',
        'MILLISECOND',
        'MICROSECOND',
        'NANOSECOND',
        provider.ConsumeUnicodeNoSurrogates(provider.ConsumeIntInRange(1, 32)),
    ]

    time_string = provider.ConsumeUnicodeNoSurrogates(
        provider.ConsumeIntInRange(1, 1024))
    time_format = provider.ConsumeUnicodeNoSurrogates(
        provider.ConsumeIntInRange(1, 1024))
    try:
        text.parse_time(time_string=time_string,
                        time_format=time_format,
                        output_unit=provider.PickValueInList(output_units))
    except tf.errors.InvalidArgumentError:
        # Malformed time strings/formats are expected to be rejected.
        pass
@atheris.instrument_func
def fuzz_skip_gram(data):
    """Exercise text.skip_gram_ops._filter_input with fuzz-derived tensors.

    Args:
      data: Raw fuzzer-provided bytes, consumed via FuzzedDataProvider.
    """
    fdp = atheris.FuzzedDataProvider(data)

    # 1-D string tensor of 1-100 fuzz-derived byte strings (1-64 bytes each).
    input_tensor = tf.constant([
        fdp.ConsumeBytes(fdp.ConsumeIntInRange(1, 64))
        for _ in range(fdp.ConsumeIntInRange(1, 100))
    ])

    # Five fuzz-derived vocabulary keys mapped to fixed int64 ids 0..4.
    keys = tf.constant(
        [fdp.ConsumeBytes(fdp.ConsumeIntInRange(1, 16)) for _ in range(5)])
    values = tf.constant([0, 1, 2, 3, 4], tf.dtypes.int64)

    # NOTE(review): this table is constructed but never passed to
    # _filter_input below — presumably only the table-building op itself is
    # being fuzzed here; confirm against the upstream harness.
    tf.lookup.StaticHashTable(tf.lookup.KeyValueTensorInitializer(keys, values),
                              -1)

    # Called for its side effects; the original bound the result to an
    # unused local (`no_table_output`), which has been dropped. With
    # vocab_freq_table=None the output is presumably the unfiltered input
    # (hence the old name) — result is intentionally ignored.
    text.skip_gram_ops._filter_input(
        input_tensor=input_tensor,
        vocab_freq_table=None,
        vocab_min_count=None,
        vocab_subsampling=None,
        corpus_size=None,
        seed=None,
    )
@atheris.instrument_func
def TestOneInput(data):
    """Atheris entry point: feed the same input to every fuzz target."""
    fuzz_parse_time(data)
    try:
        fuzz_skip_gram(data)
    except (tf.errors.FailedPreconditionError, UnicodeDecodeError):
        # Known benign failure modes for malformed fuzz input.
        pass
def main():
    """Configure atheris with this harness and start the fuzzing loop."""
    atheris.Setup(sys.argv, TestOneInput)
    atheris.Fuzz()


if __name__ == "__main__":
    main()