Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/datasets/boston_housing.py: 27%

22 statements  

coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Boston housing price regression dataset."""

import numpy as np

from keras.src.utils.data_utils import get_file

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.datasets.boston_housing.load_data")
def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
    """Loads the Boston Housing dataset.

    This is a dataset taken from the StatLib library which is maintained at
    Carnegie Mellon University.

    **WARNING:** This dataset has an ethical problem: the authors of this
    dataset included a variable, "B", that may appear to assume that racial
    self-segregation influences house prices. As such, we strongly discourage
    the use of this dataset, unless in the context of illustrating ethical
    issues in data science and machine learning.

    Samples contain 13 attributes of houses at different locations around the
    Boston suburbs in the late 1970s. Targets are the median values of
    the houses at a location (in k$).

    The attributes themselves are defined on the
    [StatLib website](http://lib.stat.cmu.edu/datasets/boston).

    Args:
        path: Path where to cache the dataset locally
            (relative to `~/.keras/datasets`).
        test_split: Fraction of the data to reserve as the test set.
        seed: Random seed for shuffling the data
            before computing the test split.

    Returns:
        Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.

    **x_train, x_test**: NumPy arrays with shape `(num_samples, 13)`
        containing either the training samples (for x_train)
        or test samples (for x_test).

    **y_train, y_test**: NumPy arrays of shape `(num_samples,)` containing the
        target scalars. The targets are float scalars typically between 10 and
        50 that represent the home prices in k$.
    """
    assert 0 <= test_split < 1
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    # Download the .npz archive (or reuse the cached copy) and verify its hash.
    path = get_file(
        path,
        origin=origin_folder + "boston_housing.npz",
        file_hash=(  # noqa: E501
            "f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5"
        ),
    )
    with np.load(path, allow_pickle=True) as f:
        x = f["x"]
        y = f["y"]

    # Shuffle samples and targets with the same seeded permutation.
    rng = np.random.RandomState(seed)
    indices = np.arange(len(x))
    rng.shuffle(indices)
    x = x[indices]
    y = y[indices]

    # The first (1 - test_split) fraction of the shuffled data becomes the
    # training set; the remainder becomes the test set.
    x_train = np.array(x[: int(len(x) * (1 - test_split))])
    y_train = np.array(y[: int(len(x) * (1 - test_split))])
    x_test = np.array(x[int(len(x) * (1 - test_split)) :])
    y_test = np.array(y[int(len(x) * (1 - test_split)) :])
    return (x_train, y_train), (x_test, y_test)
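

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original Keras module):
# a minimal example of calling load_data() with its defaults. The 404/102
# sample counts below assume the canonical 506-sample Boston housing archive
# and the default test_split=0.2; a different archive would yield different
# counts.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    (x_train, y_train), (x_test, y_test) = load_data(
        path="boston_housing.npz", test_split=0.2, seed=113
    )
    print(x_train.shape, y_train.shape)  # expected: (404, 13) (404,)
    print(x_test.shape, y_test.shape)  # expected: (102, 13) (102,)
    # Targets are median home prices in k$, typically between 10 and 50.
    print(y_train[:5])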