Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/pandas/core/tools/numeric.py: 12%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

108 statements  

1from __future__ import annotations 

2 

3from typing import Literal 

4 

5import numpy as np 

6 

7from pandas._libs import lib 

8from pandas._typing import ( 

9 DateTimeErrorChoices, 

10 DtypeBackend, 

11 npt, 

12) 

13from pandas.util._validators import check_dtype_backend 

14 

15from pandas.core.dtypes.cast import maybe_downcast_numeric 

16from pandas.core.dtypes.common import ( 

17 ensure_object, 

18 is_bool_dtype, 

19 is_datetime_or_timedelta_dtype, 

20 is_decimal, 

21 is_integer_dtype, 

22 is_number, 

23 is_numeric_dtype, 

24 is_scalar, 

25 is_string_dtype, 

26 needs_i8_conversion, 

27) 

28from pandas.core.dtypes.generic import ( 

29 ABCIndex, 

30 ABCSeries, 

31) 

32 

33import pandas as pd 

34from pandas.core.arrays import BaseMaskedArray 

35from pandas.core.arrays.string_ import StringDtype 

36 

37 

def to_numeric(
    arg,
    errors: DateTimeErrorChoices = "raise",
    downcast: Literal["integer", "signed", "unsigned", "float"] | None = None,
    dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default,
):
    """
    Convert argument to a numeric type.

    The default return dtype is `float64` or `int64`
    depending on the data supplied. Use the `downcast` parameter
    to obtain other dtypes.

    Please note that precision loss may occur if really large numbers
    are passed in. Due to the internal limitations of `ndarray`, if
    numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
    or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
    passed in, it is very likely they will be converted to float so that
    they can be stored in an `ndarray`. These warnings apply similarly to
    `Series` since it internally leverages `ndarray`.

    Parameters
    ----------
    arg : scalar, list, tuple, 1-d array, or Series
        Argument to be converted.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception.
        - If 'coerce', then invalid parsing will be set as NaN.
        - If 'ignore', then invalid parsing will return the input.
    downcast : str, default None
        Can be 'integer', 'signed', 'unsigned', or 'float'.
        If not None, and if the data has been successfully cast to a
        numerical dtype (or if the data was numeric to begin with),
        downcast that resulting data to the smallest numerical dtype
        possible according to the following rules:

        - 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
        - 'unsigned': smallest unsigned int dtype (min.: np.uint8)
        - 'float': smallest float dtype (min.: np.float32)

        As this behaviour is separate from the core conversion to
        numeric values, any errors raised during the downcasting
        will be surfaced regardless of the value of the 'errors' input.

        In addition, downcasting will only occur if the size
        of the resulting data's dtype is strictly larger than
        the dtype it is to be cast to, so if none of the dtypes
        checked satisfy that specification, no downcasting will be
        performed on the data.
    dtype_backend : {"numpy_nullable", "pyarrow"}, defaults to NumPy backed DataFrames
        Which dtype_backend to use, e.g. whether a DataFrame should have NumPy
        arrays, nullable dtypes are used for all dtypes that have a nullable
        implementation when "numpy_nullable" is set, pyarrow is used for all
        dtypes if "pyarrow" is set.

        The dtype_backends are still experimental.

        .. versionadded:: 2.0

    Returns
    -------
    ret
        Numeric if parsing succeeded.
        Return type depends on input.  Series if Series, otherwise ndarray.

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    numpy.ndarray.astype : Cast a numpy array to a specified type.
    DataFrame.convert_dtypes : Convert dtypes.

    Examples
    --------
    Take separate series and convert to numeric, coercing when told to

    >>> s = pd.Series(['1.0', '2', -3])
    >>> pd.to_numeric(s)
    0    1.0
    1    2.0
    2   -3.0
    dtype: float64
    >>> pd.to_numeric(s, downcast='float')
    0    1.0
    1    2.0
    2   -3.0
    dtype: float32
    >>> pd.to_numeric(s, downcast='signed')
    0    1
    1    2
    2   -3
    dtype: int8
    >>> s = pd.Series(['apple', '1.0', '2', -3])
    >>> pd.to_numeric(s, errors='ignore')
    0    apple
    1      1.0
    2        2
    3       -3
    dtype: object
    >>> pd.to_numeric(s, errors='coerce')
    0    NaN
    1    1.0
    2    2.0
    3   -3.0
    dtype: float64

    Downcasting of nullable integer and floating dtypes is supported:

    >>> s = pd.Series([1, 2, 3], dtype="Int64")
    >>> pd.to_numeric(s, downcast="integer")
    0    1
    1    2
    2    3
    dtype: Int8
    >>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
    >>> pd.to_numeric(s, downcast="float")
    0    1.0
    1    2.1
    2    3.0
    dtype: Float32
    """
    if downcast not in (None, "integer", "signed", "unsigned", "float"):
        raise ValueError("invalid downcasting method provided")

    if errors not in ("ignore", "raise", "coerce"):
        raise ValueError("invalid error value specified")

    check_dtype_backend(dtype_backend)

    # Remember the input container so we can rebuild the same kind on return.
    is_series = False
    is_index = False
    is_scalars = False

    if isinstance(arg, ABCSeries):
        is_series = True
        values = arg.values
    elif isinstance(arg, ABCIndex):
        is_index = True
        if needs_i8_conversion(arg.dtype):
            # datetime-like index: work on the underlying int64 view
            values = arg.view("i8")
        else:
            values = arg.values
    elif isinstance(arg, (list, tuple)):
        values = np.array(arg, dtype="O")
    elif is_scalar(arg):
        if is_decimal(arg):
            return float(arg)
        if is_number(arg):
            return arg
        is_scalars = True
        values = np.array([arg], dtype="O")
    elif getattr(arg, "ndim", 1) > 1:
        raise TypeError("arg must be a list, tuple, 1-d array, or Series")
    else:
        values = arg

    # kept so errors='ignore'/'coerce' can fall back to the untouched input
    orig_values = values

    # GH33013: for IntegerArray & FloatingArray extract non-null values for casting
    # save mask to reconstruct the full array after casting
    mask: npt.NDArray[np.bool_] | None = None
    if isinstance(values, BaseMaskedArray):
        mask = values._mask
        values = values._data[~mask]

    values_dtype = getattr(values, "dtype", None)
    if isinstance(values_dtype, pd.ArrowDtype):
        # pyarrow-backed input: drop nulls, keep the mask for reconstruction
        mask = values.isna()
        values = values.dropna().to_numpy()
    new_mask: np.ndarray | None = None
    if is_numeric_dtype(values_dtype):
        pass
    elif is_datetime_or_timedelta_dtype(values_dtype):
        # datetime64/timedelta64 values are reinterpreted as their int64 codes
        values = values.view(np.int64)
    else:
        values = ensure_object(values)
        coerce_numeric = errors not in ("ignore", "raise")
        try:
            values, new_mask = lib.maybe_convert_numeric(  # type: ignore[call-overload]  # noqa
                values,
                set(),
                coerce_numeric=coerce_numeric,
                convert_to_masked_nullable=dtype_backend is not lib.no_default
                or isinstance(values_dtype, StringDtype),
            )
        except (ValueError, TypeError):
            if errors == "raise":
                raise
            values = orig_values

    if new_mask is not None:
        # Remove unnecessary values, is expected later anyway and enables
        # downcasting
        values = values[~new_mask]
    elif (
        dtype_backend is not lib.no_default
        and new_mask is None
        or isinstance(values_dtype, StringDtype)
    ):
        new_mask = np.zeros(values.shape, dtype=np.bool_)

    # attempt downcast only if the data has been successfully converted
    # to a numerical dtype and if a downcast method has been specified
    if downcast is not None and is_numeric_dtype(values.dtype):
        typecodes: str | None = None

        if downcast in ("integer", "signed"):
            typecodes = np.typecodes["Integer"]
        elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
            typecodes = np.typecodes["UnsignedInteger"]
        elif downcast == "float":
            typecodes = np.typecodes["Float"]

            # pandas support goes only to np.float32,
            # as float dtypes smaller than that are
            # extremely rare and not well supported
            float_32_char = np.dtype(np.float32).char
            float_32_ind = typecodes.index(float_32_char)
            typecodes = typecodes[float_32_ind:]

        if typecodes is not None:
            # from smallest to largest
            for typecode in typecodes:
                dtype = np.dtype(typecode)
                if dtype.itemsize <= values.dtype.itemsize:
                    values = maybe_downcast_numeric(values, dtype)

                    # successful conversion
                    if values.dtype == dtype:
                        break

    # GH33013: for IntegerArray, BooleanArray & FloatingArray need to reconstruct
    # masked array
    if (mask is not None or new_mask is not None) and not is_string_dtype(values.dtype):
        if mask is None or (new_mask is not None and new_mask.shape == mask.shape):
            # GH 52588
            mask = new_mask
        else:
            mask = mask.copy()
        assert isinstance(mask, np.ndarray)
        data = np.zeros(mask.shape, dtype=values.dtype)
        data[~mask] = values

        from pandas.core.arrays import (
            ArrowExtensionArray,
            BooleanArray,
            FloatingArray,
            IntegerArray,
        )

        klass: type[IntegerArray] | type[BooleanArray] | type[FloatingArray]
        if is_integer_dtype(data.dtype):
            klass = IntegerArray
        elif is_bool_dtype(data.dtype):
            klass = BooleanArray
        else:
            klass = FloatingArray
        values = klass(data, mask)

        if dtype_backend == "pyarrow" or isinstance(values_dtype, pd.ArrowDtype):
            values = ArrowExtensionArray(values.__arrow_array__())

    if is_series:
        return arg._constructor(values, index=arg.index, name=arg.name)
    elif is_index:
        # because we want to coerce to numeric if possible,
        # do not use _shallow_copy
        return pd.Index(values, name=arg.name)
    elif is_scalars:
        return values[0]
    else:
        return values