Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/pandas/errors/__init__.py: 68%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

76 statements  

1""" 

2Expose public exceptions & warnings 

3""" 

4from __future__ import annotations 

5 

6import ctypes 

7 

8from pandas._config.config import OptionError 

9 

10from pandas._libs.tslibs import ( 

11 OutOfBoundsDatetime, 

12 OutOfBoundsTimedelta, 

13) 

14 

15from pandas.util.version import InvalidVersion 

16 

17 

class IntCastingNaNError(ValueError):
    """
    Raised when ``astype`` casts an array containing NaN to an integer dtype.

    NaN has no exact integer representation, so the conversion cannot be
    performed losslessly.

    Examples
    --------
    >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype="i8")
    Traceback (most recent call last):
    IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer
    """

28 

29 

class NullFrequencyError(ValueError):
    """
    Raised when an operation requires a non-null ``freq``.

    In particular ``DatetimeIndex.shift``, ``TimedeltaIndex.shift`` and
    ``PeriodIndex.shift`` need a frequency to shift by.

    Examples
    --------
    >>> df = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None)
    >>> df.shift(2)
    Traceback (most recent call last):
    NullFrequencyError: Cannot shift with no freq
    """

44 

45 

class PerformanceWarning(Warning):
    """
    Warning emitted when an operation may carry a hidden performance cost.

    Examples
    --------
    >>> df = pd.DataFrame({"jim": [0, 0, 1, 1],
    ...                    "joe": ["x", "x", "z", "y"],
    ...                    "jolie": [1, 2, 3, 4]})
    >>> df = df.set_index(["jim", "joe"])
    >>> df
              jolie
    jim  joe
    0    x    1
         x    2
    1    z    3
         y    4
    >>> df.loc[(1, 'z')]  # doctest: +SKIP
    # PerformanceWarning: indexing past lexsort depth may impact performance.
    df.loc[(1, 'z')]
              jolie
    jim  joe
    1    z    3
    """

70 

71 

class UnsupportedFunctionCall(ValueError):
    """
    Raised when attempting to call an unsupported numpy function on a pandas object.

    For example, ``np.cumsum(groupby_object)``.

    Examples
    --------
    >>> df = pd.DataFrame({"A": [0, 0, 1, 1],
    ...                    "B": ["x", "x", "z", "y"],
    ...                    "C": [1, 2, 3, 4]}
    ...                   )
    >>> np.cumsum(df.groupby(["A"]))
    Traceback (most recent call last):
    UnsupportedFunctionCall: numpy operations are not valid with groupby.
    Use .groupby(...).cumsum() instead
    """

89 

90 

class UnsortedIndexError(KeyError):
    """
    Raised when slicing a MultiIndex that has not been lexsorted.

    Subclass of `KeyError`.

    Examples
    --------
    >>> df = pd.DataFrame({"cat": [0, 0, 1, 1],
    ...                    "color": ["white", "white", "brown", "black"],
    ...                    "lives": [4, 4, 3, 7]},
    ...                   )
    >>> df = df.set_index(["cat", "color"])
    >>> df
                lives
    cat  color
    0    white    4
         white    4
    1    brown    3
         black    7
    >>> df.loc[(0, "black"):(1, "white")]
    Traceback (most recent call last):
    UnsortedIndexError: 'Key length (2) was greater
    than MultiIndex lexsort depth (1)'
    """

116 

117 

class ParserError(ValueError):
    """
    Raised when an error is encountered while parsing file contents.

    This is the generic error raised when functions such as `read_csv` or
    `read_html` fail while parsing the contents of a file.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_html : Read HTML table into a DataFrame.

    Examples
    --------
    >>> data = '''a,b,c
    ... cat,foo,bar
    ... dog,foo,"baz'''
    >>> from io import StringIO
    >>> pd.read_csv(StringIO(data), skipfooter=1, engine='python')
    Traceback (most recent call last):
    ParserError: ',' expected after '"'. Error could possibly be due
    to parsing errors in the skipped footer rows
    """

141 

142 

class DtypeWarning(Warning):
    """
    Warning emitted when a column read from a file contains mixed dtypes.

    Issued whenever `read_csv` or `read_table` encounter non-uniform dtypes
    in a column (or columns) of a given CSV file.

    See Also
    --------
    read_csv : Read CSV (comma-separated) file into a DataFrame.
    read_table : Read general delimited file into a DataFrame.

    Notes
    -----
    This warning only appears when dealing with larger files, because dtype
    checking happens per chunk read.

    Despite the warning, the CSV file is read with mixed types in a single
    column, which will be of object dtype. See the examples below to better
    understand this issue.

    Examples
    --------
    This example creates and reads a large CSV file with a column that
    contains `int` and `str`.

    >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 +
    ...                          ['1'] * 100000),
    ...                    'b': ['b'] * 300000})  # doctest: +SKIP
    >>> df.to_csv('test.csv', index=False)  # doctest: +SKIP
    >>> df2 = pd.read_csv('test.csv')  # doctest: +SKIP
    ... # DtypeWarning: Columns (0) have mixed types

    Important to notice that ``df2`` will contain both `str` and `int` for the
    same input, '1'.

    >>> df2.iloc[262140, 0]  # doctest: +SKIP
    '1'
    >>> type(df2.iloc[262140, 0])  # doctest: +SKIP
    <class 'str'>
    >>> df2.iloc[262150, 0]  # doctest: +SKIP
    1
    >>> type(df2.iloc[262150, 0])  # doctest: +SKIP
    <class 'int'>

    One way to solve this issue is using the `dtype` parameter in the
    `read_csv` and `read_table` functions to explicit the conversion:

    >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str})  # doctest: +SKIP

    No warning was issued.
    """

196 

197 

class EmptyDataError(ValueError):
    """
    Raised by ``pd.read_csv`` when empty data or an empty header is encountered.

    Examples
    --------
    >>> from io import StringIO
    >>> empty = StringIO()
    >>> pd.read_csv(empty)
    Traceback (most recent call last):
    EmptyDataError: No columns to parse from file
    """

210 

211 

class ParserWarning(Warning):
    """
    Warning emitted when reading a file falls back from the default 'c' parser.

    Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change
    parsers, generally from the default 'c' parser to 'python'.

    This happens because the requested engine lacks support for parsing a
    particular attribute of the CSV file.

    Options currently unsupported by the 'c' engine include:

    1. `sep` other than a single character (e.g. regex separators)
    2. `skipfooter` higher than 0
    3. `sep=None` with `delim_whitespace=False`

    The warning can be avoided by passing `engine='python'` explicitly to the
    `pd.read_csv` and `pd.read_table` methods.

    See Also
    --------
    pd.read_csv : Read CSV (comma-separated) file into DataFrame.
    pd.read_table : Read general delimited file into DataFrame.

    Examples
    --------
    Using a `sep` in `pd.read_csv` other than a single character:

    >>> import io
    >>> csv = '''a;b;c
    ... 1;1,8
    ... 1;2,1'''
    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]')  # doctest: +SKIP
    ... # ParserWarning: Falling back to the 'python' engine...

    Adding `engine='python'` to `pd.read_csv` removes the Warning:

    >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python')
    """

251 

252 

class MergeError(ValueError):
    """
    Raised when a merge operation fails its validation.

    Subclass of ``ValueError``.

    Examples
    --------
    >>> left = pd.DataFrame({"a": ["a", "b", "b", "d"],
    ...                      "b": ["cat", "dog", "weasel", "horse"]},
    ...                     index=range(4))
    >>> right = pd.DataFrame({"a": ["a", "b", "c", "d"],
    ...                       "c": ["meow", "bark", "chirp", "nay"]},
    ...                      index=range(4)).set_index("a")
    >>> left.join(right, on="a", validate="one_to_one",)
    Traceback (most recent call last):
    MergeError: Merge keys are not unique in left dataset; not a one-to-one merge
    """

271 

272 

class AbstractMethodError(NotImplementedError):
    """
    Raise this error instead of NotImplementedError for abstract methods.

    Parameters
    ----------
    class_instance : object
        The instance (or, for ``methodtype="classmethod"``, the class itself)
        whose abstract method was invoked.
    methodtype : str, default "method"
        One of ``{"method", "classmethod", "staticmethod", "property"}``.

    Raises
    ------
    ValueError
        If ``methodtype`` is not one of the allowed values.

    Examples
    --------
    >>> class Foo:
    ...     @classmethod
    ...     def classmethod(cls):
    ...         raise pd.errors.AbstractMethodError(cls, methodtype="classmethod")
    ...     def method(self):
    ...         raise pd.errors.AbstractMethodError(self)
    >>> test = Foo.classmethod()
    Traceback (most recent call last):
    AbstractMethodError: This classmethod must be defined in the concrete class Foo

    >>> test2 = Foo().method()
    Traceback (most recent call last):
    AbstractMethodError: This method must be defined in the concrete class Foo
    """

    def __init__(self, class_instance, methodtype: str = "method") -> None:
        types = {"method", "classmethod", "staticmethod", "property"}
        if methodtype not in types:
            # BUG FIX: the allowed set and the received value were swapped in
            # the original message ("must be one of {methodtype}, got {types}").
            raise ValueError(
                f"methodtype must be one of {types}, got {methodtype} instead."
            )
        self.methodtype = methodtype
        self.class_instance = class_instance

    def __str__(self) -> str:
        # For a classmethod the "instance" is the class object itself, so the
        # name is read directly; otherwise we report the instance's type.
        if self.methodtype == "classmethod":
            name = self.class_instance.__name__
        else:
            name = type(self.class_instance).__name__
        return f"This {self.methodtype} must be defined in the concrete class {name}"

309 

310 

class NumbaUtilError(Exception):
    """
    Raised for unsupported Numba engine routines.

    Examples
    --------
    >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]},
    ...                   columns=["key", "data"])
    >>> def incorrect_function(x):
    ...     return sum(x) * 2.7
    >>> df.groupby("key").agg(incorrect_function, engine="numba")
    Traceback (most recent call last):
    NumbaUtilError: The first 2 arguments to incorrect_function
    must be ['values', 'index']
    """

326 

327 

class DuplicateLabelError(ValueError):
    """
    Raised when an operation would introduce duplicate labels.

    Only relevant for objects created with ``allows_duplicate_labels=False``.

    Examples
    --------
    >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags(
    ...     allows_duplicate_labels=False
    ... )
    >>> s.reindex(['a', 'a', 'b'])
    Traceback (most recent call last):
    ...
    DuplicateLabelError: Index has duplicates.
          positions
    label
    a        [0, 1]
    """

345 

346 

class InvalidIndexError(Exception):
    """
    Raised when attempting to use an invalid index key.

    Examples
    --------
    >>> idx = pd.MultiIndex.from_product([["x", "y"], [0, 1]])
    >>> df = pd.DataFrame([[1, 1, 2, 2],
    ...                    [3, 3, 4, 4]], columns=idx)
    >>> df
       x     y
       0  1  0  1
    0  1  1  2  2
    1  3  3  4  4
    >>> df[:, 0]
    Traceback (most recent call last):
    InvalidIndexError: (slice(None, None, None), 0)
    """

365 

366 

class DataError(Exception):
    """
    Raised when performing an operation on non-numerical data.

    For example, calling ``ohlc`` on a non-numerical column or applying
    a numeric function to a rolling window over non-numeric data.

    Examples
    --------
    >>> ser = pd.Series(['a', 'b', 'c'])
    >>> ser.rolling(2).sum()
    Traceback (most recent call last):
    DataError: No numeric types to aggregate
    """

381 

382 

class SpecificationError(Exception):
    """
    Raised by ``agg`` when the aggregation functions are ill-specified.

    There are two scenarios that trigger this exception.

    The first is calling ``agg`` on a DataFrame or Series using a nested
    renamer (dict-of-dict).

    The second is calling ``agg`` on a DataFrame with duplicated function
    names and no column names assigned.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
    ...                    'B': range(5),
    ...                    'C': range(5)})
    >>> df.groupby('A').B.agg({'foo': 'count'})  # doctest: +SKIP
    ... # SpecificationError: nested renamer is not supported

    >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}})  # doctest: +SKIP
    ... # SpecificationError: nested renamer is not supported

    >>> df.groupby('A').agg(['min', 'min'])  # doctest: +SKIP
    ... # SpecificationError: nested renamer is not supported
    """

409 

410 

class SettingWithCopyError(ValueError):
    """
    Raised when trying to set on a copied slice from a ``DataFrame``.

    The ``mode.chained_assignment`` option needs to be set to 'raise'. This
    can happen unintentionally when using chained indexing.

    For more information on evaluation order,
    see :ref:`the user guide<indexing.evaluation_order>`.

    For more information on view vs. copy,
    see :ref:`the user guide<indexing.view_versus_copy>`.

    Examples
    --------
    >>> pd.options.mode.chained_assignment = 'raise'
    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
    >>> df.loc[0:3]['A'] = 'a'  # doctest: +SKIP
    ... # SettingWithCopyError: A value is trying to be set on a copy of a...
    """

431 

432 

class SettingWithCopyWarning(Warning):
    """
    Warning raised when trying to set on a copied slice from a ``DataFrame``.

    Emitted when ``mode.chained_assignment`` is set to 'warn', which is the
    default option. This can happen unintentionally when using chained
    indexing.

    For more information on evaluation order,
    see :ref:`the user guide<indexing.evaluation_order>`.

    For more information on view vs. copy,
    see :ref:`the user guide<indexing.view_versus_copy>`.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
    >>> df.loc[0:3]['A'] = 'a'  # doctest: +SKIP
    ... # SettingWithCopyWarning: A value is trying to be set on a copy of a...
    """

453 

454 

class ChainedAssignmentError(Warning):
    """
    Warning raised when a value is set through chained assignment.

    With the ``mode.copy_on_write`` option enabled, chained assignment can
    never work: the assignment always targets a temporary object produced by
    an indexing operation (getitem), and under Copy-on-Write that temporary
    always behaves as a copy. Consequently, assigning through a chain can
    never update the original Series or DataFrame.

    For more information on view vs. copy,
    see :ref:`the user guide<indexing.view_versus_copy>`.

    Examples
    --------
    >>> pd.options.mode.copy_on_write = True
    >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A'])
    >>> df["A"][0:3] = 10  # doctest: +SKIP
    ... # ChainedAssignmentError: ...
    >>> pd.options.mode.copy_on_write = False
    """

476 

477 

# Message shown for ChainedAssignmentError: under Copy-on-Write, a chained
# assignment (e.g. ``df["a"][0] = 1``) writes into an intermediate copy and
# never updates the original object.
_chained_assignment_msg = (
    "A value is trying to be set on a copy of a DataFrame or Series "
    "through chained assignment.\n"
    "When using the Copy-on-Write mode, such chained assignment never works "
    "to update the original DataFrame or Series, because the intermediate "
    "object on which we are setting values always behaves as a copy.\n\n"
    "Try using '.loc[row_indexer, col_indexer] = value' instead, to perform "
    "the assignment in a single step.\n\n"
    "See the caveats in the documentation: "
    "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
    "indexing.html#returning-a-view-versus-a-copy"
)

490 

491 

# Variant of the chained-assignment message for inplace *methods*
# (e.g. ``df[col].fillna(value, inplace=True)``) under Copy-on-Write.
_chained_assignment_method_msg = (
    "A value is trying to be set on a copy of a DataFrame or Series "
    "through chained assignment using an inplace method.\n"
    "When using the Copy-on-Write mode, such inplace method never works "
    "to update the original DataFrame or Series, because the intermediate "
    "object on which we are setting values always behaves as a copy.\n\n"
    "For example, when doing 'df[col].method(value, inplace=True)', try "
    "using 'df.method({col: value}, inplace=True)' instead, to perform "
    "the operation inplace on the original object.\n\n"
)

502 

503 

# Deprecation-style warning text used while Copy-on-Write is still opt-in:
# chained assignment may work today, but will stop updating the original
# object once CoW becomes the default in pandas 3.0.
_chained_assignment_warning_msg = (
    "ChainedAssignmentError: behaviour will change in pandas 3.0!\n"
    "You are setting values through chained assignment. Currently this works "
    "in certain cases, but when using Copy-on-Write (which will become the "
    "default behaviour in pandas 3.0) this will never work to update the "
    "original DataFrame or Series, because the intermediate object on which "
    "we are setting values will behave as a copy.\n"
    "A typical example is when you are setting values in a column of a "
    "DataFrame, like:\n\n"
    'df["col"][row_indexer] = value\n\n'
    'Use `df.loc[row_indexer, "col"] = values` instead, to perform the '
    "assignment in a single step and ensure this keeps updating the original `df`.\n\n"
    "See the caveats in the documentation: "
    "https://pandas.pydata.org/pandas-docs/stable/user_guide/"
    "indexing.html#returning-a-view-versus-a-copy\n"
)

520 

521 

# Deprecation-style warning text for inplace methods invoked through a chain
# (e.g. ``df[col].method(value, inplace=True)``) ahead of the pandas 3.0
# Copy-on-Write default.
_chained_assignment_warning_method_msg = (
    "A value is trying to be set on a copy of a DataFrame or Series "
    "through chained assignment using an inplace method.\n"
    "The behavior will change in pandas 3.0. This inplace method will "
    "never work because the intermediate object on which we are setting "
    "values always behaves as a copy.\n\n"
    "For example, when doing 'df[col].method(value, inplace=True)', try "
    "using 'df.method({col: value}, inplace=True)' or "
    "df[col] = df[col].method(value) instead, to perform "
    "the operation inplace on the original object.\n\n"
)

533 

534 

535def _check_cacher(obj): 

536 # This is a mess, selection paths that return a view set the _cacher attribute 

537 # on the Series; most of them also set _item_cache which adds 1 to our relevant 

538 # reference count, but iloc does not, so we have to check if we are actually 

539 # in the item cache 

540 if hasattr(obj, "_cacher"): 

541 parent = obj._cacher[1]() 

542 # parent could be dead 

543 if parent is None: 

544 return False 

545 if hasattr(parent, "_item_cache"): 

546 if obj._cacher[0] in parent._item_cache: 

547 # Check if we are actually the item from item_cache, iloc creates a 

548 # new object 

549 return obj is parent._item_cache[obj._cacher[0]] 

550 return False 

551 

552 

class NumExprClobberingError(NameError):
    """
    Raised when a built-in numexpr name is used as a variable name.

    ``eval`` or ``query`` will throw this error when the engine is set to
    'numexpr', which is the default engine for these methods whenever the
    numexpr package is installed.

    Examples
    --------
    >>> df = pd.DataFrame({'abs': [1, 1, 1]})
    >>> df.query("abs > 2")  # doctest: +SKIP
    ... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap...
    >>> sin, a = 1, 2
    >>> pd.eval("sin + a", engine='numexpr')  # doctest: +SKIP
    ... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap...
    """

570 

571 

class UndefinedVariableError(NameError):
    """
    Raised by ``query`` or ``eval`` when an undefined variable name is used.

    The message also states whether the undefined variable is local or not.

    Parameters
    ----------
    name : str
        The undefined variable's name.
    is_local : bool or None, default None
        Whether the name refers to a local (``@``-prefixed) variable.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 1]})
    >>> df.query("A > x")  # doctest: +SKIP
    ... # UndefinedVariableError: name 'x' is not defined
    >>> df.query("A > @y")  # doctest: +SKIP
    ... # UndefinedVariableError: local variable 'y' is not defined
    >>> pd.eval('x + 1')  # doctest: +SKIP
    ... # UndefinedVariableError: name 'x' is not defined
    """

    def __init__(self, name: str, is_local: bool | None = None) -> None:
        # Describe the scope first, then the undefined name itself.
        scope = "local variable" if is_local else "name"
        super().__init__(f"{scope} {repr(name)} is not defined")

596 

597 

class IndexingError(Exception):
    """
    Raised when indexing with a dimension mismatch or malformed indexer.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 1]})
    >>> df.loc[..., ..., 'A']  # doctest: +SKIP
    ... # IndexingError: indexer may only contain one '...' entry
    >>> df = pd.DataFrame({'A': [1, 1, 1]})
    >>> df.loc[1, ..., ...]  # doctest: +SKIP
    ... # IndexingError: Too many indexers
    >>> df[pd.Series([True], dtype=bool)]  # doctest: +SKIP
    ... # IndexingError: Unalignable boolean Series provided as indexer...
    >>> s = pd.Series(range(2),
    ...               index = pd.MultiIndex.from_product([["a", "b"], ["c"]]))
    >>> s.loc["a", "c", "d"]  # doctest: +SKIP
    ... # IndexingError: Too many indexers
    """

617 

618 

class PyperclipException(RuntimeError):
    """
    Raised when clipboard functionality is unsupported on this system.

    Raised by ``to_clipboard()`` and ``read_clipboard()``.
    """

625 

626 

class PyperclipWindowsException(PyperclipException):
    """
    Exception raised when clipboard functionality is unsupported by Windows.

    Access to the clipboard handle would be denied because some other
    window process is accessing it.
    """

    def __init__(self, message: str) -> None:
        # ``ctypes.WinError`` only exists on Windows, so typing fails on other
        # platforms; appending it surfaces the underlying Win32 error code.
        message += f" ({ctypes.WinError()})"  # type: ignore[attr-defined]
        super().__init__(message)

639 

640 

class CSSWarning(UserWarning):
    """
    Warning emitted when converting CSS styling fails.

    This can happen because the styling has no equivalent value or because
    it is not properly formatted.

    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 1]})
    >>> df.style.applymap(
    ...     lambda x: 'background-color: blueGreenRed;'
    ... ).to_excel('styled.xlsx')  # doctest: +SKIP
    CSSWarning: Unhandled color format: 'blueGreenRed'
    >>> df.style.applymap(
    ...     lambda x: 'border: 1px solid red red;'
    ... ).to_excel('styled.xlsx')  # doctest: +SKIP
    CSSWarning: Too many tokens provided to "border" (expected 1-3)
    """

660 

661 

class PossibleDataLossError(Exception):
    """
    Raised when opening an HDFStore file that is already open.

    Examples
    --------
    >>> store = pd.HDFStore('my-store', 'a')  # doctest: +SKIP
    >>> store.open("w")  # doctest: +SKIP
    ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]...
    """

672 

673 

class ClosedFileError(Exception):
    """
    Raised when performing an operation on a closed HDFStore file.

    Examples
    --------
    >>> store = pd.HDFStore('my-store', 'a')  # doctest: +SKIP
    >>> store.close()  # doctest: +SKIP
    >>> store.keys()  # doctest: +SKIP
    ... # ClosedFileError: my-store file is not open!
    """

685 

686 

class IncompatibilityWarning(Warning):
    """
    Warning emitted when ``where`` criteria are used on an incompatible HDF5 file.
    """

691 

692 

class AttributeConflictWarning(Warning):
    """
    Warning emitted when index attributes conflict when using HDFStore.

    Occurs when attempting to append an index whose name differs from the
    existing index on an HDFStore, or whose frequency differs from the
    existing index's frequency.

    Examples
    --------
    >>> idx1 = pd.Index(['a', 'b'], name='name1')
    >>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1)
    >>> df1.to_hdf('file', 'data', 'w', append=True)  # doctest: +SKIP
    >>> idx2 = pd.Index(['c', 'd'], name='name2')
    >>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2)
    >>> df2.to_hdf('file', 'data', 'a', append=True)  # doctest: +SKIP
    AttributeConflictWarning: the [index_name] attribute of the existing index is
    [name1] which conflicts with the new [name2]...
    """

712 

713 

class DatabaseError(OSError):
    """
    Raised when executing SQL with bad syntax or SQL that throws an error.

    Examples
    --------
    >>> from sqlite3 import connect
    >>> conn = connect(':memory:')
    >>> pd.read_sql('select * test', conn)  # doctest: +SKIP
    ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error
    """

725 

726 

class PossiblePrecisionLoss(Warning):
    """
    Warning emitted by ``to_stata`` for a column with values outside or equal to int64.

    When a column's value is outside or equal to the int64 limit, the column
    is converted to a float64 dtype.

    Examples
    --------
    >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)})
    >>> df.to_stata('test')  # doctest: +SKIP
    ... # PossiblePrecisionLoss: Column converted from int64 to float64...
    """

740 

741 

class ValueLabelTypeMismatch(Warning):
    """
    Warning emitted by ``to_stata`` for a category column containing non-string values.

    Examples
    --------
    >>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")})
    >>> df.to_stata('test')  # doctest: +SKIP
    ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str...
    """

752 

753 

class InvalidColumnName(Warning):
    """
    Warning emitted by ``to_stata`` when a column name is not a valid Stata name.

    Because the column name is an invalid Stata variable name, it needs to be
    converted.

    Examples
    --------
    >>> df = pd.DataFrame({"0categories": pd.Series([2, 2])})
    >>> df.to_stata('test')  # doctest: +SKIP
    ... # InvalidColumnName: Not all pandas column names were valid Stata variable...
    """

767 

768 

class CategoricalConversionWarning(Warning):
    """
    Warning emitted when reading a partially labeled Stata file using an iterator.

    Examples
    --------
    >>> from pandas.io.stata import StataReader
    >>> with StataReader('dta_file', chunksize=2) as reader:  # doctest: +SKIP
    ...     for i, block in enumerate(reader):
    ...         print(i, block)
    ... # CategoricalConversionWarning: One or more series with value labels...
    """

781 

782 

class LossySetitemError(Exception):
    """
    Raised when a ``__setitem__`` on an np.ndarray would not be lossless.

    Notes
    -----
    This is an internal error.
    """

791 

792 

class NoBufferPresent(Exception):
    """
    Raised by ``_get_data_buffer`` to signal the absence of a requested buffer.
    """

797 

798 

class InvalidComparison(Exception):
    """
    Raised by ``_validate_comparison_value`` to indicate an invalid comparison.

    Notes
    -----
    This is an internal error.
    """

807 

808 

# Public re-export list for ``pandas.errors``; mostly alphabetical by ASCII.
# NOTE(review): ChainedAssignmentError and the module-private helpers
# (_chained_assignment_* messages, _check_cacher) are not exported here —
# confirm that is intentional.
__all__ = [
    "AbstractMethodError",
    "AttributeConflictWarning",
    "CategoricalConversionWarning",
    "ClosedFileError",
    "CSSWarning",
    "DatabaseError",
    "DataError",
    "DtypeWarning",
    "DuplicateLabelError",
    "EmptyDataError",
    "IncompatibilityWarning",
    "IntCastingNaNError",
    "InvalidColumnName",
    "InvalidComparison",
    "InvalidIndexError",
    "InvalidVersion",
    "IndexingError",
    "LossySetitemError",
    "MergeError",
    "NoBufferPresent",
    "NullFrequencyError",
    "NumbaUtilError",
    "NumExprClobberingError",
    "OptionError",
    "OutOfBoundsDatetime",
    "OutOfBoundsTimedelta",
    "ParserError",
    "ParserWarning",
    "PerformanceWarning",
    "PossibleDataLossError",
    "PossiblePrecisionLoss",
    "PyperclipException",
    "PyperclipWindowsException",
    "SettingWithCopyError",
    "SettingWithCopyWarning",
    "SpecificationError",
    "UndefinedVariableError",
    "UnsortedIndexError",
    "UnsupportedFunctionCall",
    "ValueLabelTypeMismatch",
]