1"""
2Base and utility classes for tseries type pandas objects.
3"""
4from __future__ import annotations
5
6from abc import (
7 ABC,
8 abstractmethod,
9)
10from typing import (
11 TYPE_CHECKING,
12 Any,
13 Callable,
14 cast,
15 final,
16)
17import warnings
18
19import numpy as np
20
21from pandas._config import using_copy_on_write
22
23from pandas._libs import (
24 NaT,
25 Timedelta,
26 lib,
27)
28from pandas._libs.tslibs import (
29 BaseOffset,
30 Resolution,
31 Tick,
32 parsing,
33 to_offset,
34)
35from pandas._libs.tslibs.dtypes import freq_to_period_freqstr
36from pandas.compat.numpy import function as nv
37from pandas.errors import (
38 InvalidIndexError,
39 NullFrequencyError,
40)
41from pandas.util._decorators import (
42 Appender,
43 cache_readonly,
44 doc,
45)
46from pandas.util._exceptions import find_stack_level
47
48from pandas.core.dtypes.common import (
49 is_integer,
50 is_list_like,
51)
52from pandas.core.dtypes.concat import concat_compat
53from pandas.core.dtypes.dtypes import CategoricalDtype
54
55from pandas.core.arrays import (
56 DatetimeArray,
57 ExtensionArray,
58 PeriodArray,
59 TimedeltaArray,
60)
61from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
62import pandas.core.common as com
63import pandas.core.indexes.base as ibase
64from pandas.core.indexes.base import (
65 Index,
66 _index_shared_docs,
67)
68from pandas.core.indexes.extension import NDArrayBackedExtensionIndex
69from pandas.core.indexes.range import RangeIndex
70from pandas.core.tools.timedeltas import to_timedelta
71
72if TYPE_CHECKING:
73 from collections.abc import Sequence
74 from datetime import datetime
75
76 from pandas._typing import (
77 Axis,
78 Self,
79 npt,
80 )
81
82 from pandas import CategoricalIndex
83
# Local copy of the shared docstring-substitution kwargs so that any local
# mutation does not leak back into pandas.core.indexes.base.
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
85
86
class DatetimeIndexOpsMixin(NDArrayBackedExtensionIndex, ABC):
    """
    Common ops mixin to support a unified interface datetimelike Index.
    """

    # Strings are parsed into datetimelike scalars rather than held directly.
    _can_hold_strings = False
    # Backing extension array; always one of the three datetimelike arrays.
    _data: DatetimeArray | TimedeltaArray | PeriodArray
94
95 @doc(DatetimeLikeArrayMixin.mean)
96 def mean(self, *, skipna: bool = True, axis: int | None = 0):
97 return self._data.mean(skipna=skipna, axis=axis)
98
    @property
    def freq(self) -> BaseOffset | None:
        """Frequency offset of the underlying array, or None when not set."""
        return self._data.freq

    @freq.setter
    def freq(self, value) -> None:
        # Validation happens in the array's setter; PeriodArray declares the
        # property read-only, hence the type-ignore.
        # error: Property "freq" defined in "PeriodArray" is read-only [misc]
        self._data.freq = value  # type: ignore[misc]
107
    @property
    def asi8(self) -> npt.NDArray[np.int64]:
        """Integer (i8) representation of the values, as stored by the array."""
        return self._data.asi8
111
    @property
    @doc(DatetimeLikeArrayMixin.freqstr)
    def freqstr(self) -> str:
        from pandas import PeriodIndex

        if self._data.freqstr is not None and isinstance(
            self._data, (PeriodArray, PeriodIndex)
        ):
            # Period-dtype data reports its freq in period-style notation,
            # derived from the offset's (n, name) pair.
            freq = freq_to_period_freqstr(self._data.freq.n, self._data.freq.name)
            return freq
        else:
            return self._data.freqstr  # type: ignore[return-value]
124
    @cache_readonly
    @abstractmethod
    def _resolution_obj(self) -> Resolution:
        """Resolution of this index's values; supplied by concrete subclasses."""
        ...
129
    @cache_readonly
    @doc(DatetimeLikeArrayMixin.resolution)
    def resolution(self) -> str:
        # Docstring is shared with the array via @doc; simple delegation.
        return self._data.resolution
134
135 # ------------------------------------------------------------------------
136
    @cache_readonly
    def hasnans(self) -> bool:
        """Whether the index contains any NaT values (cached)."""
        return self._data._hasna
140
141 def equals(self, other: Any) -> bool:
142 """
143 Determines if two Index objects contain the same elements.
144 """
145 if self.is_(other):
146 return True
147
148 if not isinstance(other, Index):
149 return False
150 elif other.dtype.kind in "iufc":
151 return False
152 elif not isinstance(other, type(self)):
153 should_try = False
154 inferable = self._data._infer_matches
155 if other.dtype == object:
156 should_try = other.inferred_type in inferable
157 elif isinstance(other.dtype, CategoricalDtype):
158 other = cast("CategoricalIndex", other)
159 should_try = other.categories.inferred_type in inferable
160
161 if should_try:
162 try:
163 other = type(self)(other)
164 except (ValueError, TypeError, OverflowError):
165 # e.g.
166 # ValueError -> cannot parse str entry, or OutOfBoundsDatetime
167 # TypeError -> trying to convert IntervalIndex to DatetimeIndex
168 # OverflowError -> Index([very_large_timedeltas])
169 return False
170
171 if self.dtype != other.dtype:
172 # have different timezone
173 return False
174
175 return np.array_equal(self.asi8, other.asi8)
176
177 @Appender(Index.__contains__.__doc__)
178 def __contains__(self, key: Any) -> bool:
179 hash(key)
180 try:
181 self.get_loc(key)
182 except (KeyError, TypeError, ValueError, InvalidIndexError):
183 return False
184 return True
185
    def _convert_tolerance(self, tolerance, target):
        # Coerce the tolerance to timedelta64 values before handing off to
        # the shared validation in the base class.
        tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
        return super()._convert_tolerance(tolerance, target)
189
    # --------------------------------------------------------------------
    # Rendering Methods
    # Default NaT representation used by the rendering helpers below.
    _default_na_rep = "NaT"
193
194 def format(
195 self,
196 name: bool = False,
197 formatter: Callable | None = None,
198 na_rep: str = "NaT",
199 date_format: str | None = None,
200 ) -> list[str]:
201 """
202 Render a string representation of the Index.
203 """
204 warnings.warn(
205 # GH#55413
206 f"{type(self).__name__}.format is deprecated and will be removed "
207 "in a future version. Convert using index.astype(str) or "
208 "index.map(formatter) instead.",
209 FutureWarning,
210 stacklevel=find_stack_level(),
211 )
212 header = []
213 if name:
214 header.append(
215 ibase.pprint_thing(self.name, escape_chars=("\t", "\r", "\n"))
216 if self.name is not None
217 else ""
218 )
219
220 if formatter is not None:
221 return header + list(self.map(formatter))
222
223 return self._format_with_header(
224 header=header, na_rep=na_rep, date_format=date_format
225 )
226
227 def _format_with_header(
228 self, *, header: list[str], na_rep: str, date_format: str | None = None
229 ) -> list[str]:
230 # TODO: not reached in tests 2023-10-11
231 # matches base class except for whitespace padding and date_format
232 return header + list(
233 self._get_values_for_csv(na_rep=na_rep, date_format=date_format)
234 )
235
    @property
    def _formatter_func(self):
        """Element-formatting callable, as chosen by the backing array."""
        return self._data._formatter()
239
    def _format_attrs(self):
        """
        Return a list of tuples of the (attr,formatted_value).
        """
        attrs = super()._format_attrs()
        for attrib in self._attributes:
            # iterating over _attributes prevents us from doing this for PeriodIndex
            if attrib == "freq":
                freq = self.freqstr
                if freq is not None:
                    # repr so the freq string is shown quoted, e.g. D -> 'D'
                    freq = repr(freq)
                attrs.append(("freq", freq))
        return attrs
253
    @Appender(Index._summary.__doc__)
    def _summary(self, name=None) -> str:
        result = super()._summary(name=name)
        if self.freq:
            # Append a frequency line only when a freq is set.
            result += f"\nFreq: {self.freqstr}"

        return result
261
262 # --------------------------------------------------------------------
263 # Indexing Methods
264
    @final
    def _can_partial_date_slice(self, reso: Resolution) -> bool:
        """
        Can a string parsed at resolution `reso` slice (rather than exactly
        match) this index?  Only when `reso` is coarser than the index's own
        resolution.
        """
        # e.g. test_getitem_setitem_periodindex
        # History of conversation GH#3452, GH#3931, GH#2369, GH#14826
        return reso > self._resolution_obj
        # NB: for DTI/PI, not TDI
271
    def _parsed_string_to_bounds(self, reso: Resolution, parsed):
        """Return the (lower, upper) bounds implied by a partially-specified
        datetime; overridden by subclasses that support string indexing."""
        raise NotImplementedError
274
275 def _parse_with_reso(self, label: str):
276 # overridden by TimedeltaIndex
277 try:
278 if self.freq is None or hasattr(self.freq, "rule_code"):
279 freq = self.freq
280 except NotImplementedError:
281 freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
282
283 freqstr: str | None
284 if freq is not None and not isinstance(freq, str):
285 freqstr = freq.rule_code
286 else:
287 freqstr = freq
288
289 if isinstance(label, np.str_):
290 # GH#45580
291 label = str(label)
292
293 parsed, reso_str = parsing.parse_datetime_string_with_reso(label, freqstr)
294 reso = Resolution.from_attrname(reso_str)
295 return parsed, reso
296
297 def _get_string_slice(self, key: str):
298 # overridden by TimedeltaIndex
299 parsed, reso = self._parse_with_reso(key)
300 try:
301 return self._partial_date_slice(reso, parsed)
302 except KeyError as err:
303 raise KeyError(key) from err
304
305 @final
306 def _partial_date_slice(
307 self,
308 reso: Resolution,
309 parsed: datetime,
310 ) -> slice | npt.NDArray[np.intp]:
311 """
312 Parameters
313 ----------
314 reso : Resolution
315 parsed : datetime
316
317 Returns
318 -------
319 slice or ndarray[intp]
320 """
321 if not self._can_partial_date_slice(reso):
322 raise ValueError
323
324 t1, t2 = self._parsed_string_to_bounds(reso, parsed)
325 vals = self._data._ndarray
326 unbox = self._data._unbox
327
328 if self.is_monotonic_increasing:
329 if len(self) and (
330 (t1 < self[0] and t2 < self[0]) or (t1 > self[-1] and t2 > self[-1])
331 ):
332 # we are out of range
333 raise KeyError
334
335 # TODO: does this depend on being monotonic _increasing_?
336
337 # a monotonic (sorted) series can be sliced
338 left = vals.searchsorted(unbox(t1), side="left")
339 right = vals.searchsorted(unbox(t2), side="right")
340 return slice(left, right)
341
342 else:
343 lhs_mask = vals >= unbox(t1)
344 rhs_mask = vals <= unbox(t2)
345
346 # try to find the dates
347 return (lhs_mask & rhs_mask).nonzero()[0]
348
349 def _maybe_cast_slice_bound(self, label, side: str):
350 """
351 If label is a string, cast it to scalar type according to resolution.
352
353 Parameters
354 ----------
355 label : object
356 side : {'left', 'right'}
357
358 Returns
359 -------
360 label : object
361
362 Notes
363 -----
364 Value of `side` parameter should be validated in caller.
365 """
366 if isinstance(label, str):
367 try:
368 parsed, reso = self._parse_with_reso(label)
369 except ValueError as err:
370 # DTI -> parsing.DateParseError
371 # TDI -> 'unit abbreviation w/o a number'
372 # PI -> string cannot be parsed as datetime-like
373 self._raise_invalid_indexer("slice", label, err)
374
375 lower, upper = self._parsed_string_to_bounds(reso, parsed)
376 return lower if side == "left" else upper
377 elif not isinstance(label, self._data._recognized_scalars):
378 self._raise_invalid_indexer("slice", label)
379
380 return label
381
382 # --------------------------------------------------------------------
383 # Arithmetic Methods
384
    def shift(self, periods: int = 1, freq=None) -> Self:
        """
        Shift index by desired number of time frequency increments.

        This method is for shifting the values of datetime-like indexes
        by a specified time increment a given number of times.

        Parameters
        ----------
        periods : int, default 1
            Number of periods (or increments) to shift by,
            can be positive or negative.
        freq : pandas.DateOffset, pandas.Timedelta or string, optional
            Frequency increment to shift by.
            If None, the index is shifted by its own `freq` attribute.
            Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.

        Returns
        -------
        pandas.DatetimeIndex
            Shifted index.

        See Also
        --------
        Index.shift : Shift values of Index.
        PeriodIndex.shift : Shift values of PeriodIndex.
        """
        # Abstract here; concrete implementation lives on the subclasses
        # (this docstring is reused by them via @doc).
        raise NotImplementedError
413
414 # --------------------------------------------------------------------
415
416 @doc(Index._maybe_cast_listlike_indexer)
417 def _maybe_cast_listlike_indexer(self, keyarr):
418 try:
419 res = self._data._validate_listlike(keyarr, allow_object=True)
420 except (ValueError, TypeError):
421 if not isinstance(keyarr, ExtensionArray):
422 # e.g. we don't want to cast DTA to ndarray[object]
423 res = com.asarray_tuplesafe(keyarr)
424 # TODO: com.asarray_tuplesafe shouldn't cast e.g. DatetimeArray
425 else:
426 res = keyarr
427 return Index(res, dtype=res.dtype)
428
429
class DatetimeTimedeltaMixin(DatetimeIndexOpsMixin, ABC):
    """
    Mixin class for methods shared by DatetimeIndex and TimedeltaIndex,
    but not PeriodIndex
    """

    # Backing array; narrower than the parent mixin (no PeriodArray here).
    _data: DatetimeArray | TimedeltaArray
    # Attributes compared / propagated by the shared Index machinery.
    _comparables = ["name", "freq"]
    _attributes = ["name", "freq"]

    # Compat for frequency inference, see GH#23789
    _is_monotonic_increasing = Index.is_monotonic_increasing
    _is_monotonic_decreasing = Index.is_monotonic_decreasing
    _is_unique = Index.is_unique
444
    @property
    def unit(self) -> str:
        """Time unit of the underlying array (e.g. "ns"); delegated to it."""
        return self._data.unit
448
449 def as_unit(self, unit: str) -> Self:
450 """
451 Convert to a dtype with the given unit resolution.
452
453 Parameters
454 ----------
455 unit : {'s', 'ms', 'us', 'ns'}
456
457 Returns
458 -------
459 same type as self
460
461 Examples
462 --------
463 For :class:`pandas.DatetimeIndex`:
464
465 >>> idx = pd.DatetimeIndex(['2020-01-02 01:02:03.004005006'])
466 >>> idx
467 DatetimeIndex(['2020-01-02 01:02:03.004005006'],
468 dtype='datetime64[ns]', freq=None)
469 >>> idx.as_unit('s')
470 DatetimeIndex(['2020-01-02 01:02:03'], dtype='datetime64[s]', freq=None)
471
472 For :class:`pandas.TimedeltaIndex`:
473
474 >>> tdelta_idx = pd.to_timedelta(['1 day 3 min 2 us 42 ns'])
475 >>> tdelta_idx
476 TimedeltaIndex(['1 days 00:03:00.000002042'],
477 dtype='timedelta64[ns]', freq=None)
478 >>> tdelta_idx.as_unit('s')
479 TimedeltaIndex(['1 days 00:03:00'], dtype='timedelta64[s]', freq=None)
480 """
481 arr = self._data.as_unit(unit)
482 return type(self)._simple_new(arr, name=self.name)
483
484 def _with_freq(self, freq):
485 arr = self._data._with_freq(freq)
486 return type(self)._simple_new(arr, name=self._name)
487
    @property
    def values(self) -> np.ndarray:
        """
        Raw ndarray backing the index.

        NB: For Datetime64TZ this is lossy (the tz is dropped).  Under
        copy-on-write the returned view is marked read-only so callers
        cannot mutate the index through it.
        """
        data = self._data._ndarray
        if using_copy_on_write():
            data = data.view()
            data.flags.writeable = False
        return data
496
497 @doc(DatetimeIndexOpsMixin.shift)
498 def shift(self, periods: int = 1, freq=None) -> Self:
499 if freq is not None and freq != self.freq:
500 if isinstance(freq, str):
501 freq = to_offset(freq)
502 offset = periods * freq
503 return self + offset
504
505 if periods == 0 or len(self) == 0:
506 # GH#14811 empty case
507 return self.copy()
508
509 if self.freq is None:
510 raise NullFrequencyError("Cannot shift with no freq")
511
512 start = self[0] + periods * self.freq
513 end = self[-1] + periods * self.freq
514
515 # Note: in the DatetimeTZ case, _generate_range will infer the
516 # appropriate timezone from `start` and `end`, so tz does not need
517 # to be passed explicitly.
518 result = self._data._generate_range(
519 start=start, end=end, periods=None, freq=self.freq, unit=self.unit
520 )
521 return type(self)._simple_new(result, name=self.name)
522
    @cache_readonly
    @doc(DatetimeLikeArrayMixin.inferred_freq)
    def inferred_freq(self) -> str | None:
        # Cached here; the inference itself happens on the backing array.
        return self._data.inferred_freq
527
528 # --------------------------------------------------------------------
529 # Set Operation Methods
530
531 @cache_readonly
532 def _as_range_index(self) -> RangeIndex:
533 # Convert our i8 representations to RangeIndex
534 # Caller is responsible for checking isinstance(self.freq, Tick)
535 freq = cast(Tick, self.freq)
536 tick = Timedelta(freq).as_unit("ns")._value
537 rng = range(self[0]._value, self[-1]._value + tick, tick)
538 return RangeIndex(rng)
539
540 def _can_range_setop(self, other) -> bool:
541 return isinstance(self.freq, Tick) and isinstance(other.freq, Tick)
542
    def _wrap_range_setop(self, other, res_i8) -> Self:
        """
        Rewrap the i8-based result of a RangeIndex set operation as an index
        of our own type, attaching the freq implied by the result's step.
        """
        new_freq = None
        if not len(res_i8):
            # RangeIndex defaults to step=1, which we don't want.
            new_freq = self.freq
        elif isinstance(res_i8, RangeIndex):
            # Non-empty RangeIndex result -> evenly spaced values; its step
            # (in nanoseconds) determines the resulting freq.
            new_freq = to_offset(Timedelta(res_i8.step))

        # TODO(GH#41493): we cannot just do
        #  type(self._data)(res_i8.values, dtype=self.dtype, freq=new_freq)
        #  because test_setops_preserve_freq fails with _validate_frequency raising.
        #  This raising is incorrect, as 'on_freq' is incorrect. This will
        #  be fixed by GH#41493
        res_values = res_i8.values.view(self._data._ndarray.dtype)
        result = type(self._data)._simple_new(
            # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
            # incompatible type "Union[dtype[Any], ExtensionDtype]"; expected
            # "Union[dtype[datetime64], DatetimeTZDtype]"
            res_values,
            dtype=self.dtype,  # type: ignore[arg-type]
            freq=new_freq,  # type: ignore[arg-type]
        )
        return cast("Self", self._wrap_setop_result(other, result))
566
567 def _range_intersect(self, other, sort) -> Self:
568 # Dispatch to RangeIndex intersection logic.
569 left = self._as_range_index
570 right = other._as_range_index
571 res_i8 = left.intersection(right, sort=sort)
572 return self._wrap_range_setop(other, res_i8)
573
574 def _range_union(self, other, sort) -> Self:
575 # Dispatch to RangeIndex union logic.
576 left = self._as_range_index
577 right = other._as_range_index
578 res_i8 = left.union(right, sort=sort)
579 return self._wrap_range_setop(other, res_i8)
580
    def _intersection(self, other: Index, sort: bool = False) -> Index:
        """
        intersection specialized to the case with matching dtypes and both non-empty.
        """
        other = cast("DatetimeTimedeltaMixin", other)

        if self._can_range_setop(other):
            # Both freqs are Ticks: reduce to RangeIndex arithmetic on i8.
            return self._range_intersect(other, sort=sort)

        if not self._can_fast_intersect(other):
            result = Index._intersection(self, other, sort=sort)
            # We need to invalidate the freq because Index._intersection
            # uses _shallow_copy on a view of self._data, which will preserve
            # self.freq if we're not careful.
            # At this point we should have result.dtype == self.dtype
            # and type(result) is type(self._data)
            result = self._wrap_setop_result(other, result)
            return result._with_freq(None)._with_freq("infer")

        else:
            return self._fast_intersect(other, sort)
602
603 def _fast_intersect(self, other, sort):
604 # to make our life easier, "sort" the two ranges
605 if self[0] <= other[0]:
606 left, right = self, other
607 else:
608 left, right = other, self
609
610 # after sorting, the intersection always starts with the right index
611 # and ends with the index of which the last elements is smallest
612 end = min(left[-1], right[-1])
613 start = right[0]
614
615 if end < start:
616 result = self[:0]
617 else:
618 lslice = slice(*left.slice_locs(start, end))
619 result = left._values[lslice]
620
621 return result
622
623 def _can_fast_intersect(self, other: Self) -> bool:
624 # Note: we only get here with len(self) > 0 and len(other) > 0
625 if self.freq is None:
626 return False
627
628 elif other.freq != self.freq:
629 return False
630
631 elif not self.is_monotonic_increasing:
632 # Because freq is not None, we must then be monotonic decreasing
633 return False
634
635 # this along with matching freqs ensure that we "line up",
636 # so intersection will preserve freq
637 # Note we are assuming away Ticks, as those go through _range_intersect
638 # GH#42104
639 return self.freq.n == 1
640
641 def _can_fast_union(self, other: Self) -> bool:
642 # Assumes that type(self) == type(other), as per the annotation
643 # The ability to fast_union also implies that `freq` should be
644 # retained on union.
645 freq = self.freq
646
647 if freq is None or freq != other.freq:
648 return False
649
650 if not self.is_monotonic_increasing:
651 # Because freq is not None, we must then be monotonic decreasing
652 # TODO: do union on the reversed indexes?
653 return False
654
655 if len(self) == 0 or len(other) == 0:
656 # only reached via union_many
657 return True
658
659 # to make our life easier, "sort" the two ranges
660 if self[0] <= other[0]:
661 left, right = self, other
662 else:
663 left, right = other, self
664
665 right_start = right[0]
666 left_end = left[-1]
667
668 # Only need to "adjoin", not overlap
669 return (right_start == left_end + freq) or right_start in left
670
    def _fast_union(self, other: Self, sort=None) -> Self:
        # Caller is responsible for ensuring self and other are non-empty

        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        elif sort is False:
            # TDIs are not in the "correct" order and we don't want
            # to sort but want to remove overlaps
            left, right = self, other
            left_start = left[0]
            loc = right.searchsorted(left_start, side="left")
            right_chunk = right._values[:loc]
            dates = concat_compat((left._values, right_chunk))
            result = type(self)._simple_new(dates, name=self.name)
            return result
        else:
            left, right = other, self

        left_end = left[-1]
        right_end = right[-1]

        # concatenate
        if left_end < right_end:
            # Append the non-overlapping tail of `right` onto `left`.
            loc = right.searchsorted(left_end, side="right")
            right_chunk = right._values[loc:]
            dates = concat_compat([left._values, right_chunk])
            # The can_fast_union check ensures that the result.freq
            # should match self.freq
            assert isinstance(dates, type(self._data))
            # error: Item "ExtensionArray" of "ExtensionArray |
            # ndarray[Any, Any]" has no attribute "_freq"
            assert dates._freq == self.freq  # type: ignore[union-attr]
            result = type(self)._simple_new(dates)
            return result
        else:
            # `right` is entirely contained in `left`.
            return left
708
709 def _union(self, other, sort):
710 # We are called by `union`, which is responsible for this validation
711 assert isinstance(other, type(self))
712 assert self.dtype == other.dtype
713
714 if self._can_range_setop(other):
715 return self._range_union(other, sort=sort)
716
717 if self._can_fast_union(other):
718 result = self._fast_union(other, sort=sort)
719 # in the case with sort=None, the _can_fast_union check ensures
720 # that result.freq == self.freq
721 return result
722 else:
723 return super()._union(other, sort)._with_freq("infer")
724
725 # --------------------------------------------------------------------
726 # Join Methods
727
728 def _get_join_freq(self, other):
729 """
730 Get the freq to attach to the result of a join operation.
731 """
732 freq = None
733 if self._can_fast_union(other):
734 freq = self.freq
735 return freq
736
    def _wrap_joined_index(
        self, joined, other, lidx: npt.NDArray[np.intp], ridx: npt.NDArray[np.intp]
    ):
        # Caller (shared Index join machinery) guarantees matching dtypes.
        assert other.dtype == self.dtype, (other.dtype, self.dtype)
        result = super()._wrap_joined_index(joined, other, lidx, ridx)
        # Attach a freq only when the join provably preserves it.
        result._data._freq = self._get_join_freq(other)
        return result
744
745 def _get_engine_target(self) -> np.ndarray:
746 # engine methods and libjoin methods need dt64/td64 values cast to i8
747 return self._data._ndarray.view("i8")
748
749 def _from_join_target(self, result: np.ndarray):
750 # view e.g. i8 back to M8[ns]
751 result = result.view(self._data._ndarray.dtype)
752 return self._data._from_backing_data(result)
753
754 # --------------------------------------------------------------------
755 # List-like Methods
756
757 def _get_delete_freq(self, loc: int | slice | Sequence[int]):
758 """
759 Find the `freq` for self.delete(loc).
760 """
761 freq = None
762 if self.freq is not None:
763 if is_integer(loc):
764 if loc in (0, -len(self), -1, len(self) - 1):
765 freq = self.freq
766 else:
767 if is_list_like(loc):
768 # error: Incompatible types in assignment (expression has
769 # type "Union[slice, ndarray]", variable has type
770 # "Union[int, slice, Sequence[int]]")
771 loc = lib.maybe_indices_to_slice( # type: ignore[assignment]
772 np.asarray(loc, dtype=np.intp), len(self)
773 )
774 if isinstance(loc, slice) and loc.step in (1, None):
775 if loc.start in (0, None) or loc.stop in (len(self), None):
776 freq = self.freq
777 return freq
778
779 def _get_insert_freq(self, loc: int, item):
780 """
781 Find the `freq` for self.insert(loc, item).
782 """
783 value = self._data._validate_scalar(item)
784 item = self._data._box_func(value)
785
786 freq = None
787 if self.freq is not None:
788 # freq can be preserved on edge cases
789 if self.size:
790 if item is NaT:
791 pass
792 elif loc in (0, -len(self)) and item + self.freq == self[0]:
793 freq = self.freq
794 elif (loc == len(self)) and item - self.freq == self[-1]:
795 freq = self.freq
796 else:
797 # Adding a single item to an empty index may preserve freq
798 if isinstance(self.freq, Tick):
799 # all TimedeltaIndex cases go through here; is_on_offset
800 # would raise TypeError
801 freq = self.freq
802 elif self.freq.is_on_offset(item):
803 freq = self.freq
804 return freq
805
    @doc(NDArrayBackedExtensionIndex.delete)
    def delete(self, loc) -> Self:
        result = super().delete(loc)
        # Deletion can break the even spacing; recompute what freq survives.
        result._data._freq = self._get_delete_freq(loc)
        return result
811
    @doc(NDArrayBackedExtensionIndex.insert)
    def insert(self, loc: int, item):
        result = super().insert(loc, item)
        if isinstance(result, type(self)):
            # i.e. parent class method did not cast
            # Only an edge insert of exactly one freq-step preserves freq.
            result._data._freq = self._get_insert_freq(loc, item)
        return result
819
820 # --------------------------------------------------------------------
821 # NDArray-Like Methods
822
823 @Appender(_index_shared_docs["take"] % _index_doc_kwargs)
824 def take(
825 self,
826 indices,
827 axis: Axis = 0,
828 allow_fill: bool = True,
829 fill_value=None,
830 **kwargs,
831 ) -> Self:
832 nv.validate_take((), kwargs)
833 indices = np.asarray(indices, dtype=np.intp)
834
835 result = NDArrayBackedExtensionIndex.take(
836 self, indices, axis, allow_fill, fill_value, **kwargs
837 )
838
839 maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
840 if isinstance(maybe_slice, slice):
841 freq = self._data._get_getitem_freq(maybe_slice)
842 result._data._freq = freq
843 return result