Commit c5c9db6

reverting ignore flags due to pandas stubs
1 parent d00cd93 commit c5c9db6
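
The ignore comments below move in both directions because the set of errors mypy reports depends on which pandas stubs are installed: an ignore that the stubs make unnecessary is itself flagged once mypy runs with unused-ignore warnings enabled. A minimal sketch of that mechanism (assumed example, not code from this commit):

# demo.py -- run with:  mypy --warn-unused-ignores demo.py
x: int = 1  # type: ignore[assignment]
# mypy reports the comment above as an unused "type: ignore" because the
# assignment is already well typed; the same check is what makes ignores
# appear and disappear as the installed pandas-stubs version changes.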

9 files changed (+24 −28 lines)

xarray/coding/cftimeindex.py

Lines changed: 1 addition & 1 deletion

@@ -517,7 +517,7 @@ def contains(self, key: Any) -> bool:
         """Needed for .loc based partial-string indexing"""
         return self.__contains__(key)
 
-    def shift(  # freq is typed Any, we are more precise
+    def shift(  # type: ignore[override]  # freq is typed Any, we are more precise
         self,
         periods: int | float,
         freq: str | timedelta | BaseCFTimeOffset | None = None,
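
For context, the restored ignore above suppresses mypy's override-compatibility error, which fires whenever a subclass tightens an inherited signature. A minimal sketch with generic classes (not the real pandas/xarray signatures):

class Base:
    def shift(self, freq: str | None = None) -> None: ...


class Precise(Base):
    # narrowing the parameter (and dropping its default) fails mypy's override
    # check, so the intentionally stricter signature carries an explicit ignore
    def shift(self, freq: str) -> None:  # type: ignore[override]
        print(f"shift by {freq}")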

xarray/core/dataarray.py

Lines changed: 1 addition & 1 deletion

@@ -3032,7 +3032,7 @@ def to_unstacked_dataset(self, dim: Hashable, level: int | Hashable = 0) -> Data
         if not isinstance(idx, pd.MultiIndex):
             raise ValueError(f"'{dim}' is not a stacked coordinate")
 
-        level_number = idx._get_level_number(level)
+        level_number = idx._get_level_number(level)  # type: ignore[attr-defined]
         variables = idx.levels[level_number]
         variable_dim = idx.names[level_number]
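
The [attr-defined] ignore above is needed because _get_level_number is a private MultiIndex helper that pandas-stubs does not declare. A small self-contained illustration (not xarray code):

import pandas as pd

idx = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
# resolves a level name to its position; private API, so the stubs do not know it
pos = idx._get_level_number("y")  # type: ignore[attr-defined]
assert pos == 1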

xarray/core/dataset.py

Lines changed: 5 additions & 5 deletions

@@ -6629,7 +6629,7 @@ def interpolate_na(
             | None
         ) = None,
         **kwargs: Any,
-    ) -> Dataset:
+    ) -> Self:
         """Fill in NaNs by interpolating according to different methods.
 
         Parameters
@@ -6760,7 +6760,7 @@ def interpolate_na(
         )
         return new
 
-    def ffill(self, dim: Hashable, limit: int | None = None) -> Dataset:
+    def ffill(self, dim: Hashable, limit: int | None = None) -> Self:
         """Fill NaN values by propagating values forward
 
         *Requires bottleneck.*
@@ -6824,7 +6824,7 @@ def ffill(self, dim: Hashable, limit: int | None = None) -> Dataset:
         new = _apply_over_vars_with_dim(ffill, self, dim=dim, limit=limit)
         return new
 
-    def bfill(self, dim: Hashable, limit: int | None = None) -> Dataset:
+    def bfill(self, dim: Hashable, limit: int | None = None) -> Self:
         """Fill NaN values by propagating values backward
 
         *Requires bottleneck.*
@@ -7523,7 +7523,7 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self:
 
         if isinstance(idx, pd.MultiIndex):
             dims = tuple(
-                name if name is not None else "level_%i" % n
+                name if name is not None else "level_%i" % n  # type: ignore[redundant-expr]
                 for n, name in enumerate(idx.names)
             )
             for dim, lev in zip(dims, idx.levels, strict=True):
@@ -9829,7 +9829,7 @@ def eval(
         c (x) float64 40B 0.0 1.25 2.5 3.75 5.0
         """
 
-        return pd.eval(
+        return pd.eval(  # type: ignore[return-value]
             statement,
             resolvers=[self],
             target=self,
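
The Dataset → Self changes above use the standard pattern for methods that return an instance of whatever class they are called on, so subclasses keep their own type. A minimal sketch with generic names (assumes Python 3.11+ for typing.Self):

from typing import Self


class Container:
    def ffill(self) -> Self:  # the return type follows the runtime class
        return self


class SubContainer(Container):
    pass


sub: SubContainer = SubContainer().ffill()  # typed as SubContainer, not Container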

xarray/core/extension_array.py

Lines changed: 4 additions & 6 deletions

@@ -45,7 +45,7 @@ def __extension_duck_array__stack(arr: T_ExtensionArray, axis: int):
 def __extension_duck_array__concatenate(
     arrays: Sequence[T_ExtensionArray], axis: int = 0, out=None
 ) -> T_ExtensionArray:
-    return type(arrays[0])._concat_same_type(arrays)
+    return type(arrays[0])._concat_same_type(arrays)  # type: ignore[attr-defined]
 
 
 @implements(np.where)
@@ -57,8 +57,8 @@ def __extension_duck_array__where(
         and isinstance(y, pd.Categorical)
         and x.dtype != y.dtype
     ):
-        x = x.add_categories(set(y.categories).difference(set(x.categories)))
-        y = y.add_categories(set(x.categories).difference(set(y.categories)))
+        x = x.add_categories(set(y.categories).difference(set(x.categories)))  # type: ignore[assignment]
+        y = y.add_categories(set(x.categories).difference(set(y.categories)))  # type: ignore[assignment]
     return cast(T_ExtensionArray, pd.Series(x).where(condition, pd.Series(y)).array)
 
 
@@ -116,9 +116,7 @@ def __getitem__(self, key) -> PandasExtensionArray[T_ExtensionArray]:
         if is_extension_array_dtype(item):
             return type(self)(item)
         if np.isscalar(item):
-            return type(self)(
-                type(self.array)([item])
-            )  # only subclasses with proper __init__ allowed
+            return type(self)(type(self.array)([item]))  # type: ignore[call-arg]  # only subclasses with proper __init__ allowed
         return item
 
     def __setitem__(self, key, val):

xarray/core/indexes.py

Lines changed: 5 additions & 7 deletions

@@ -740,7 +740,7 @@ def isel(
             # scalar indexer: drop index
             return None
 
-        return self._replace(self.index[indxr])
+        return self._replace(self.index[indxr])  # type: ignore[index]
 
     def sel(
         self, labels: dict[Any, Any], method=None, tolerance=None
@@ -926,7 +926,7 @@ def remove_unused_levels_categories(index: T_PDIndex) -> T_PDIndex:
         return cast(T_PDIndex, new_index)
 
     if isinstance(index, pd.CategoricalIndex):
-        return index.remove_unused_categories()
+        return index.remove_unused_categories()  # type: ignore[attr-defined]
 
     return index
 
@@ -1164,7 +1164,7 @@ def create_variables(
                 dtype = None
             else:
                 level = name
-                dtype = self.level_coords_dtype[name]  # TODO: are Hashables ok?
+                dtype = self.level_coords_dtype[name]  # type: ignore[index]  # TODO: are Hashables ok?
 
             var = variables.get(name, None)
             if var is not None:
@@ -1174,9 +1174,7 @@ def create_variables(
                 attrs = {}
                 encoding = {}
 
-            data = PandasMultiIndexingAdapter(
-                self.index, dtype=dtype, level=level
-            )  # TODO: are Hashables ok?
+            data = PandasMultiIndexingAdapter(self.index, dtype=dtype, level=level)  # type: ignore[arg-type]  # TODO: are Hashables ok?
             index_vars[name] = IndexVariable(
                 self.dim,
                 data,
@@ -1673,7 +1671,7 @@ def copy_indexes(
                 convert_new_idx = False
                 xr_idx = idx
 
-            new_idx = xr_idx._copy(deep=deep, memo=memo)
+            new_idx = xr_idx._copy(deep=deep, memo=memo)  # type: ignore[assignment]
             idx_vars = xr_idx.create_variables(coords)
 
             if convert_new_idx:

xarray/core/utils.py

Lines changed: 2 additions & 2 deletions

@@ -132,7 +132,7 @@ def get_valid_numpy_dtype(array: np.ndarray | pd.Index) -> np.dtype:
     if not is_valid_numpy_dtype(array.dtype):
         return np.dtype("O")
 
-    return array.dtype
+    return array.dtype  # type: ignore[return-value]
 
 
 def maybe_coerce_to_str(index, original_coords):
@@ -180,7 +180,7 @@ def equivalent(first: T, second: T) -> bool:
         return duck_array_ops.array_equiv(first, second)
     if isinstance(first, list) or isinstance(second, list):
         return list_equiv(first, second)  # type: ignore[arg-type]
-    return (first == second) or (pd.isnull(first) and pd.isnull(second))
+    return (first == second) or (pd.isnull(first) and pd.isnull(second))  # type: ignore[call-overload]
 
 
 def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool:

xarray/core/variable.py

Lines changed: 1 addition & 1 deletion

@@ -151,7 +151,7 @@ def as_variable(
         ) from error
     elif utils.is_scalar(obj):
         obj = Variable([], obj)
-    elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None:  # type: ignore[redundant-expr]
+    elif isinstance(obj, pd.Index | IndexVariable) and obj.name is not None:
        obj = Variable(obj.name, obj)
    elif isinstance(obj, set | dict):
        raise TypeError(f"variable {name!r} has invalid type {type(obj)!r}")

xarray/groupers.py

Lines changed: 1 addition & 1 deletion

@@ -296,7 +296,7 @@ def factorize(self, group: T_Group) -> EncodedGroups:
 
         data = np.asarray(group.data)  # Cast _DummyGroup data to array
 
-        binned, self.bins = pd.cut(
+        binned, self.bins = pd.cut(  # type: ignore [call-overload]
            data.ravel(),
            bins=self.bins,
            right=self.right,

xarray/namedarray/daskmanager.py

Lines changed: 4 additions & 4 deletions

@@ -21,13 +21,13 @@
 try:
     from dask.array import Array as DaskArray
 except ImportError:
-    DaskArray = np.ndarray[Any, Any]  # type: ignore[misc,assignment]
+    DaskArray = np.ndarray[Any, Any]
 
 
 dask_available = module_available("dask")
 
 
-class DaskManager(ChunkManagerEntrypoint["DaskArray"]):  # type: ignore[type-var]
+class DaskManager(ChunkManagerEntrypoint["DaskArray"]):
     array_cls: type[DaskArray]
     available: bool = dask_available
 
@@ -91,7 +91,7 @@ def array_api(self) -> Any:
 
         return da
 
-    def reduction(  # type: ignore[override]
+    def reduction(
         self,
         arr: T_ChunkedArray,
         func: Callable[..., Any],
@@ -113,7 +113,7 @@ def reduction(  # type: ignore[override]
             keepdims=keepdims,
         )  # type: ignore[no-untyped-call]
 
-    def scan(  # type: ignore[override]
+    def scan(
         self,
         func: Callable[..., Any],
         binop: Callable[..., Any],
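
The fallback alias in the first hunk is the usual optional-dependency pattern: when dask is absent, the name DaskArray still resolves for annotations, and whether mypy wants ignores on that fallback (or on the subclass method signatures) shifts with the installed stubs. A stripped-down sketch of the pattern (assumed standalone example, not the xarray module):

from typing import Any

import numpy as np

try:
    from dask.array import Array as DaskArray  # real chunked-array type when dask is installed
except ImportError:
    # annotation-only stand-in so the name always resolves; with some stub
    # combinations mypy flags this line, hence the ignores this commit removes
    DaskArray = np.ndarray[Any, Any]


def rechunk(arr: DaskArray) -> DaskArray:
    # hypothetical function: the alias lets the signature be written the same
    # way whether or not dask is present
    return arr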
