 from pandas.formats.printing import pprint_thing
 from pandas.util.validators import validate_kwargs

-import pandas.core.algorithms as algos
+import pandas.core.algorithms as algorithms
 import pandas.core.common as com
 from pandas.core.config import option_context

-from pandas.libs import lib, tslib, algos as _algos
+from pandas.libs import lib, libts, libalgos, NaT, iNaT
 from pandas.libs.lib import Timestamp, count_level_2d

 _doc_template = """
@@ -1473,11 +1473,11 @@ def shift(self, periods=1, freq=None, axis=0):

         # filled in by Cython
         indexer = np.zeros_like(labels)
-        _algos.group_shift_indexer(indexer, labels, ngroups, periods)
+        libalgos.group_shift_indexer(indexer, labels, ngroups, periods)

         output = {}
         for name, obj in self._iterate_slices():
-            output[name] = algos.take_nd(obj.values, indexer)
+            output[name] = algorithms.take_nd(obj.values, indexer)

         return self._wrap_transformed_output(output)

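Note: group_shift_indexer fills indexer with, for each row, the position of the row `periods` back within the same group, or -1 where none exists; take_nd then gathers values by that indexer and fills the -1 slots. A rough public-API approximation of the gather step on hypothetical data (not the Cython routine itself):

    import numpy as np

    values = np.array([10., 20., 30., 40.])
    indexer = np.array([-1, 0, -1, 2])    # -1 marks "no prior row in this group"

    out = values.take(indexer)            # gather by position
    out[indexer == -1] = np.nan           # then blank out the -1 slots
    print(out)                            # [nan 10. nan 30.]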
@@ -1814,13 +1814,13 @@ def _get_cython_function(self, kind, how, values, is_numeric):
         def get_func(fname):
             # see if there is a fused-type version of function
             # only valid for numeric
-            f = getattr(_algos, fname, None)
+            f = getattr(libalgos, fname, None)
             if f is not None and is_numeric:
                 return f

             # otherwise find dtype-specific version, falling back to object
             for dt in [dtype_str, 'object']:
-                f = getattr(_algos, "%s_%s" % (fname, dtype_str), None)
+                f = getattr(libalgos, "%s_%s" % (fname, dtype_str), None)
                 if f is not None:
                     return f

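Note: the lookup tries a fused-type kernel named exactly fname first and then falls back to dtype-suffixed variants such as fname_float64 or fname_object. A small sketch of the pattern with a stand-in namespace (fake_kernels and group_add_float64 are illustrative, not pandas API; the is_numeric guard is dropped, and the sketch interpolates the loop variable dt so the 'object' fallback can actually fire, whereas the original interpolates dtype_str):

    from types import SimpleNamespace

    # stand-in for the compiled module: only a float64-specific kernel exists
    fake_kernels = SimpleNamespace(group_add_float64=lambda: "float64 kernel")

    def get_func(mod, fname, dtype_str):
        f = getattr(mod, fname, None)          # fused-type version first
        if f is not None:
            return f
        for dt in (dtype_str, 'object'):       # dtype-specific, then object
            f = getattr(mod, "%s_%s" % (fname, dt), None)
            if f is not None:
                return f
        return None

    print(get_func(fake_kernels, "group_add", "float64")())   # float64 kernel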
@@ -1900,7 +1900,7 @@ def _cython_operation(self, kind, values, how, axis):
         elif is_integer_dtype(values):
             # we use iNaT for the missing value on ints
             # so pre-convert to guard this condition
-            if (values == tslib.iNaT).any():
+            if (values == iNaT).any():
                 values = _ensure_float64(values)
             else:
                 values = values.astype('int64', copy=False)
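Note: integer arrays cannot hold NaN, so values that already contain the iNaT sentinel are promoted to float64 before the grouped operation runs; otherwise the sentinel would be treated as an ordinary, very negative integer. A tiny demonstration, assuming the sentinel equals the int64 minimum:

    import numpy as np

    iNaT = np.iinfo(np.int64).min                  # assumed value of the sentinel
    values = np.array([1, iNaT, 3], dtype='int64')

    if (values == iNaT).any():
        values = values.astype('float64')          # NaN can now be represented later
    print(values.dtype)                            # float64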
@@ -1942,7 +1942,7 @@ def _cython_operation(self, kind, values, how, axis):
                 result, values, labels, func, is_numeric, is_datetimelike)

         if is_integer_dtype(result):
-            mask = result == tslib.iNaT
+            mask = result == iNaT
             if mask.any():
                 result = result.astype('float64')
                 result[mask] = np.nan
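Note: this is the mirror image of the pre-conversion above: integer results coming back from the kernel still use iNaT to mark empty slots, so they are cast to float64 and the sentinel is replaced by NaN. A short sketch under the same assumption about the sentinel value:

    import numpy as np

    iNaT = np.iinfo(np.int64).min
    result = np.array([5, iNaT, 7], dtype='int64')

    mask = result == iNaT
    if mask.any():
        result = result.astype('float64')
        result[mask] = np.nan
    print(result)                                  # [ 5. nan  7.]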
@@ -2033,7 +2033,7 @@ def _aggregate_series_fast(self, obj, func):
         dummy = obj._get_values(slice(None, 0)).to_dense()
         indexer = get_group_index_sorter(group_index, ngroups)
         obj = obj.take(indexer, convert=False)
-        group_index = algos.take_nd(group_index, indexer, allow_fill=False)
+        group_index = algorithms.take_nd(group_index, indexer, allow_fill=False)
         grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
                                     dummy)
         result, counts = grouper.get_result()
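Note: the fast path hands the Cython SeriesGrouper values that are already sorted by group, so get_group_index_sorter is essentially a stable argsort of the group codes and the same permutation is applied to both the values and the codes. An illustrative public-API version (mergesort as the stable sort; SeriesGrouper itself is not reproduced):

    import numpy as np

    group_index = np.array([2, 0, 1, 0, 2])               # group code per row
    values = np.array([10., 11., 12., 13., 14.])

    indexer = np.argsort(group_index, kind='mergesort')   # stable sorter
    print(group_index.take(indexer))   # [0 0 1 2 2] -- groups are now contiguous
    print(values.take(indexer))        # [11. 13. 12. 10. 14.]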
@@ -2131,7 +2131,7 @@ def groups(self):
         # GH 3881
         result = {}
         for key, value in zip(self.binlabels, self.bins):
-            if key is not tslib.NaT:
+            if key is not NaT:
                 result[key] = value
         return result

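Note: the check is deliberately an identity test ("is not NaT") rather than equality, because NaT, like NaN, never compares equal to anything, including itself:

    import pandas as pd

    label = pd.NaT
    print(label == pd.NaT)    # False -- equality cannot detect NaT
    print(label is pd.NaT)    # True  -- identity is what these loops rely on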
@@ -2158,7 +2158,7 @@ def get_iterator(self, data, axis=0):

         start = 0
         for edge, label in zip(self.bins, self.binlabels):
-            if label is not tslib.NaT:
+            if label is not NaT:
                 yield label, slicer(start, edge)
             start = edge

@@ -2172,7 +2172,7 @@ def indices(self):
         i = 0
         for label, bin in zip(self.binlabels, self.bins):
             if i < bin:
-                if label is not tslib.NaT:
+                if label is not NaT:
                     indices[label] = list(range(i, bin))
             i = bin
         return indices
@@ -2382,7 +2382,7 @@ def group_index(self):

     def _make_labels(self):
         if self._labels is None or self._group_index is None:
-            labels, uniques = algos.factorize(self.grouper, sort=self.sort)
+            labels, uniques = algorithms.factorize(self.grouper, sort=self.sort)
             uniques = Index(uniques, name=self.name)
             self._labels = labels
             self._group_index = uniques
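Note: factorize turns the grouping values into integer codes plus the array of distinct values; the codes become the labels and the uniques become the group index. A minimal sketch with the public wrapper pd.factorize, which fronts the same machinery:

    import pandas as pd

    grouper = pd.Series(['b', 'a', 'b', 'c', 'a'])
    labels, uniques = pd.factorize(grouper, sort=True)
    print(labels)    # [1 0 1 2 0]
    print(uniques)   # Index(['a', 'b', 'c'], dtype='object')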
@@ -2927,7 +2927,7 @@ def _transform_fast(self, func):

         ids, _, ngroup = self.grouper.group_info
         cast = (self.size().fillna(0) > 0).any()
-        out = algos.take_1d(func().values, ids)
+        out = algorithms.take_1d(func().values, ids)
         if cast:
             out = self._try_cast(out, self.obj)
         return Series(out, index=self.obj.index, name=self.obj.name)
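Note: transform must return one value per original row, so the per-group result is broadcast back through the group ids: row i receives the value computed for its group. A public-API approximation of the take_1d step on hypothetical data (take_1d additionally knows how to fill -1 ids):

    import numpy as np

    per_group = np.array([100., 200.])   # one aggregated value per group
    ids = np.array([0, 1, 1, 0, 1])      # group id of each original row

    print(per_group.take(ids))           # [100. 200. 200. 100. 200.]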
@@ -2984,7 +2984,7 @@ def nunique(self, dropna=True):
         except TypeError:  # catches object dtypes
             assert val.dtype == object, \
                 'val.dtype must be object, got %s' % val.dtype
-            val, _ = algos.factorize(val, sort=False)
+            val, _ = algorithms.factorize(val, sort=False)
             sorter = np.lexsort((val, ids))
             _isnull = lambda a: a == -1
         else:
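Note: for object dtype the values are first factorized to integer codes so that np.lexsort can order rows by (value, group); counting distinct values per group then reduces to counting boundaries in the sorted arrays. A compact illustration of the sort step:

    import numpy as np
    import pandas as pd

    ids = np.array([0, 1, 0, 1, 0])                          # group of each row
    val = np.array(['x', 'y', 'x', 'z', 'y'], dtype=object)

    codes, _ = pd.factorize(val, sort=False)                 # object -> int codes
    sorter = np.lexsort((codes, ids))                        # ids is the primary key
    print(ids[sorter])      # [0 0 0 1 1]
    print(codes[sorter])    # [0 0 1 1 2] -- ready for boundary counting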
@@ -3068,7 +3068,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
             ids, val = ids[mask], val[mask]

         if bins is None:
-            lab, lev = algos.factorize(val, sort=True)
+            lab, lev = algorithms.factorize(val, sort=True)
         else:
             cat, bins = cut(val, bins, retbins=True)
             # bins[:-1] for backward compat;
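Note: with an explicit bins argument the values are bucketed by cut instead of factorized, and retbins=True also hands back the computed edges so they can be reused for the result's index. A small example with the public function on illustrative data:

    import pandas as pd

    val = [1, 7, 5, 4, 6, 3]
    cat, edges = pd.cut(val, bins=3, retbins=True)
    print(edges)        # bin edges, roughly [0.994, 3., 5., 7.]
    print(cat.codes)    # bin membership code for each value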
@@ -3107,7 +3107,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
         if dropna:
             m = ids[lab == -1]
             if _np_version_under1p8:
-                mi, ml = algos.factorize(m)
+                mi, ml = algorithms.factorize(m)
                 d[ml] = d[ml] - np.bincount(mi)
             else:
                 np.add.at(d, m, -1)
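Note: when dropna is set, counts for bins reached only through null keys must be decremented. On newer numpy this is one unbuffered scatter-add (np.add.at accumulates repeated indices correctly); the pre-1.8 branch rebuilds the same decrement from factorize + bincount. Both paths side by side on toy data:

    import numpy as np
    import pandas as pd

    d = np.array([3, 2, 4])    # provisional counts per bin
    m = np.array([0, 2, 2])    # bins hit by dropped rows (repeats allowed)

    # modern path: unbuffered in-place add, repeats accumulate
    d_new = d.copy()
    np.add.at(d_new, m, -1)

    # legacy path: count repeats per bin, then subtract
    mi, ml = pd.factorize(m)
    d_old = d.copy()
    d_old[ml] = d_old[ml] - np.bincount(mi)

    print(d_new, d_old)        # [2 2 2] [2 2 2]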
@@ -3129,7 +3129,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
             out = _ensure_int64(out)
             return Series(out, index=mi, name=self.name)

-        # for compat. with algos.value_counts need to ensure every
+        # for compat. with libalgos.value_counts need to ensure every
         # bin is present at every index level, null filled with zeros
         diff = np.zeros(len(out), dtype='bool')
         for lab in labels[:-1]:
@@ -3700,7 +3700,7 @@ def _transform_fast(self, result, obj):
         ids, _, ngroup = self.grouper.group_info
         output = []
         for i, _ in enumerate(result.columns):
-            res = algos.take_1d(result.iloc[:, i].values, ids)
+            res = algorithms.take_1d(result.iloc[:, i].values, ids)
             if cast:
                 res = self._try_cast(res, obj.iloc[:, i])
             output.append(res)
@@ -4188,7 +4188,7 @@ def __init__(self, data, labels, ngroups, axis=0):
     @cache_readonly
     def slabels(self):
         # Sorted labels
-        return algos.take_nd(self.labels, self.sort_idx, allow_fill=False)
+        return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)

     @cache_readonly
     def sort_idx(self):