
Commit 85acf78

Author: MomIsBestFriend
Commit message: Some code cleanups
1 parent 4b142ef, commit 85acf78

File tree: 5 files changed, 18 additions (+) and 36 deletions (-)

pandas/core/internals/blocks.py
pandas/core/internals/concat.py
pandas/core/internals/managers.py
pandas/io/parsers.py
pandas/io/pytables.py

pandas/core/internals/blocks.py

Lines changed: 5 additions & 15 deletions
@@ -85,8 +85,6 @@
 import pandas.core.missing as missing
 from pandas.core.nanops import nanpercentile

-from pandas.io.formats.printing import pprint_thing
-

 class Block(PandasObject):
     """
@@ -159,7 +157,8 @@ def _check_ndim(self, values, ndim):

     @property
     def _holder(self):
-        """The array-like that can hold the underlying values.
+        """
+        The array-like that can hold the underlying values.

         None for 'Block', overridden by subclasses that don't
         use an ndarray.
@@ -284,16 +283,10 @@ def __repr__(self) -> str:
         # don't want to print out all of the items here
         name = type(self).__name__
         if self._is_single_block:
-
             result = f"{name}: {len(self)} dtype: {self.dtype}"
-
         else:
-
-            shape = " x ".join(pprint_thing(s) for s in self.shape)
-            result = (
-                f"{name}: {pprint_thing(self.mgr_locs.indexer)}, "
-                f"{shape}, dtype: {self.dtype}"
-            )
+            shape = " x ".join(str(s) for s in self.shape)
+            result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}"

         return result

@@ -319,10 +312,7 @@ def getitem_block(self, slicer, new_mgr_locs=None):
         As of now, only supports slices that preserve dimensionality.
         """
         if new_mgr_locs is None:
-            if isinstance(slicer, tuple):
-                axis0_slicer = slicer[0]
-            else:
-                axis0_slicer = slicer
+            axis0_slicer = slicer[0] if isinstance(slicer, tuple) else slicer
             new_mgr_locs = self.mgr_locs[axis0_slicer]

         new_values = self._slice(slicer)
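
The getitem_block hunk replaces a four-line if/else with an equivalent conditional expression. A minimal sketch of the equivalence, using hypothetical slicer values rather than a real Block:

# Hypothetical slicers, for illustration only.
for slicer in ((slice(0, 2), slice(None)), slice(0, 2)):
    # Removed form:
    if isinstance(slicer, tuple):
        axis0_old = slicer[0]
    else:
        axis0_old = slicer
    # New form, same result:
    axis0_new = slicer[0] if isinstance(slicer, tuple) else slicer
    assert axis0_old == axis0_new

The __repr__ hunk swaps pprint_thing for plain str/f-string formatting in the same spirit, which presumably is what allows the pprint_thing import to be dropped at the top of the file.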

pandas/core/internals/concat.py

Lines changed: 4 additions & 5 deletions
@@ -204,10 +204,9 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
             missing_arr.fill(fill_value)
             return missing_arr

-        if not self.indexers:
-            if not self.block._can_consolidate:
-                # preserve these for validation in concat_compat
-                return self.block.values
+        if not self.indexers and (not self.block._can_consolidate):
+            # preserve these for validation in concat_compat
+            return self.block.values

         if self.block.is_bool and not self.block.is_categorical:
             # External code requested filling/upcasting, bool values must
@@ -372,7 +371,7 @@ def _get_empty_dtype_and_na(join_units):
     raise AssertionError(msg)


-def is_uniform_join_units(join_units):
+def is_uniform_join_units(join_units) -> bool:
     """
     Check if the join units consist of blocks of uniform type that can
     be concatenated using Block.concat_same_type instead of the generic
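
The get_reindexed_values hunk folds a nested if into a single condition; the two forms agree because the inner if was the only statement in the outer body and neither branch had an else. A toy sketch with made-up flag values:

def nested(indexers, can_consolidate, values):
    if not indexers:
        if not can_consolidate:
            return values

def flattened(indexers, can_consolidate, values):
    if not indexers and (not can_consolidate):
        return values

for indexers in ({}, {0: [0, 1]}):
    for can_consolidate in (True, False):
        assert nested(indexers, can_consolidate, "x") == flattened(indexers, can_consolidate, "x")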

pandas/core/internals/managers.py

Lines changed: 3 additions & 8 deletions
@@ -516,11 +516,9 @@ def get_axe(block, qs, axes):
        if len(self.blocks) > 1:

            indexer = np.empty(len(self.axes[0]), dtype=np.intp)
-            i = 0
            for b in self.blocks:
-                for j in b.mgr_locs:
+                for i, j in enumerate(b.mgr_locs):
                    indexer[j] = i
-                    i = i + 1

            values = values.take(indexer)

@@ -589,7 +587,7 @@ def comp(s, regex=False):
            )
            return _compare_or_regex_search(values, s, regex)

-        masks = [comp(s, regex) for i, s in enumerate(src_list)]
+        masks = [comp(s, regex) for s in src_list]

        result_blocks = []
        src_len = len(src_list) - 1
@@ -755,10 +753,7 @@ def copy(self, deep=True):
            # hit in e.g. tests.io.json.test_pandas

            def copy_func(ax):
-                if deep == "all":
-                    return ax.copy(deep=True)
-                else:
-                    return ax.view()
+                return ax.copy(deep=True) if deep == "all" else ax.view()

            new_axes = [copy_func(ax) for ax in self.axes]
        else:
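
One nuance in the first managers.py hunk: the removed counter i kept increasing across blocks, while enumerate(b.mgr_locs) restarts at zero for each block, so the two loops fill the indexer differently whenever more than one block is present (which is the guarded case here). A standalone illustration, with plain lists standing in for each block's mgr_locs:

import numpy as np

# Stand-ins for two blocks' mgr_locs (hypothetical values).
blocks = [[0, 2], [1, 3]]
indexer_old = np.empty(4, dtype=np.intp)
indexer_new = np.empty(4, dtype=np.intp)

# Removed form: one counter running across all blocks.
i = 0
for locs in blocks:
    for j in locs:
        indexer_old[j] = i
        i = i + 1

# New form: enumerate restarts at 0 for each block.
for locs in blocks:
    for i, j in enumerate(locs):
        indexer_new[j] = i

print(indexer_old)  # [0 2 1 3]
print(indexer_new)  # [0 0 1 1]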

pandas/io/parsers.py

Lines changed: 3 additions & 4 deletions
@@ -1492,11 +1492,10 @@ def extract(r):
        # level, then our header was too long.
        for n in range(len(columns[0])):
            if all(ensure_str(col[n]) in self.unnamed_cols for col in columns):
+                header = ",".join(str(x) for x in self.header)
                raise ParserError(
-                    "Passed header=[{header}] are too many rows for this "
-                    "multi_index of columns".format(
-                        header=",".join(str(x) for x in self.header)
-                    )
+                    f"Passed header=[{header}] are too many rows "
+                    "for this multi_index of columns"
                )

        # Clean the column names (if we have an index_col).
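
In the rewritten ParserError message, only the first literal carries the f prefix; Python's implicit concatenation of adjacent string literals joins it with the plain second literal, which interpolates nothing. A quick check with a hypothetical header list:

header = ",".join(str(x) for x in [0, 1, 2])  # stand-in for self.header
msg = (
    f"Passed header=[{header}] are too many rows "
    "for this multi_index of columns"
)
assert msg == "Passed header=[0,1,2] are too many rows for this multi_index of columns"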

pandas/io/pytables.py

Lines changed: 3 additions & 4 deletions
@@ -3085,9 +3085,8 @@ def write(self, obj, **kwargs):

        self.attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
-            if i == 0:
-                if not ax.is_unique:
-                    raise ValueError("Columns index has to be unique for fixed format")
+            if i == 0 and (not ax.is_unique):
+                raise ValueError("Columns index has to be unique for fixed format")
            self.write_index(f"axis{i}", ax)

        # Supporting mixed-type DataFrame objects...nontrivial
@@ -4216,7 +4215,7 @@ def write_data(self, chunksize: Optional[int], dropna: bool = False):
            chunksize = 100000

        rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
-        chunks = int(nrows / chunksize) + 1
+        chunks = nrows // chunksize + 1
        for i in range(chunks):
            start_i = i * chunksize
            end_i = min((i + 1) * chunksize, nrows)
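
The write_data hunk swaps int(nrows / chunksize) for nrows // chunksize. For typical row counts the two agree; the floor-division form stays in exact integer arithmetic instead of routing through a float, so it also holds up for values large enough to lose precision as doubles. A small check:

nrows, chunksize = 1_000_000, 100_000
assert int(nrows / chunksize) + 1 == nrows // chunksize + 1 == 11

# True division goes through float and can round for very large operands:
big = 10**17 + 1
print(int(big / 1))  # 100000000000000000 (rounded by the float step)
print(big // 1)      # 100000000000000001 (exact)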
