Comma cleanup #36168

Merged
7 commits merged on Sep 7, 2020
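Every change in this diff follows the same pattern: a redundant trailing comma after the last argument of a call (or element of a tuple) is removed so the call can sit on a single line where it fits. As a minimal, self-contained sketch of that pattern (check_result below is a hypothetical stand-in for the pandas test helper, and the note about formatter behaviour is an assumption rather than something stated on this page):

def check_result(method, key, typs=None, axes=None, fails=None):
    # Hypothetical stand-in for the pandas test helper; it only records its inputs.
    return (method, key, tuple(typs or ()), axes, fails)

# Before the cleanup: a trailing comma after the last argument tends to keep the
# call expanded over several lines under formatters such as black.
before = check_result(
    "loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
)

# After the cleanup: with the comma gone, the call fits on one line.
after = check_result("loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError)

# The trailing comma is purely cosmetic; both calls behave identically.
assert before == after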
2 changes: 1 addition & 1 deletion pandas/tests/indexing/test_iloc.py
@@ -56,7 +56,7 @@ def test_is_scalar_access(self):
assert ser.iloc._is_scalar_access((1,))

df = ser.to_frame()
assert df.iloc._is_scalar_access((1, 0,))
assert df.iloc._is_scalar_access((1, 0))

def test_iloc_exceeds_bounds(self):

2 changes: 1 addition & 1 deletion pandas/tests/indexing/test_indexing.py
@@ -1004,7 +1004,7 @@ def test_extension_array_cross_section():
def test_extension_array_cross_section_converts():
# all numeric columns -> numeric series
df = pd.DataFrame(
{"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"],
{"A": pd.array([1, 2], dtype="Int64"), "B": np.array([1, 2])}, index=["a", "b"]
)
result = df.loc["a"]
expected = pd.Series([1, 1], dtype="Int64", index=["A", "B"], name="a")
33 changes: 12 additions & 21 deletions pandas/tests/indexing/test_loc.py
@@ -29,13 +29,11 @@ def test_loc_getitem_label_out_of_range(self):

# out of range label
self.check_result(
"loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError,
"loc", "f", typs=["ints", "uints", "labels", "mixed", "ts"], fails=KeyError
)
self.check_result("loc", "f", typs=["floats"], fails=KeyError)
self.check_result("loc", "f", typs=["floats"], fails=KeyError)
self.check_result(
"loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError,
)
self.check_result("loc", 20, typs=["ints", "uints", "mixed"], fails=KeyError)
self.check_result("loc", 20, typs=["labels"], fails=KeyError)
self.check_result("loc", 20, typs=["ts"], axes=0, fails=KeyError)
self.check_result("loc", 20, typs=["floats"], axes=0, fails=KeyError)
@@ -46,26 +44,24 @@ def test_loc_getitem_label_list(self):
pass

def test_loc_getitem_label_list_with_missing(self):
self.check_result("loc", [0, 1, 2], typs=["empty"], fails=KeyError)
self.check_result(
"loc", [0, 1, 2], typs=["empty"], fails=KeyError,
)
self.check_result(
"loc", [0, 2, 10], typs=["ints", "uints", "floats"], axes=0, fails=KeyError,
"loc", [0, 2, 10], typs=["ints", "uints", "floats"], axes=0, fails=KeyError
)

self.check_result(
"loc", [3, 6, 7], typs=["ints", "uints", "floats"], axes=1, fails=KeyError,
"loc", [3, 6, 7], typs=["ints", "uints", "floats"], axes=1, fails=KeyError
)

# GH 17758 - MultiIndex and missing keys
self.check_result(
"loc", [(1, 3), (1, 4), (2, 5)], typs=["multi"], axes=0, fails=KeyError,
"loc", [(1, 3), (1, 4), (2, 5)], typs=["multi"], axes=0, fails=KeyError
)

def test_loc_getitem_label_list_fails(self):
# fails
self.check_result(
"loc", [20, 30, 40], typs=["ints", "uints"], axes=1, fails=KeyError,
"loc", [20, 30, 40], typs=["ints", "uints"], axes=1, fails=KeyError
)

def test_loc_getitem_label_array_like(self):
@@ -95,18 +91,14 @@ def test_loc_getitem_label_slice(self):
)

self.check_result(
"loc", slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError,
"loc", slice("20130102", "20130104"), typs=["ts"], axes=1, fails=TypeError
)

self.check_result(
"loc", slice(2, 8), typs=["mixed"], axes=0, fails=TypeError,
)
self.check_result(
"loc", slice(2, 8), typs=["mixed"], axes=1, fails=KeyError,
)
self.check_result("loc", slice(2, 8), typs=["mixed"], axes=0, fails=TypeError)
self.check_result("loc", slice(2, 8), typs=["mixed"], axes=1, fails=KeyError)

self.check_result(
"loc", slice(2, 4, 2), typs=["mixed"], axes=0, fails=TypeError,
"loc", slice(2, 4, 2), typs=["mixed"], axes=0, fails=TypeError
)

def test_setitem_from_duplicate_axis(self):
@@ -669,8 +661,7 @@ def test_loc_setitem_with_scalar_index(self, indexer, value):
(1, ["A", "B", "C"]),
np.array([7, 8, 9], dtype=np.int64),
pd.DataFrame(
[[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]],
columns=["A", "B", "C"],
[[1, 2, np.nan], [7, 8, 9], [5, 6, np.nan]], columns=["A", "B", "C"]
),
),
(
8 changes: 4 additions & 4 deletions pandas/tests/internals/test_internals.py
@@ -892,16 +892,16 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):
fill_value,
)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value,
mgr, ax, mgr.axes[ax][::-1], np.arange(mgr.shape[ax]), fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value,
mgr, ax, mgr.axes[ax], np.arange(mgr.shape[ax])[::-1], fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 0, 0], fill_value
)
assert_reindex_indexer_is_ok(
mgr, ax, pd.Index(["foo", "bar", "baz"]), [-1, 0, -1], fill_value,
mgr, ax, pd.Index(["foo", "bar", "baz"]), [-1, 0, -1], fill_value
)
assert_reindex_indexer_is_ok(
mgr,
@@ -913,7 +913,7 @@ def assert_reindex_indexer_is_ok(mgr, axis, new_labels, indexer, fill_value):

if mgr.shape[ax] >= 3:
assert_reindex_indexer_is_ok(
mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 1, 2], fill_value,
mgr, ax, pd.Index(["foo", "bar", "baz"]), [0, 1, 2], fill_value
)


12 changes: 4 additions & 8 deletions pandas/tests/io/formats/test_css.py
@@ -99,11 +99,11 @@ def test_css_side_shorthands(shorthand, expansions):
top, right, bottom, left = expansions

assert_resolves(
f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"},
f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"}
)

assert_resolves(
f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"},
f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"}
)

assert_resolves(
@@ -189,9 +189,7 @@ def test_css_absolute_font_size(size, relative_to, resolved):
inherited = None
else:
inherited = {"font-size": relative_to}
assert_resolves(
f"font-size: {size}", {"font-size": resolved}, inherited=inherited,
)
assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)


@pytest.mark.parametrize(
@@ -225,6 +223,4 @@ def test_css_relative_font_size(size, relative_to, resolved):
inherited = None
else:
inherited = {"font-size": relative_to}
assert_resolves(
f"font-size: {size}", {"font-size": resolved}, inherited=inherited,
)
assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
12 changes: 5 additions & 7 deletions pandas/tests/io/formats/test_info.py
@@ -299,7 +299,7 @@ def test_info_memory_usage():
DataFrame(1, index=["a"], columns=["A"]).memory_usage(index=True)
DataFrame(1, index=["a"], columns=["A"]).index.nbytes
df = DataFrame(
data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"],
data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
)
df.index.nbytes
df.memory_usage(index=True)
@@ -336,7 +336,7 @@ def test_info_memory_usage_deep_pypy():
@pytest.mark.skipif(PYPY, reason="PyPy getsizeof() fails by design")
def test_usage_via_getsizeof():
df = DataFrame(
data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"],
data=1, index=MultiIndex.from_product([["a"], range(1000)]), columns=["A"]
)
mem = df.memory_usage(deep=True).sum()
# sys.getsizeof will call the .memory_usage with
@@ -359,16 +359,14 @@ def test_info_memory_usage_qualified():

buf = StringIO()
df = DataFrame(
1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)]),
1, columns=list("ab"), index=MultiIndex.from_product([range(3), range(3)])
)
df.info(buf=buf)
assert "+" not in buf.getvalue()

buf = StringIO()
df = DataFrame(
1,
columns=list("ab"),
index=MultiIndex.from_product([range(3), ["foo", "bar"]]),
1, columns=list("ab"), index=MultiIndex.from_product([range(3), ["foo", "bar"]])
)
df.info(buf=buf)
assert "+" in buf.getvalue()
@@ -384,7 +382,7 @@ def memory_usage(f):
N = 100
M = len(uppercase)
index = MultiIndex.from_product(
[list(uppercase), date_range("20160101", periods=N)], names=["id", "date"],
[list(uppercase), date_range("20160101", periods=N)], names=["id", "date"]
)
df = DataFrame({"value": np.random.randn(N * M)}, index=index)

2 changes: 1 addition & 1 deletion pandas/tests/io/json/test_compression.py
@@ -45,7 +45,7 @@ def test_with_s3_url(compression, s3_resource, s3so):
s3_resource.Bucket("pandas-test").put_object(Key="test-1", Body=f)

roundtripped_df = pd.read_json(
"s3://pandas-test/test-1", compression=compression, storage_options=s3so,
"s3://pandas-test/test-1", compression=compression, storage_options=s3so
)
tm.assert_frame_equal(df, roundtripped_df)

10 changes: 2 additions & 8 deletions pandas/tests/io/json/test_pandas.py
@@ -745,11 +745,7 @@ def test_reconstruction_index(self):

def test_path(self, float_frame, int_frame, datetime_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
int_frame,
datetime_frame,
]:
for df in [float_frame, int_frame, datetime_frame]:
df.to_json(path)
read_json(path)

@@ -1706,9 +1702,7 @@ def test_to_s3(self, s3_resource, s3so):
# GH 28375
mock_bucket_name, target_file = "pandas-test", "test.json"
df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})
df.to_json(
f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so,
)
df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)
timeout = 5
while True:
if target_file in (
4 changes: 1 addition & 3 deletions pandas/tests/io/parser/test_c_parser_only.py
@@ -646,9 +646,7 @@ def test_1000_sep_with_decimal(
tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
"float_precision", [None, "high", "round_trip"],
)
@pytest.mark.parametrize("float_precision", [None, "high", "round_trip"])
@pytest.mark.parametrize(
"value,expected",
[
4 changes: 2 additions & 2 deletions pandas/tests/io/parser/test_parse_dates.py
@@ -1439,7 +1439,7 @@ def test_parse_timezone(all_parsers):
end="2018-01-04 09:05:00",
freq="1min",
tz=pytz.FixedOffset(540),
),
)
),
freq=None,
)
@@ -1553,5 +1553,5 @@ def test_missing_parse_dates_column_raises(
msg = f"Missing column provided to 'parse_dates': '{missing_cols}'"
with pytest.raises(ValueError, match=msg):
parser.read_csv(
content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates,
content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates
)
2 changes: 1 addition & 1 deletion pandas/tests/io/parser/test_usecols.py
@@ -199,7 +199,7 @@ def test_usecols_with_whitespace(all_parsers):
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"]),),
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):