
BUG: Fix FastParquetImpl.write for non-existent file #28326


Merged · 5 commits · Sep 19, 2019
Changes from 4 commits
1 change: 1 addition & 0 deletions doc/source/whatsnew/v1.0.0.rst
@@ -218,6 +218,7 @@ I/O
 - Bug in :meth:`DataFrame.to_csv` where values were truncated when the length of ``na_rep`` was shorter than the text input data. (:issue:`25099`)
 - Bug in :func:`DataFrame.to_string` where values were truncated using display options instead of outputting the full content (:issue:`9784`)
 - Bug in :meth:`DataFrame.to_json` where a datetime column label would not be written out in ISO format with ``orient="table"`` (:issue:`28130`)
+- Bug in :meth:`DataFrame.to_parquet` where writing to GCS would fail with ``engine='fastparquet'`` if the file did not already exist (:issue:`28326`)

 Plotting
 ^^^^^^^^
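To make the whatsnew entry concrete, here is a minimal repro sketch (the bucket path is hypothetical). Before this patch, a `gs://` destination fell through to the generic branch that opens the path in read mode, so writing a not-yet-existent object raised `FileNotFoundError`:

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})

# Pre-fix behaviour: the gs:// path was opened without mode="wb", so gcsfs
# attempted a read-mode open of a non-existent object and raised
# FileNotFoundError. With this patch the file is created and written.
df.to_parquet("gs://my-bucket/new.parquet", engine="fastparquet")
```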
8 changes: 4 additions & 4 deletions pandas/io/parquet.py
@@ -7,7 +7,7 @@

 from pandas import DataFrame, get_option

-from pandas.io.common import get_filepath_or_buffer, is_s3_url
+from pandas.io.common import get_filepath_or_buffer, is_gcs_url, is_s3_url


 def get_engine(engine):
@@ -159,12 +159,12 @@ def write(
         if partition_cols is not None:
             kwargs["file_scheme"] = "hive"

-        if is_s3_url(path):
-            # path is s3:// so we need to open the s3file in 'wb' mode.
+        if is_s3_url(path) or is_gcs_url(path):
+            # if path is s3:// or gs:// we need to open the file in 'wb' mode.
             # TODO: Support 'ab'

             path, _, _, _ = get_filepath_or_buffer(path, mode="wb")
-            # And pass the opened s3file to the fastparquet internal impl.
+            # And pass the opened file to the fastparquet internal impl.
             kwargs["open_with"] = lambda path, _: path
         else:
             path, _, _, _ = get_filepath_or_buffer(path)
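The `open_with` override above is the crux of the fix. A condensed sketch of the contract, assuming fastparquet's `write(filename, data, open_with=...)` keyword, whose stock opener works on local paths:

```python
# Sketch only, not pandas' actual module. `is_gcs_url` is assumed to mirror
# is_s3_url: a simple check that the URL scheme is "gs" or "gcs".

def default_open(path, mode="rb"):
    # fastparquet's stock opener (assumption): open a local file by path.
    return open(path, mode)

# After this patch, for s3:// and gs:// URLs `path` is already an open,
# writable file-like object returned by get_filepath_or_buffer(..., mode="wb"),
# so pandas hands fastparquet an identity callable that ignores the mode and
# returns the pre-opened buffer instead of reopening the path:
open_with = lambda path, _: path
```

Passing the already-open buffer sidesteps fastparquet's local-path assumption entirely, which is why the same branch now serves both S3 and GCS.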
26 changes: 26 additions & 0 deletions pandas/tests/io/test_gcs.py
@@ -1,3 +1,4 @@
+import os
 from io import StringIO

 import numpy as np
@@ -60,6 +61,31 @@ def open(*args):
     assert_frame_equal(df1, df2)


+@td.skip_if_no("fastparquet")
+@td.skip_if_no("gcsfs")
+def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
+    """Regression test for writing to a not-yet-existent GCS Parquet file."""
+    df1 = DataFrame(
+        {
+            "int": [1, 3],
+            "float": [2.0, np.nan],
+            "str": ["t", "s"],
+            "dt": date_range("2018-06-18", periods=2),
+        }
+    )
+
+    class MockGCSFileSystem:
+        def open(self, path, mode="r", *args):
+            if "w" not in mode:
+                raise FileNotFoundError
+            return open(os.path.join(tmpdir, "test.parquet"), mode)
+
+    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
+    df1.to_parquet(
+        "gs://test/test.csv", index=True, engine="fastparquet", compression=None
+    )
+

@td.skip_if_no("gcsfs")
def test_gcs_get_filepath_or_buffer(monkeypatch):
df1 = DataFrame(
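A note on the test's mock: pandas' GCS helper instantiates `gcsfs.GCSFileSystem` and calls its `open` method, so patching the class is enough to intercept the write while keeping the test hermetic. Roughly (the helper's internals are an assumption):

```python
import gcsfs

def gcs_get_filepath_or_buffer(path, mode="rb"):
    # Approximately what pandas does for gs:// URLs (assumption): build a
    # default filesystem object and open the path with the requested mode.
    fs = gcsfs.GCSFileSystem()
    return fs.open(path, mode)
```

With `MockGCSFileSystem` patched in, the `mode="wb"` open transparently returns a local file under `tmpdir`, while any read-mode open raises `FileNotFoundError`, which is exactly the pre-fix failure this test guards against.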