Skip to content

Commit 15cdc2b

Browse files
committed
also on io
1 parent 41e0fed commit 15cdc2b

File tree

2 files changed

+6
-5
lines changed

2 files changed

+6
-5
lines changed

python/datafusion/context.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1120,7 +1120,7 @@ def read_avro(
         self,
         path: str | pathlib.Path,
         schema: pa.Schema | None = None,
-        file_partition_cols: list[tuple[str, str]] | None = None,
+        file_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
         file_extension: str = ".avro",
     ) -> DataFrame:
         """Create a :py:class:`DataFrame` for reading Avro data source.
@@ -1136,6 +1136,7 @@ def read_avro(
         """
         if file_partition_cols is None:
             file_partition_cols = []
+        file_partition_cols = self._convert_table_partition_cols(file_partition_cols)
         return DataFrame(
             self.ctx.read_avro(str(path), schema, file_partition_cols, file_extension)
         )

python/datafusion/io.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@
 
 def read_parquet(
     path: str | pathlib.Path,
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     parquet_pruning: bool = True,
     file_extension: str = ".parquet",
     skip_metadata: bool = True,
@@ -83,7 +83,7 @@ def read_json(
     schema: pa.Schema | None = None,
     schema_infer_max_records: int = 1000,
     file_extension: str = ".json",
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     file_compression_type: str | None = None,
 ) -> DataFrame:
     """Read a line-delimited JSON data source.
@@ -124,7 +124,7 @@ def read_csv(
     delimiter: str = ",",
     schema_infer_max_records: int = 1000,
     file_extension: str = ".csv",
-    table_partition_cols: list[tuple[str, pa.DataType]] | None = None,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     file_compression_type: str | None = None,
 ) -> DataFrame:
     """Read a CSV data source.
@@ -171,7 +171,7 @@ def read_csv(
 def read_avro(
     path: str | pathlib.Path,
     schema: pa.Schema | None = None,
-    file_partition_cols: list[tuple[str, str]] | None = None,
+    file_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
     file_extension: str = ".avro",
 ) -> DataFrame:
     """Create a :py:class:`DataFrame` for reading Avro data source.

0 commit comments

Comments
 (0)