问题
I would like to convert this code:
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.column.page.PageReadStore;
import org.apache.parquet.example.data.Group;
import org.apache.parquet.example.data.simple.SimpleGroup;
import org.apache.parquet.example.data.simple.convert.GroupRecordConverter;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.util.HadoopInputFile;
import org.apache.parquet.io.ColumnIOFactory;
import org.apache.parquet.io.MessageColumnIO;
import org.apache.parquet.io.RecordReader;
import org.apache.parquet.schema.MessageType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
public class ParquetReaderUtils {

    private ParquetReaderUtils() {
        // utility class — no instances
    }

    /**
     * Reads every record of the Parquet file at {@code filePath} into memory.
     *
     * @param filePath path to a Parquet file (local or any Hadoop-supported filesystem)
     * @return a {@code Parquet} holding all records as {@link SimpleGroup}s plus the file schema
     * @throws IOException if the file cannot be opened or a row group cannot be read
     */
    public static Parquet getParquetData(String filePath) throws IOException {
        List<SimpleGroup> simpleGroups = new ArrayList<>();
        // try-with-resources guarantees the reader (and its underlying input
        // stream) is closed even when reading a row group throws — the original
        // leaked the reader on any exception before the explicit close().
        try (ParquetFileReader reader = ParquetFileReader.open(
                HadoopInputFile.fromPath(new Path(filePath), new Configuration()))) {
            MessageType schema = reader.getFooter().getFileMetaData().getSchema();
            // The column I/O layout depends only on the schema, so build it once
            // instead of once per row group.
            MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
            PageReadStore pages;
            while ((pages = reader.readNextRowGroup()) != null) {
                long rows = pages.getRowCount();
                // A fresh RecordReader is still required per row group — it is
                // bound to that group's pages.
                RecordReader<Group> recordReader =
                        columnIO.getRecordReader(pages, new GroupRecordConverter(schema));
                // long counter: getRowCount() is a long, an int could overflow
                // on very large row groups.
                for (long i = 0; i < rows; i++) {
                    // GroupRecordConverter materializes SimpleGroup instances,
                    // so this cast is safe.
                    simpleGroups.add((SimpleGroup) recordReader.read());
                }
            }
            return new Parquet(simpleGroups, schema);
        }
    }
}
(which is from https://www.arm64.ca/post/reading-parquet-files-java/)
to take a ByteArrayOutputStream parameter instead of a filePath.
Is this possible? I don't see a ParquetStreamReader in org.apache.parquet.hadoop.
Any help is appreciated. I am trying to write a test app for parquet coming from kafka and writing each of many messages out to a file is rather slow.
回答1:
So, without deeper testing, I would try this class (provided the content of the output stream is Parquet-compatible). I added a streamId to make it easier to identify which byte array is being processed (ParquetFileReader prints the instance's toString() when something goes wrong).
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.parquet.io.DelegatingSeekableInputStream;
import org.apache.parquet.io.InputFile;
import org.apache.parquet.io.SeekableInputStream;
/**
 * In-memory {@link InputFile} backed by a snapshot of a {@link ByteArrayOutputStream},
 * allowing ParquetFileReader to read Parquet data without touching the filesystem.
 */
public class ParquetStream implements InputFile
{
    private final String streamId;
    private final byte[] data;

    /**
     * ByteArrayInputStream that exposes its cursor so the surrounding
     * DelegatingSeekableInputStream can implement seek()/getPos().
     * Declared static: it never reads ParquetStream state, and a non-static
     * inner class would carry a hidden reference to the enclosing instance.
     */
    public static class SeekableByteArrayInputStream extends ByteArrayInputStream
    {
        public SeekableByteArrayInputStream(byte[] buf)
        {
            super(buf);
        }

        public void setPos(int pos)
        {
            this.pos = pos;
        }

        public int getPos()
        {
            return this.pos;
        }
    }

    /**
     * @param streamId label embedded in {@link #toString()} so failures reported
     *                 by ParquetFileReader identify which byte array was involved
     * @param stream   buffer whose current content is copied; later writes to the
     *                 stream are not visible through this InputFile
     */
    public ParquetStream(String streamId, ByteArrayOutputStream stream)
    {
        this.streamId = streamId;
        this.data = stream.toByteArray();
    }

    @Override
    public long getLength() throws IOException
    {
        return this.data.length;
    }

    @Override
    public SeekableInputStream newStream() throws IOException
    {
        return new DelegatingSeekableInputStream(new SeekableByteArrayInputStream(this.data))
        {
            @Override
            public void seek(long newPos) throws IOException
            {
                // Math.toIntExact throws on positions beyond int range instead of
                // silently truncating like the deprecated-for-removal
                // new Long(newPos).intValue() would.
                ((SeekableByteArrayInputStream) this.getStream()).setPos(Math.toIntExact(newPos));
            }

            @Override
            public long getPos() throws IOException
            {
                // int widens to long implicitly — no boxing needed.
                return ((SeekableByteArrayInputStream) this.getStream()).getPos();
            }
        };
    }

    @Override
    public String toString()
    {
        return "ParquetStream[" + streamId + "]";
    }
}
来源:https://stackoverflow.com/questions/58141248/read-parquet-data-from-bytearrayoutputstream-instead-of-file