public class IcebergArrowColumnVector
extends org.apache.spark.sql.vectorized.ColumnVector

Implementation of Spark's ColumnVector interface. The code for this class is heavily inspired by Spark's ArrowColumnVector. The main difference is in how nullability checks are made in this class, by relying on NullabilityHolder instead of the validity vector in the Arrow vector.

| Constructor and Description |
|---|
| IcebergArrowColumnVector(VectorHolder holder) |
| Modifier and Type | Method and Description |
|---|---|
| protected ArrowVectorAccessor<org.apache.spark.sql.types.Decimal,org.apache.spark.unsafe.types.UTF8String,org.apache.spark.sql.vectorized.ColumnarArray,org.apache.spark.sql.vectorized.ArrowColumnVector> | accessor() |
| void | close() |
| org.apache.spark.sql.vectorized.ColumnarArray | getArray(int rowId) |
| byte[] | getBinary(int rowId) |
| boolean | getBoolean(int rowId) |
| byte | getByte(int rowId) |
| org.apache.spark.sql.vectorized.ArrowColumnVector | getChild(int ordinal) |
| org.apache.spark.sql.types.Decimal | getDecimal(int rowId, int precision, int scale) |
| double | getDouble(int rowId) |
| float | getFloat(int rowId) |
| int | getInt(int rowId) |
| long | getLong(int rowId) |
| org.apache.spark.sql.vectorized.ColumnarMap | getMap(int rowId) |
| short | getShort(int rowId) |
| org.apache.spark.unsafe.types.UTF8String | getUTF8String(int rowId) |
| boolean | hasNull() |
| boolean | isNullAt(int rowId) |
| protected NullabilityHolder | nullabilityHolder() |
| int | numNulls() |
| ArrowVectorAccessor<org.apache.spark.sql.types.Decimal,org.apache.spark.unsafe.types.UTF8String,org.apache.spark.sql.vectorized.ColumnarArray,org.apache.spark.sql.vectorized.ArrowColumnVector> | vectorAccessor() |
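For orientation, here is a minimal usage sketch. It assumes a VectorHolder produced by Iceberg's vectorized Arrow read path and the published package names (org.apache.iceberg.spark.data.vectorized, org.apache.iceberg.arrow.vectorized); neither is defined on this page. It only exercises the public ColumnVector API shown above: the null checks go through isNullAt(int), which this class backs with the NullabilityHolder rather than the Arrow validity vector.

```java
// Sketch only: package names and the origin of the VectorHolder are assumptions.
import org.apache.iceberg.arrow.vectorized.VectorHolder;
import org.apache.iceberg.spark.data.vectorized.IcebergArrowColumnVector;

public class IcebergArrowColumnVectorSketch {

  // Sum a long column, skipping nulls via the public ColumnVector API.
  static long sumLongColumn(VectorHolder holder, int numRows) {
    // close() is specified by AutoCloseable, so try-with-resources applies.
    try (IcebergArrowColumnVector vector = new IcebergArrowColumnVector(holder)) {
      long sum = 0L;
      if (!vector.hasNull()) {
        // No nulls reported: read every row directly.
        for (int row = 0; row < numRows; row++) {
          sum += vector.getLong(row);
        }
      } else {
        for (int row = 0; row < numRows; row++) {
          // isNullAt(row) consults the NullabilityHolder kept by this class.
          if (!vector.isNullAt(row)) {
            sum += vector.getLong(row);
          }
        }
      }
      return sum;
    }
  }
}
```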
public IcebergArrowColumnVector(VectorHolder holder)
protected ArrowVectorAccessor<org.apache.spark.sql.types.Decimal,org.apache.spark.unsafe.types.UTF8String,org.apache.spark.sql.vectorized.ColumnarArray,org.apache.spark.sql.vectorized.ArrowColumnVector> accessor()
protected NullabilityHolder nullabilityHolder()
public void close()
Specified by: close in interface java.lang.AutoCloseable
Specified by: close in class org.apache.spark.sql.vectorized.ColumnVector

public boolean hasNull()
Specified by: hasNull in class org.apache.spark.sql.vectorized.ColumnVector

public int numNulls()
Specified by: numNulls in class org.apache.spark.sql.vectorized.ColumnVector

public boolean isNullAt(int rowId)
Specified by: isNullAt in class org.apache.spark.sql.vectorized.ColumnVector

public boolean getBoolean(int rowId)
Specified by: getBoolean in class org.apache.spark.sql.vectorized.ColumnVector

public byte getByte(int rowId)
Specified by: getByte in class org.apache.spark.sql.vectorized.ColumnVector

public short getShort(int rowId)
Specified by: getShort in class org.apache.spark.sql.vectorized.ColumnVector

public int getInt(int rowId)
Specified by: getInt in class org.apache.spark.sql.vectorized.ColumnVector

public long getLong(int rowId)
Specified by: getLong in class org.apache.spark.sql.vectorized.ColumnVector

public float getFloat(int rowId)
Specified by: getFloat in class org.apache.spark.sql.vectorized.ColumnVector

public double getDouble(int rowId)
Specified by: getDouble in class org.apache.spark.sql.vectorized.ColumnVector

public org.apache.spark.sql.vectorized.ColumnarArray getArray(int rowId)
Specified by: getArray in class org.apache.spark.sql.vectorized.ColumnVector

public org.apache.spark.sql.vectorized.ColumnarMap getMap(int rowId)
Specified by: getMap in class org.apache.spark.sql.vectorized.ColumnVector

public org.apache.spark.sql.types.Decimal getDecimal(int rowId, int precision, int scale)
Specified by: getDecimal in class org.apache.spark.sql.vectorized.ColumnVector

public org.apache.spark.unsafe.types.UTF8String getUTF8String(int rowId)
Specified by: getUTF8String in class org.apache.spark.sql.vectorized.ColumnVector

public byte[] getBinary(int rowId)
Specified by: getBinary in class org.apache.spark.sql.vectorized.ColumnVector

public org.apache.spark.sql.vectorized.ArrowColumnVector getChild(int ordinal)
Specified by: getChild in class org.apache.spark.sql.vectorized.ColumnVector
public ArrowVectorAccessor<org.apache.spark.sql.types.Decimal,org.apache.spark.unsafe.types.UTF8String,org.apache.spark.sql.vectorized.ColumnarArray,org.apache.spark.sql.vectorized.ArrowColumnVector> vectorAccessor()
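Because the accessor's type parameters are Spark-facing value types (Decimal, UTF8String, ColumnarArray, ArrowColumnVector), instances of this class can be handed to Spark like any other ColumnVector. The sketch below shows one way that hand-off might look, using only Spark's public ColumnarBatch API; the surrounding reader wiring and the Iceberg package name in the import are assumptions, not part of this class's documentation.

```java
// Sketch only: exposing a batch of these vectors to Spark's row-based consumers.
import java.util.Iterator;

import org.apache.iceberg.spark.data.vectorized.IcebergArrowColumnVector;
import org.apache.spark.sql.catalyst.InternalRow;
import org.apache.spark.sql.vectorized.ColumnarBatch;

public class ColumnarBatchSketch {

  // Wrap the column vectors of one Arrow record batch and iterate them as rows.
  static Iterator<InternalRow> toRows(IcebergArrowColumnVector[] columns, int numRows) {
    // Java arrays are covariant, so IcebergArrowColumnVector[] is accepted as ColumnVector[].
    ColumnarBatch batch = new ColumnarBatch(columns);
    batch.setNumRows(numRows);
    return batch.rowIterator();
  }
}
```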