本文整理了Java中org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.getPartitionColumnCount()
方法的一些代码示例,展示了VectorizedRowBatchCtx.getPartitionColumnCount()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。VectorizedRowBatchCtx.getPartitionColumnCount()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx
类名称:VectorizedRowBatchCtx
方法名:getPartitionColumnCount
暂无
代码示例来源:origin: apache/hive
/**
 * Populates {@code partitionValues} with the partition-column values carried
 * by the given file split; leaves it {@code null} for unpartitioned tables.
 *
 * @param fileSplit split whose path encodes the partition spec
 * @param conf      job configuration used to resolve partition values
 * @throws IOException if the partition values cannot be resolved
 */
private void initPartitionValues(FileSplit fileSplit, JobConf conf) throws IOException {
  final int partitionCount = rbCtx.getPartitionColumnCount();
  if (partitionCount <= 0) {
    // Unpartitioned table: nothing to materialize.
    partitionValues = null;
    return;
  }
  partitionValues = new Object[partitionCount];
  VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
}
代码示例来源:origin: apache/hive
/**
 * Number of partition columns in the vectorized row-batch context, surfaced
 * for EXPLAIN VECTORIZATION DETAIL output.
 *
 * @return the partition column count reported by the batch context
 */
@Explain(vectorization = Vectorization.DETAIL,
    displayName = "partitionColumnCount",
    explainLevels = { Level.DEFAULT, Level.EXTENDED })
public int getPartitionColumnCount() {
  return vectorizedRowBatchCtx.getPartitionColumnCount();
}
代码示例来源:origin: apache/drill
/**
 * Partition-column count delegated from the vectorized row-batch context;
 * exposed so EXPLAIN VECTORIZATION DETAIL can display it.
 *
 * @return number of partition columns
 */
@Explain(vectorization = Vectorization.DETAIL,
    displayName = "partitionColumnCount",
    explainLevels = { Level.DEFAULT, Level.EXTENDED })
public int getPartitionColumnCount() {
  return vectorizedRowBatchCtx.getPartitionColumnCount();
}
代码示例来源:origin: apache/drill
/**
 * Populates {@code partitionValues} with the partition-column values for the
 * given split, or sets it to {@code null} when the table is unpartitioned.
 *
 * @param fileSplit split whose path encodes the partition spec
 * @param conf      job configuration used to resolve partition values
 * @throws IOException if the partition values cannot be resolved
 */
private void initPartitionValues(FileSplit fileSplit, JobConf conf) throws IOException {
  int partitionColumnCount = rbCtx.getPartitionColumnCount();
  if (partitionColumnCount > 0) {
    partitionValues = new Object[partitionColumnCount];
    // getPartitionValues is static: invoke via the class, not an instance,
    // matching the equivalent Hive implementation of this method.
    VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
  } else {
    partitionValues = null;
  }
}
代码示例来源:origin: apache/drill
/**
 * Logs, at DEBUG level, the column layout captured in the work node's
 * {@code VectorizedRowBatchCtx}: row column names, column type infos,
 * partition column count, and scratch column type names.
 *
 * @param work the work node whose vectorized row-batch context is dumped
 */
public void debugDisplayAllMaps(BaseWork work) {
  VectorizedRowBatchCtx vectorizedRowBatchCtx = work.getVectorizedRowBatchCtx();
  String[] allColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
  // Declare as Object[] up front so no cast is needed at the deepToString call.
  Object[] columnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
  int partitionColumnCount = vectorizedRowBatchCtx.getPartitionColumnCount();
  String[] scratchColumnTypeNames = vectorizedRowBatchCtx.getScratchColumnTypeNames();
  LOG.debug("debugDisplayAllMaps allColumnNames " + Arrays.toString(allColumnNames));
  LOG.debug("debugDisplayAllMaps columnTypeInfos " + Arrays.deepToString(columnTypeInfos));
  LOG.debug("debugDisplayAllMaps partitionColumnCount " + partitionColumnCount);
  LOG.debug("debugDisplayAllMaps scratchColumnTypeNames " + Arrays.toString(scratchColumnTypeNames));
}
}
代码示例来源:origin: apache/hive
/**
 * Builds the reader, initializing the vectorized row-batch context and the
 * partition values array when the query runs in vector mode; both stay
 * {@code null} in non-vector mode.
 *
 * @param conf  job configuration consulted for vectorization settings
 * @param split input split supplying partition values in vector mode
 * @throws IOException if partition values cannot be resolved
 */
public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
  final boolean vectorized = Utilities.getIsVectorized(conf);
  if (LOG.isDebugEnabled()) {
    LOG.debug(getClass().getSimpleName() + " in "
        + (vectorized ? "" : "non-") + "vector mode");
  }
  if (!vectorized) {
    rbCtx = null;
    partitionValues = null;
    return;
  }
  rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  final int partitionCount = rbCtx.getPartitionColumnCount();
  if (partitionCount > 0) {
    partitionValues = new Object[partitionCount];
    VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit) split, partitionValues);
  } else {
    partitionValues = null;
  }
}
代码示例来源:origin: apache/drill
/**
 * Builds the reader; when the vectorized input file format is in use, loads
 * the row-batch context and materializes partition values from the split.
 * In non-vector mode both fields are left {@code null}.
 *
 * @param conf  job configuration consulted for vectorization settings
 * @param split input split supplying partition values in vector mode
 * @throws IOException if partition values cannot be resolved
 */
public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
  final boolean vectorized = Utilities.getUseVectorizedInputFileFormat(conf);
  if (LOG.isDebugEnabled()) {
    LOG.debug(getClass().getSimpleName() + " in "
        + (vectorized ? "" : "non-") + "vector mode");
  }
  if (!vectorized) {
    rbCtx = null;
    partitionValues = null;
    return;
  }
  rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  final int partitionCount = rbCtx.getPartitionColumnCount();
  if (partitionCount > 0) {
    partitionValues = new Object[partitionCount];
    VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit) split, partitionValues);
  } else {
    partitionValues = null;
  }
}
代码示例来源:origin: apache/hive
/**
 * Names of the partition columns — the slice of row columns starting after
 * the data columns — for EXPLAIN VECTORIZATION DETAIL output.
 *
 * @return partition column names from the vectorized row-batch context
 */
@Explain(vectorization = Vectorization.DETAIL,
    displayName = "partitionColumns",
    explainLevels = { Level.DEFAULT, Level.EXTENDED })
public List<String> getPartitionColumns() {
  final VectorizedRowBatchCtx ctx = vectorizedRowBatchCtx;
  return getColumns(ctx, ctx.getDataColumnCount(), ctx.getPartitionColumnCount());
}
代码示例来源:origin: apache/drill
/**
 * Partition column names — the row columns positioned after the data
 * columns — exposed for EXPLAIN VECTORIZATION DETAIL output.
 *
 * @return partition column names from the vectorized row-batch context
 */
@Explain(vectorization = Vectorization.DETAIL,
    displayName = "partitionColumns",
    explainLevels = { Level.DEFAULT, Level.EXTENDED })
public List<String> getPartitionColumns() {
  final int dataColumns = vectorizedRowBatchCtx.getDataColumnCount();
  final int partitionColumns = vectorizedRowBatchCtx.getPartitionColumnCount();
  return getColumns(dataColumns, partitionColumns);
}
代码示例来源:origin: apache/hive
LOG.debug(name + " includeColumns: " + Arrays.toString(dataColumnNums));
LOG.debug(name + " partitionColumnCount: " + batchContext.getPartitionColumnCount());
LOG.debug(name + " dataColumns: " +
BaseWork.BaseExplainVectorization.getColumns(
代码示例来源:origin: apache/drill
/**
 * Wraps a row-at-a-time ACID ORC reader for vectorized consumption,
 * capturing the row-batch context and the split's partition values.
 *
 * @param inner                 the underlying ACID row reader being wrapped
 * @param conf                  configuration used to resolve partition values
 * @param vectorizedRowBatchCtx batch context describing the column layout
 * @param split                 split whose path encodes the partition spec
 * @throws IOException if partition values cannot be resolved
 */
VectorizedOrcAcidRowReader(AcidInputFormat.RowReader<OrcStruct> inner,
                           Configuration conf,
                           VectorizedRowBatchCtx vectorizedRowBatchCtx,
                           FileSplit split) throws IOException {
  this.innerReader = inner;
  this.key = inner.createKey();
  rbCtx = vectorizedRowBatchCtx;
  int partitionColumnCount = rbCtx.getPartitionColumnCount();
  if (partitionColumnCount > 0) {
    partitionValues = new Object[partitionColumnCount];
    // getPartitionValues is static: invoke via the class, not an instance.
    VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, split, partitionValues);
  } else {
    // No partition columns: keep the null sentinel consistent with sibling readers.
    partitionValues = null;
  }
  this.value = inner.createValue();
  this.objectInspector = inner.getObjectInspector();
}
代码示例来源:origin: apache/hive
partitionColumnCount = batchContext.getPartitionColumnCount();
partitionValues = new Object[partitionColumnCount];
virtualColumnCount = batchContext.getVirtualColumnCount();
代码示例来源:origin: apache/drill
partitionColumnCount = batchContext.getPartitionColumnCount();
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/hive
int partitionColumnCount = rbCtx.getPartitionColumnCount();
if (partitionColumnCount > 0) {
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/hive
if (batchContext.getPartitionColumnCount() > 0) {
代码示例来源:origin: apache/hive
int partitionColumnCount = rbCtx.getPartitionColumnCount();
if (partitionColumnCount > 0) {
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/drill
int partitionColumnCount = (rbCtx != null) ? rbCtx.getPartitionColumnCount() : 0;
if (partitionColumnCount > 0) {
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/drill
int partitionColumnCount = rbCtx.getPartitionColumnCount();
if (partitionColumnCount > 0) {
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/hive
this.length = orcSplit.getLength();
int partitionColumnCount = (rbCtx != null) ? rbCtx.getPartitionColumnCount() : 0;
if (partitionColumnCount > 0) {
partitionValues = new Object[partitionColumnCount];
代码示例来源:origin: apache/drill
if (batchContext.getPartitionColumnCount() > 0) {
内容来源于网络,如有侵权,请联系作者删除!