This article collects Java code examples for the org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.getRowColumnTypeInfos() method and shows how it is used in practice. The examples come from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they should be useful references. Details of VectorizedRowBatchCtx.getRowColumnTypeInfos():
Package path: org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx
Class name: VectorizedRowBatchCtx
Method name: getRowColumnTypeInfos
Description: none available
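Before the collected examples, here is a minimal sketch of the basic call pattern. getRowColumnTypeInfos() returns one TypeInfo per row column, parallel to getRowColumnNames(); the helper name printRowColumnTypes below is made up for illustration, and the context itself would normally come from Utilities.getVectorizedRowBatchCtx(conf) or BaseWork.getVectorizedRowBatchCtx(), as the real examples show:

import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;

public class RowColumnTypesSketch {
  // Prints "name : type" for every row column described by the context.
  static void printRowColumnTypes(VectorizedRowBatchCtx rbCtx) {
    String[] names = rbCtx.getRowColumnNames();
    TypeInfo[] typeInfos = rbCtx.getRowColumnTypeInfos();
    for (int i = 0; i < typeInfos.length; i++) {
      System.out.println(names[i] + " : " + typeInfos[i].getTypeName());
    }
  }
}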
Code example source: origin: apache/drill
private List<String> getColumns(int startIndex, int count) {
  String[] rowColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
  TypeInfo[] rowColumnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
  List<String> result = new ArrayList<String>(count);
  final int end = startIndex + count;
  for (int i = startIndex; i < end; i++) {
    result.add(rowColumnNames[i] + ":" + rowColumnTypeInfos[i]);
  }
  return result;
}
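As an illustration (hypothetical schema): calling getColumns(0, 2) against a batch context describing columns (id int, name string) would return ["id:int", "name:string"], since a TypeInfo prints as its Hive type name.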
Code example source: origin: apache/drill
public void debugDisplayAllMaps(BaseWork work) {
  VectorizedRowBatchCtx vectorizedRowBatchCtx = work.getVectorizedRowBatchCtx();
  String[] allColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
  TypeInfo[] columnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
  int partitionColumnCount = vectorizedRowBatchCtx.getPartitionColumnCount();
  String[] scratchColumnTypeNames = vectorizedRowBatchCtx.getScratchColumnTypeNames();
  LOG.debug("debugDisplayAllMaps allColumnNames " + Arrays.toString(allColumnNames));
  LOG.debug("debugDisplayAllMaps columnTypeInfos " + Arrays.toString(columnTypeInfos));
  LOG.debug("debugDisplayAllMaps partitionColumnCount " + partitionColumnCount);
  LOG.debug("debugDisplayAllMaps scratchColumnTypeNames " + Arrays.toString(scratchColumnTypeNames));
}
Code example source: origin: apache/hive
tableIncludedCols = new ArrayList<>(rbCtx.getRowColumnTypeInfos().length);
for (int i = 0; i < rbCtx.getRowColumnTypeInfos().length; ++i) {
  tableIncludedCols.add(i);
}
Code example source: origin: apache/hive
public static List<String> getColumns(VectorizedRowBatchCtx vectorizedRowBatchCtx,
    int startIndex, int count) {
  String[] rowColumnNames = vectorizedRowBatchCtx.getRowColumnNames();
  TypeInfo[] rowColumnTypeInfos = vectorizedRowBatchCtx.getRowColumnTypeInfos();
  DataTypePhysicalVariation[] dataTypePhysicalVariations =
      vectorizedRowBatchCtx.getRowdataTypePhysicalVariations();
  List<String> result = new ArrayList<String>(count);
  final int end = startIndex + count;
  for (int i = startIndex; i < end; i++) {
    String displayString = rowColumnNames[i] + ":" + rowColumnTypeInfos[i];
    if (dataTypePhysicalVariations != null &&
        dataTypePhysicalVariations[i] != DataTypePhysicalVariation.NONE) {
      displayString += "/" + dataTypePhysicalVariations[i].toString();
    }
    result.add(displayString);
  }
  return result;
}
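In this variant, a column whose physical storage differs from the default gets its variation appended after a slash: for a hypothetical column price decimal(10,2) stored with the DECIMAL_64 variation, the list entry would read "price:decimal(10,2)/DECIMAL_64".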
Code example source: origin: apache/hive
public BatchToRowReader(RecordReader<NullWritable, VectorizedRowBatch> vrbReader,
    VectorizedRowBatchCtx vrbCtx, List<Integer> includedCols) {
  this.vrbReader = vrbReader;
  this.key = vrbReader.createKey();
  this.batch = vrbReader.createValue();
  this.schema = Lists.<TypeInfo>newArrayList(vrbCtx.getRowColumnTypeInfos());
  // TODO: does this include partition columns?
  boolean[] included = new boolean[schema.size()];
  if (includedCols != null) {
    for (int colIx : includedCols) {
      included[colIx] = true;
    }
  } else {
    Arrays.fill(included, true);
  }
  // Create struct for ROW__ID virtual column and extract index
  this.rowIdIdx = vrbCtx.findVirtualColumnNum(VirtualColumn.ROWID);
  if (this.rowIdIdx >= 0) {
    included[rowIdIdx] = true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Including the columns " + DebugUtils.toString(included));
  }
  this.included = included;
}
Code example source: origin: apache/drill
public BatchToRowReader(RecordReader<NullWritable, VectorizedRowBatch> vrbReader,
    VectorizedRowBatchCtx vrbCtx, List<Integer> includedCols) {
  this.vrbReader = vrbReader;
  this.key = vrbReader.createKey();
  this.batch = vrbReader.createValue();
  this.schema = Lists.<TypeInfo>newArrayList(vrbCtx.getRowColumnTypeInfos());
  // TODO: does this include partition columns?
  boolean[] included = new boolean[schema.size()];
  if (includedCols != null) {
    for (int colIx : includedCols) {
      included[colIx] = true;
    }
  } else {
    Arrays.fill(included, true);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Including the columns " + DebugUtils.toString(included));
  }
  this.included = included;
}
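This apache/drill copy is essentially the same constructor as the apache/hive version above, without the ROW__ID virtual-column handling.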
Code example source: origin: apache/hive
this.cacheIncludes = cacheIncludes;
this.sourceBatch = vrbCtx.createVectorizedRowBatch();
deserializeRead = new LazySimpleDeserializeRead(vrbCtx.getRowColumnTypeInfos(),
    vrbCtx.getRowdataTypePhysicalVariations(), /* useExternalBuffer */ true,
    createSerdeParams(conf, tblProps));
vectorDeserializeRow = new VectorDeserializeRow<LazySimpleDeserializeRead>(deserializeRead);
int colCount = vrbCtx.getRowColumnTypeInfos().length;
boolean[] includes = null;
this.usesSourceIncludes = sourceIncludes.size() < colCount;
Code example source: origin: apache/hive
final TypeInfo[] rowColumnTypeInfos = batchContext.getRowColumnTypeInfos();
// Truncated in the original; the arguments and the object-inspector assignment are reconstructed.
tableStructTypeInfo = TypeInfoFactory.getStructTypeInfo(
    Arrays.asList(batchContext.getRowColumnNames()), Arrays.asList(rowColumnTypeInfos));
tableStandardStructObjectInspector = (StandardStructObjectInspector)
    TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(tableStructTypeInfo);
tableRowTypeInfos = batchContext.getRowColumnTypeInfos();
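For reference, here is a self-contained sketch of the same name-plus-TypeInfo-to-struct pattern (the column names and types are made up for illustration; in the excerpt above they come from the batch context):

import java.util.Arrays;
import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class StructTypeInfoSketch {
  public static void main(String[] args) {
    // Hypothetical columns, just to show the getStructTypeInfo call shape.
    String[] names = { "id", "name" };
    TypeInfo[] types = { TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo };
    StructTypeInfo structTypeInfo = (StructTypeInfo) TypeInfoFactory.getStructTypeInfo(
        Arrays.asList(names), Arrays.asList(types));
    System.out.println(structTypeInfo.getTypeName()); // struct<id:int,name:string>
  }
}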
Code example source: origin: apache/hive
public DruidVectorizedWrapper(DruidQueryRecordReader reader, Configuration jobConf) {
  this.rbCtx = Utilities.getVectorizedRowBatchCtx(jobConf);
  if (rbCtx.getDataColumnNums() != null) {
    projectedColumns = rbCtx.getDataColumnNums();
  } else {
    // case all the columns are selected
    projectedColumns = new int[rbCtx.getRowColumnTypeInfos().length];
    for (int i = 0; i < projectedColumns.length; i++) {
      projectedColumns[i] = i;
    }
  }
  this.serDe = createAndInitializeSerde(jobConf);
  this.baseReader = Preconditions.checkNotNull(reader);
  // row parser and row assigner initializing
  // (vectorAssignRow is presumably a VectorAssignRow field of the enclosing class)
  try {
    vectorAssignRow.init((StructObjectInspector) serDe.getObjectInspector());
  } catch (HiveException e) {
    throw new RuntimeException(e);
  }
  druidWritable = baseReader.createValue();
  rowBoat = new Object[rbCtx.getDataColumnCount()];
}
Code example source: origin: apache/drill
final TypeInfo[] rowColumnTypeInfos = batchContext.getRowColumnTypeInfos();
// Truncated in the original; the arguments and the object-inspector assignment are reconstructed.
tableStructTypeInfo = TypeInfoFactory.getStructTypeInfo(
    Arrays.asList(batchContext.getRowColumnNames()), Arrays.asList(rowColumnTypeInfos));
tableStandardStructObjectInspector = (StandardStructObjectInspector)
    TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(tableStructTypeInfo);
tableRowTypeInfos = batchContext.getRowColumnTypeInfos();
Code example source: origin: apache/hive
} else {
  projectedColumns = new int[rbCtx.getRowColumnTypeInfos().length];
  for (int i = 0; i < projectedColumns.length; i++) {
    projectedColumns[i] = i;
  }
}
Code example source: origin: apache/hive
final boolean decimal64Support = HiveConf.getVar(job, ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED)
    .equalsIgnoreCase("decimal_64");
int limit = determineQueueLimit(queueLimitBase, queueLimitMin, rbCtx.getRowColumnTypeInfos(), decimal64Support);
LOG.info("Queue limit for LlapRecordReader is " + limit);
this.queue = new LinkedBlockingQueue<>(limit);
Code example source: origin: org.apache.hive/hive-llap-server
tableIncludedCols = new ArrayList<>(rbCtx.getRowColumnTypeInfos().length);
for (int i = 0; i < rbCtx.getRowColumnTypeInfos().length; ++i) {
  tableIncludedCols.add(i);
}
Code example source: origin: org.apache.hive/hive-llap-server
this.cacheIncludes = cacheIncludes;
this.sourceBatch = vrbCtx.createVectorizedRowBatch();
deserializeRead = new LazySimpleDeserializeRead(vrbCtx.getRowColumnTypeInfos(),
    vrbCtx.getRowdataTypePhysicalVariations(), /* useExternalBuffer */ true,
    createSerdeParams(conf, tblProps));
vectorDeserializeRow = new VectorDeserializeRow<LazySimpleDeserializeRead>(deserializeRead);
int colCount = vrbCtx.getRowColumnTypeInfos().length;
boolean[] includes = null;
this.usesSourceIncludes = sourceIncludes.size() < colCount;
Code example source: origin: org.apache.hive/kafka-handler
} else {
  projectedColumns = new int[rbCtx.getRowColumnTypeInfos().length];
  for (int i = 0; i < projectedColumns.length; i++) {
    projectedColumns[i] = i;
  }
}
Code example source: origin: org.apache.hive/hive-llap-server
final boolean decimal64Support = HiveConf.getVar(job, ConfVars.HIVE_VECTORIZED_INPUT_FORMAT_SUPPORTS_ENABLED)
    .equalsIgnoreCase("decimal_64");
int limit = determineQueueLimit(queueLimitBase, queueLimitMin, rbCtx.getRowColumnTypeInfos(), decimal64Support);
LOG.info("Queue limit for LlapRecordReader is " + limit);
this.queue = new LinkedBlockingQueue<>(limit);
This content is collected from the Internet; if it infringes on your rights, please contact the author for removal!