Usage of the org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.<init>() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.<init>() method and shows how it is used in practice. The examples are excerpted from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the method:
Package: org.apache.hadoop.hive.ql.exec.vector
Class: VectorizedRowBatchCtx
Method: <init>

About VectorizedRowBatchCtx.<init>

Constructor for VectorizedRowBatchCtx.
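Distilling the examples below, there are two ways to obtain a usable context: the no-argument constructor followed by init(), or one of the explicit constructors that takes the column metadata directly. The following is a minimal self-contained sketch of the first pattern; the class name and the single bigint column are illustrative, not taken from any of the projects cited below.

import java.util.Arrays;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;

public class VectorizedRowBatchCtxSketch {
 public static void main(String[] args) throws HiveException {
  // Describe a one-column row schema (id: bigint) with an ObjectInspector.
  StructObjectInspector rowOi = ObjectInspectorFactory.getStandardStructObjectInspector(
    Arrays.asList("id"),
    Arrays.<ObjectInspector>asList(PrimitiveObjectInspectorFactory.writableLongObjectInspector));

  // No-arg constructor, then init() with the row schema and no scratch columns.
  VectorizedRowBatchCtx vrbCtx = new VectorizedRowBatchCtx();
  vrbCtx.init(rowOi, new String[0]);

  // The context can now allocate batches matching that schema.
  VectorizedRowBatch batch = vrbCtx.createVectorizedRowBatch();
  System.out.println("columns in batch: " + batch.cols.length);
 }
}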

Code examples

Code example source: apache/hive

@Override
public void initializeOp(Configuration hconf) throws HiveException {
 super.initializeOp(hconf);
 // Build a batch context from the operator's output row schema plus scratch columns.
 vrbCtx = new VectorizedRowBatchCtx();
 vrbCtx.init((StructObjectInspector) this.outputObjInspector,
   vOutContext.getScratchColumnTypeNames(), vOutContext.getScratchDataTypePhysicalVariations());
 // Allocate the reusable output batch described by that context.
 outputBatch = vrbCtx.createVectorizedRowBatch();
 outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRow>();
}
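The batch returned by createVectorizedRowBatch() is an empty container whose column vectors are pre-allocated for VectorizedRowBatch.DEFAULT_SIZE rows; the operator then fills the vectors and sets the row count. A hedged sketch of that filling step, continuing the snippet above and assuming column 0 of outputBatch is a bigint column:

// Hypothetical follow-up: populate column 0 of the freshly created batch.
LongColumnVector col0 = (LongColumnVector) outputBatch.cols[0];
for (int i = 0; i < VectorizedRowBatch.DEFAULT_SIZE; i++) {
 col0.vector[i] = i;  // raw long values live in the vector array
}
outputBatch.size = VectorizedRowBatch.DEFAULT_SIZE;  // mark how many rows are valid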

Code example source: apache/drill

@Override
public void initializeOp(Configuration hconf) throws HiveException {
 super.initializeOp(hconf);
 vrbCtx = new VectorizedRowBatchCtx();
 // Same pattern as above, but with the two-argument init (no data type physical variations).
 vrbCtx.init((StructObjectInspector) this.outputObjInspector, vOutContext.getScratchColumnTypeNames());
 outputBatch = vrbCtx.createVectorizedRowBatch();
 outputVectorAssignRowMap = new HashMap<ObjectInspector, VectorAssignRow>();
}

Code example source: apache/hive

VectorizedRowBatchCtx vrbCtx = new VectorizedRowBatchCtx();
try {
 vrbCtx.init(oi, new String[0]);  // no scratch columns
} catch (HiveException e) {
 // the catch body is elided in the original excerpt
}

Code example source: apache/hive

virtualColumns = new VirtualColumn[0];
// Full constructor: column names and types, physical variations, data column numbers,
// partition and virtual column counts, virtual columns, scratch column type names,
// and scratch physical variations.
return new VectorizedRowBatchCtx(colNames.toArray(new String[colNames.size()]),
  colTypes.toArray(new TypeInfo[colTypes.size()]), null, null, partitionColumnCount,
  virtualColumns.length, virtualColumns, new String[0], null);

Code example source: apache/drill

public void transferToBaseWork(BaseWork baseWork) {
  String[] allColumnNameArray = allColumnNames.toArray(new String[0]);
  TypeInfo[] allTypeInfoArray = allTypeInfos.toArray(new TypeInfo[0]);
  int[] dataColumnNumsArray;
  if (dataColumnNums != null) {
   dataColumnNumsArray = ArrayUtils.toPrimitive(dataColumnNums.toArray(new Integer[0]));
  } else {
   dataColumnNumsArray = null;
  }
  VectorizedRowBatchCtx vectorizedRowBatchCtx =
    new VectorizedRowBatchCtx(
     allColumnNameArray,
     allTypeInfoArray,
     dataColumnNumsArray,
     partitionColumnCount,
     scratchTypeNameArray);
  baseWork.setVectorizedRowBatchCtx(vectorizedRowBatchCtx);
  if (baseWork instanceof MapWork) {
   MapWork mapWork = (MapWork) baseWork;
   mapWork.setUseVectorizedInputFileFormat(useVectorizedInputFileFormat);
  }
  baseWork.setAllNative(allNative);
  baseWork.setGroupByVectorOutput(groupByVectorOutput);
  baseWork.setUsesVectorUDFAdaptor(usesVectorUDFAdaptor);
 }
}
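This is the planning side: the context is computed once and attached to the work object. On the execution side the same context would be read back from the deserialized work; a short sketch, assuming a getter on BaseWork that matches the setVectorizedRowBatchCtx() call above:

// Hypothetical consumer of the context stored above.
VectorizedRowBatchCtx ctx = baseWork.getVectorizedRowBatchCtx();
VectorizedRowBatch batch = ctx.createVectorizedRowBatch();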

Code example source: apache/hive

new VectorizedRowBatchCtx(
 allColumnNameArray,
 allTypeInfoArray
 /* remaining constructor arguments elided in the original excerpt */);

Code example source: apache/hive

VectorExpression.doTransientInit(bigTableValueExpressions);
vrbCtx = new VectorizedRowBatchCtx();
vrbCtx.init((StructObjectInspector) this.outputObjInspector, vOutContext.getScratchColumnTypeNames());

Code example source: apache/hive

private void validateKeyInterval(OrcSplit split, RecordIdentifier lowKey, RecordIdentifier highKey, boolean filterOn)
  throws Exception {
 VectorizedOrcAcidRowBatchReader vectorizedReader =
   new VectorizedOrcAcidRowBatchReader(split, conf, Reporter.NULL, new VectorizedRowBatchCtx());
 OrcRawRecordMerger.KeyInterval keyInterval =
   vectorizedReader.getKeyInterval();
 SearchArgument sarg = vectorizedReader.getDeleteEventSarg();
 if(filterOn) {
  assertEquals(new OrcRawRecordMerger.KeyInterval(lowKey, highKey), keyInterval);
 } else {
  assertEquals(new OrcRawRecordMerger.KeyInterval(null, null), keyInterval);
  assertNull(sarg);
 }
}
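A call site for this test helper might look like the following; the split and the key bounds are hypothetical values, not taken from the original test:

// Expect the reader to compute a (lowKey, highKey) interval when filtering is on.
validateKeyInterval(split, new RecordIdentifier(1, 1, 0), new RecordIdentifier(1, 1, 2), true);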

Code example source: apache/hive

VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, new VectorizedRowBatchCtx());
if (deleteEventRegistry.equals(ColumnizedDeleteEventRegistry.class.getName())) {
 assertTrue(vectorizedReader.getDeleteEventRegistry() instanceof ColumnizedDeleteEventRegistry);
}

Code example source: apache/hive

protected static void initialVectorizedRowBatchCtx(Configuration conf) throws HiveException {
 MapWork mapWork = new MapWork();
 VectorizedRowBatchCtx rbCtx = new VectorizedRowBatchCtx();
 rbCtx.init(createStructObjectInspector(conf), new String[0]);
 mapWork.setVectorMode(true);
 mapWork.setVectorizedRowBatchCtx(rbCtx);
 Utilities.setMapWork(conf, mapWork);
}
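The MapWork stored here travels through the Configuration, so downstream code can recover both it and the batch context from the same conf. A sketch, assuming the symmetric Utilities.getMapWork() accessor:

// Hypothetical read-back of the MapWork registered above.
MapWork restored = Utilities.getMapWork(conf);
VectorizedRowBatchCtx ctx = restored.getVectorizedRowBatchCtx();
VectorizedRowBatch batch = ctx.createVectorizedRowBatch();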

Code example source: apache/hive

new VectorizedRowBatchCtx(
  columnNames,
  rowSource.typeInfos()
  /* remaining constructor arguments elided in the original excerpt */);

Code example source: apache/hive

new VectorizedRowBatchCtx(
  columnNames,
  typeInfos
  /* remaining constructor arguments elided in the original excerpt */);

Code example source: apache/hive

for (OrcSplit split : splits) {
 try {
  new VectorizedOrcAcidRowBatchReader(split, conf, Reporter.NULL, new VectorizedRowBatchCtx());
 } catch (FileFormatException e) {
  // exception handling elided in the original excerpt
 }
}

Code example source: apache/hive

// preceding statement elided; it ended with (/* allowNulls */ true, /* isUnicodeOk */ true)
VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx();
batchContext.init(source.rowStructObjectInspector(), emptyScratchTypeNames);
VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();

Code example source: apache/hive

// preceding statement elided; it ended with (/* allowNulls */ false, /* isUnicodeOk */ false)
VectorizedRowBatchCtx batchContext = new VectorizedRowBatchCtx();
batchContext.init(source.rowStructObjectInspector(), emptyScratchTypeNames);
VectorizedRowBatch batch = batchContext.createVectorizedRowBatch();
