org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.getPartitionValues()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(6.2k)|赞(0)|评价(0)|浏览(74)

本文整理了Java中org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.getPartitionValues()方法的一些代码示例,展示了VectorizedRowBatchCtx.getPartitionValues()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。VectorizedRowBatchCtx.getPartitionValues()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx
类名称:VectorizedRowBatchCtx
方法名:getPartitionValues

VectorizedRowBatchCtx.getPartitionValues介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/**
 * Resolves the partition values for the given split by fetching the MapWork
 * attached to the configuration and delegating to the MapWork-based overload.
 */
public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx, Configuration hiveConf,
  FileSplit split, Object[] partitionValues) throws IOException {
 // TODO: this is invalid for SMB. Keep this for now for legacy reasons. See the other overload.
 final MapWork work = Utilities.getMapWork(hiveConf);
 getPartitionValues(vrbCtx, work, split, partitionValues);
}

代码示例来源:origin: apache/hive

/**
 * Populates {@code partitionValues} for this split; leaves it null when the
 * table has no partition columns.
 */
private void initPartitionValues(FileSplit fileSplit, JobConf conf) throws IOException {
  final int partCols = rbCtx.getPartitionColumnCount();
  if (partCols <= 0) {
   // No partition columns: nothing to materialize.
   partitionValues = null;
   return;
  }
  partitionValues = new Object[partCols];
  VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
}

代码示例来源:origin: apache/drill

/**
 * Convenience overload: derives the MapWork from the configuration and hands
 * off to the MapWork-based variant to fill {@code partitionValues}.
 */
public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx, Configuration hiveConf,
  FileSplit split, Object[] partitionValues) throws IOException {
 // TODO: this is invalid for SMB. Keep this for now for legacy reasons. See the other overload.
 getPartitionValues(vrbCtx, Utilities.getMapWork(hiveConf), split, partitionValues);
}

代码示例来源:origin: apache/drill

/**
 * Populates {@code partitionValues} for this split; leaves it null when the
 * table has no partition columns.
 */
private void initPartitionValues(FileSplit fileSplit, JobConf conf) throws IOException {
 int partitionColumnCount = rbCtx.getPartitionColumnCount();
 if (partitionColumnCount > 0) {
  partitionValues = new Object[partitionColumnCount];
  // getPartitionValues is static; call it via the class rather than through the
  // instance to avoid the misleading static-via-instance dispatch.
  VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
 } else {
  partitionValues = null;
 }
}

代码示例来源:origin: apache/hive

/**
 * Builds a reader that produces no rows. In vector mode the row-batch context
 * and any partition column values are prepared up front; otherwise both stay null.
 */
public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
 final boolean isVectorMode = Utilities.getIsVectorized(conf);
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + " in "
    + (isVectorMode ? "" : "non-") + "vector mode");
 }
 if (!isVectorMode) {
  rbCtx = null;
  partitionValues = null;
  return;
 }
 rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
 final int partCols = rbCtx.getPartitionColumnCount();
 if (partCols > 0) {
  partitionValues = new Object[partCols];
  // The split is assumed to be a FileSplit in vector mode — TODO confirm with callers.
  VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit)split, partitionValues);
 } else {
  partitionValues = null;
 }
}

代码示例来源:origin: apache/drill

/**
 * Builds a reader that produces no rows. When the vectorized input file format
 * is in use, prepares the row-batch context and partition values eagerly;
 * otherwise both remain null.
 */
public NullRowsRecordReader(Configuration conf, InputSplit split) throws IOException {
 final boolean vectorized = Utilities.getUseVectorizedInputFileFormat(conf);
 if (LOG.isDebugEnabled()) {
  LOG.debug(getClass().getSimpleName() + " in "
    + (vectorized ? "" : "non-") + "vector mode");
 }
 if (vectorized) {
  rbCtx = Utilities.getVectorizedRowBatchCtx(conf);
  final int numPartCols = rbCtx.getPartitionColumnCount();
  // Only allocate when there are partition columns to fill.
  partitionValues = numPartCols > 0 ? new Object[numPartCols] : null;
  if (partitionValues != null) {
   VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, (FileSplit)split, partitionValues);
  }
 } else {
  rbCtx = null;
  partitionValues = null;
 }
}

代码示例来源:origin: apache/hive

/**
 * Looks up the PartitionDesc covering the split's path inside the MapWork and
 * delegates to the PartitionDesc-based overload to fill {@code partitionValues}.
 */
public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx,
  MapWork mapWork, FileSplit split, Object[] partitionValues)
  throws IOException {
 final Map<Path, PartitionDesc> partInfoByPath = mapWork.getPathToPartitionInfo();
 final PartitionDesc desc = HiveFileFormatUtils.getFromPathRecursively(
   partInfoByPath, split.getPath(), IOPrepareCache.get().getPartitionDescMap());
 getPartitionValues(vrbCtx, desc, partitionValues);
}

代码示例来源:origin: apache/drill

/**
 * Wraps an ACID row reader for vectorized consumption. Partition values are
 * resolved once at construction when the table has partition columns.
 */
VectorizedOrcAcidRowReader(AcidInputFormat.RowReader<OrcStruct> inner,
              Configuration conf,
              VectorizedRowBatchCtx vectorizedRowBatchCtx,
              FileSplit split) throws IOException {
 this.innerReader = inner;
 this.key = inner.createKey();
 rbCtx = vectorizedRowBatchCtx;
 int partitionColumnCount = rbCtx.getPartitionColumnCount();
 if (partitionColumnCount > 0) {
  partitionValues = new Object[partitionColumnCount];
  // getPartitionValues is static: invoke through the class, not the instance.
  VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, split, partitionValues);
 }
 this.value = inner.createValue();
 this.objectInspector = inner.getObjectInspector();
}

代码示例来源:origin: apache/drill

/**
 * Resolves the PartitionDesc for the split's path from the MapWork's
 * path-to-partition map, then fills {@code partitionValues} via the
 * PartitionDesc-based overload.
 */
public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx,
  MapWork mapWork, FileSplit split, Object[] partitionValues)
  throws IOException {
 final Map<Path, PartitionDesc> byPath = mapWork.getPathToPartitionInfo();
 final PartitionDesc desc =
   HiveFileFormatUtils.getPartitionDescFromPathRecursively(
     byPath, split.getPath(), IOPrepareCache.get().getPartitionDescMap());
 getPartitionValues(vrbCtx, desc, partitionValues);
}

代码示例来源:origin: apache/hive

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 VectorizedRowBatchCtx.getPartitionValues(rbCtx, mapWork, split, partitionValues);
} else {
 partitionValues = null;

代码示例来源:origin: apache/hive

VectorizedRowBatchCtx.getPartitionValues(batchContext, partDesc, partitionValues);
batchContext.addPartitionColsToBatch(deserializerBatch, partitionValues);

代码示例来源:origin: apache/hive

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 rbCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
} else {
 partitionValues = null;

代码示例来源:origin: apache/drill

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 rbCtx.getPartitionValues(rbCtx, conf, fileSplit, partitionValues);
} else {
 partitionValues = null;

代码示例来源:origin: apache/drill

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, orcSplit, partitionValues);
} else {
 partitionValues = null;

代码示例来源:origin: apache/hive

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 VectorizedRowBatchCtx.getPartitionValues(rbCtx, conf, orcSplit, partitionValues);
} else {
 partitionValues = null;

代码示例来源:origin: apache/drill

VectorizedRowBatchCtx.getPartitionValues(batchContext, partDesc, partitionValues);
batchContext.addPartitionColsToBatch(deserializerBatch, partitionValues);

代码示例来源:origin: org.apache.hive/hive-llap-server

if (partitionColumnCount > 0) {
 partitionValues = new Object[partitionColumnCount];
 VectorizedRowBatchCtx.getPartitionValues(rbCtx, mapWork, split, partitionValues);
} else {
 partitionValues = null;

相关文章