org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.addPartitionColsToBatch()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(7.6k)|赞(0)|评价(0)|浏览(92)

本文整理了Java中org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx.addPartitionColsToBatch()方法的一些代码示例,展示了VectorizedRowBatchCtx.addPartitionColsToBatch()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。VectorizedRowBatchCtx.addPartitionColsToBatch()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatchCtx
类名称:VectorizedRowBatchCtx
方法名:addPartitionColsToBatch

VectorizedRowBatchCtx.addPartitionColsToBatch介绍

[英]Add the partition values to the batch
[中]将分区值添加到批处理中

代码示例

代码示例来源:origin: apache/hive

/**
 * Adds the partition values to the batch by delegating to the overload that
 * operates directly on the batch's column vectors ({@code batch.cols}).
 *
 * @param batch the batch whose column vectors receive the partition values
 * @param partitionValues one value per partition column; presumably positioned
 *        to match the partition columns of the batch — confirm against the
 *        {@code addPartitionColsToBatch(ColumnVector[], Object[])} overload,
 *        which is not visible here
 */
public void addPartitionColsToBatch(VectorizedRowBatch batch, Object[] partitionValues)
{
 addPartitionColsToBatch(batch.cols, partitionValues);
}

代码示例来源:origin: apache/hive

/**
 * Turns {@code value} into an all-NULL batch of {@code size} rows: every data
 * column becomes a repeating vector whose single entry has its null flag set.
 *
 * @param value the batch to fill; must be a {@link VectorizedRowBatch}
 * @param size the logical row count the batch should report
 */
protected void makeNullVrb(Object value, int size) {
  VectorizedRowBatch batch = (VectorizedRowBatch) value;
  // Partition values are constant per split, so inject them only once.
  if (addPartitionCols) {
    if (partitionValues != null) {
      rbCtx.addPartitionColsToBatch(batch, partitionValues);
    }
    addPartitionCols = false;
  }
  batch.size = size;
  batch.selectedInUse = false;
  for (int col = 0; col < rbCtx.getDataColumnCount(); col++) {
    ColumnVector vector = batch.cols[col];
    if (vector != null) {
      // A repeating vector with isNull[0] set marks the whole column as NULL.
      vector.noNulls = false;
      vector.isRepeating = true;
      vector.isNull[0] = true;
    }
  }
}
}

代码示例来源:origin: apache/drill

/**
 * Fills {@code value} as an all-NULL batch of {@code size} rows: each data
 * column is marked repeating with its first (and only meaningful) slot null.
 *
 * @param value the batch to fill; must be a {@link VectorizedRowBatch}
 * @param size the logical row count to report for the batch
 */
protected void makeNullVrb(Object value, int size) {
  VectorizedRowBatch vrb = (VectorizedRowBatch)value;
  // Partition values are constant per split; add them to the batch only once.
  if (addPartitionCols) {
   if (partitionValues != null) {
    rbCtx.addPartitionColsToBatch(vrb, partitionValues);
   }
   addPartitionCols = false;
  }
  vrb.size = size;
  vrb.selectedInUse = false; // no selection vector: all 'size' rows are live
  for (int i = 0; i < rbCtx.getDataColumnCount(); i++) {
   ColumnVector cv = vrb.cols[i];
   if (cv == null) {
    continue;
   }
   // Repeating + isNull[0] marks every row of this column as NULL.
   cv.noNulls = false;
   cv.isRepeating = true;
   cv.isNull[0] = true;
  }
 }
}

代码示例来源:origin: apache/hive

/**
 * Reads the next batch of rows into {@code value}.
 *
 * @param key ignored; required by the RecordReader contract
 * @param value the batch to populate
 * @return false when the underlying reader is exhausted, true otherwise
 * @throws IOException wrapped inside a RuntimeException is also possible for
 *         any failure raised by the wrapped reader
 */
@Override
public boolean next(NullWritable key, VectorizedRowBatch value) throws IOException {
 try {
  // Partition values are constant per split; ideally they would be set in
  // createValue(), but Hive's CombineHiveRecordReader does not invoke
  // createValue() for each RecordReader it creates, so they are injected
  // lazily on the first call to next() instead.
  if (addPartitionCols && partitionValues != null) {
   rbCtx.addPartitionColsToBatch(value, partitionValues);
  }
  addPartitionCols = false;
  if (!reader.nextBatch(value)) {
   return false;
  }
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
 progress = reader.getProgress();
 return true;
}

代码示例来源:origin: apache/hive

/**
 * Advances to the next batch of rows. Returns false if there are no more.
 */
private boolean nextBatch(VectorizedRowBatch columnarBatch) throws IOException {
 columnarBatch.reset();
 if (rowsReturned >= totalRowCount) {
  return false;
 }
 // Add partition cols if necessary (see VectorizedOrcInputFormat for details).
 if (partitionValues != null) {
  rbCtx.addPartitionColsToBatch(columnarBatch, partitionValues);
 }
 checkEndOfRowGroup();
 int num = (int) Math.min(VectorizedRowBatch.DEFAULT_SIZE, totalCountLoadedSoFar - rowsReturned);
 if (colsToInclude.size() > 0) {
  for (int i = 0; i < columnReaders.length; ++i) {
   if (columnReaders[i] == null) {
    continue;
   }
   columnarBatch.cols[colsToInclude.get(i)].isRepeating = true;
   columnReaders[i].readBatch(num, columnarBatch.cols[colsToInclude.get(i)],
     columnTypesList.get(colsToInclude.get(i)));
  }
 }
 rowsReturned += num;
 columnarBatch.size = num;
 return true;
}

代码示例来源:origin: apache/drill

/**
 * Advances to the next batch of rows. Returns false if there are no more.
 */
private boolean nextBatch(VectorizedRowBatch columnarBatch) throws IOException {
 columnarBatch.reset();
 if (rowsReturned >= totalRowCount) {
  return false;
 }
 // Add partition cols if necessary (see VectorizedOrcInputFormat for details).
 if (partitionValues != null) {
  rbCtx.addPartitionColsToBatch(columnarBatch, partitionValues);
 }
 checkEndOfRowGroup();
 int num = (int) Math.min(VectorizedRowBatch.DEFAULT_SIZE, totalCountLoadedSoFar - rowsReturned);
 if (colsToInclude.size() > 0) {
  for (int i = 0; i < columnReaders.length; ++i) {
   if (columnReaders[i] == null) {
    continue;
   }
   columnarBatch.cols[colsToInclude.get(i)].isRepeating = true;
   columnReaders[i].readBatch(num, columnarBatch.cols[colsToInclude.get(i)],
     columnTypesList.get(colsToInclude.get(i)));
  }
 }
 rowsReturned += num;
 columnarBatch.size = num;
 return true;
}

代码示例来源:origin: apache/drill

/**
 * Reads the next batch of rows into {@code value}.
 *
 * @param key ignored; required by the RecordReader contract
 * @param value the batch to populate
 * @return false when the wrapped reader has no more batches
 * @throws IOException declared by the contract; failures from the wrapped
 *         reader are actually rethrown as RuntimeException below
 */
@Override
public boolean next(NullWritable key, VectorizedRowBatch value) throws IOException {
 try {
  // Check and update partition cols if necessary. Ideally, this should be done
  // in CreateValue as the partition is constant per split. But since Hive uses
  // CombineHiveRecordReader and
  // as this does not call CreateValue for each new RecordReader it creates, this check is
  // required in next()
  if (addPartitionCols) {
   if (partitionValues != null) {
    rbCtx.addPartitionColsToBatch(value, partitionValues);
   }
   addPartitionCols = false;
  }
  if (!reader.nextBatch(value)) {
   return false;
  }
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
 progress = reader.getProgress();
 return true;
}

代码示例来源:origin: apache/drill

rbCtx.addPartitionColsToBatch(value, partitionValues);

代码示例来源:origin: apache/hive

rbCtx.addPartitionColsToBatch(value, partitionValues);

代码示例来源:origin: apache/drill

/**
 * Fills {@code vectorizedRowBatch} by pulling rows one at a time from the
 * row-oriented {@code innerReader} and appending each to the batch, until the
 * batch is full ({@code selected.length} rows) or the reader is exhausted.
 *
 * @param nullWritable ignored; required by the RecordReader contract
 * @param vectorizedRowBatch the batch to reset and populate
 * @return false if the inner reader yields no row at all, true otherwise
 * @throws IOException if appending a row to the batch fails
 */
@Override
public boolean next(NullWritable nullWritable,
          VectorizedRowBatch vectorizedRowBatch
          ) throws IOException {
 vectorizedRowBatch.reset();
 buffer.reset();
 // Read the first row up front so an empty source returns false immediately.
 if (!innerReader.next(key, value)) {
  return false;
 }
 if (partitionValues != null) {
  rbCtx.addPartitionColsToBatch(vectorizedRowBatch, partitionValues);
 }
 try {
  // Note: vectorizedRowBatch.size++ both positions the new row and grows the
  // batch's reported size as a side effect of the call argument.
  VectorizedBatchUtil.acidAddRowToBatch(value,
    (StructObjectInspector) objectInspector,
    vectorizedRowBatch.size++, vectorizedRowBatch, rbCtx, buffer);
  while (vectorizedRowBatch.size < vectorizedRowBatch.selected.length &&
    innerReader.next(key, value)) {
   VectorizedBatchUtil.acidAddRowToBatch(value,
     (StructObjectInspector) objectInspector,
     vectorizedRowBatch.size++, vectorizedRowBatch, rbCtx, buffer);
  }
 } catch (Exception e) {
  throw new IOException("error iterating", e);
 }
 return true;
}

代码示例来源:origin: apache/hive

if (isFirst) {
 if (partitionValues != null) {
  rbCtx.addPartitionColsToBatch(vrb, partitionValues);

代码示例来源:origin: apache/hive

batchContext.addPartitionColsToBatch(deserializerBatch, partitionValues);

代码示例来源:origin: apache/drill

batchContext.addPartitionColsToBatch(deserializerBatch, partitionValues);

代码示例来源:origin: com.facebook.presto.hive/hive-apache

/**
 * Reads the next batch of rows into {@code value}.
 *
 * @param key ignored; required by the RecordReader contract
 * @param value the batch to populate
 * @return false when the wrapped reader has no more data
 * @throws IOException declared by the contract; failures from the wrapped
 *         reader are actually rethrown as RuntimeException below
 */
@Override
public boolean next(NullWritable key, VectorizedRowBatch value) throws IOException {
 if (!reader.hasNext()) {
  return false;
 }
 try {
  // Check and update partition cols if necessary. Ideally, this should be done
  // in CreateValue as the partition is constant per split. But since Hive uses
  // CombineHiveRecordReader and
  // as this does not call CreateValue for each new RecordReader it creates, this check is
  // required in next()
  if (addPartitionCols) {
   rbCtx.addPartitionColsToBatch(value);
   addPartitionCols = false;
  }
  reader.nextBatch(value);
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
 progress = reader.getProgress();
 return true;
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

/**
 * Fills {@code vectorizedRowBatch} by pulling rows one at a time from the
 * row-oriented {@code innerReader} and appending each to the batch, until the
 * batch is full ({@code selected.length} rows) or the reader is exhausted.
 * Partition columns are added to the batch on every call.
 *
 * @param nullWritable ignored; required by the RecordReader contract
 * @param vectorizedRowBatch the batch to reset and populate
 * @return false if the inner reader yields no row at all, true otherwise
 * @throws IOException if adding partition columns or appending a row fails
 */
@Override
public boolean next(NullWritable nullWritable,
          VectorizedRowBatch vectorizedRowBatch
          ) throws IOException {
 vectorizedRowBatch.reset();
 buffer.reset();
 // Read the first row up front so an empty source returns false immediately.
 if (!innerReader.next(key, value)) {
  return false;
 }
 try {
  rowBatchCtx.addPartitionColsToBatch(vectorizedRowBatch);
 } catch (HiveException e) {
  throw new IOException("Problem adding partition column", e);
 }
 try {
  // Note: vectorizedRowBatch.size++ both positions the new row and grows the
  // batch's reported size as a side effect of the call argument.
  VectorizedBatchUtil.acidAddRowToBatch(value,
    (StructObjectInspector) objectInspector,
    vectorizedRowBatch.size++, vectorizedRowBatch, rowBatchCtx, buffer);
  while (vectorizedRowBatch.size < vectorizedRowBatch.selected.length &&
    innerReader.next(key, value)) {
   VectorizedBatchUtil.acidAddRowToBatch(value,
     (StructObjectInspector) objectInspector,
     vectorizedRowBatch.size++, vectorizedRowBatch, rowBatchCtx, buffer);
  }
 } catch (HiveException he) {
  throw new IOException("error iterating", he);
 }
 return true;
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

rbCtx.addPartitionColsToBatch(value);
addPartitionCols = false;

代码示例来源:origin: org.apache.hive/hive-llap-server

if (isFirst) {
 if (partitionValues != null) {
  rbCtx.addPartitionColsToBatch(vrb, partitionValues);

相关文章