Usage of the org.apache.hadoop.hive.ql.exec.Utilities.makeList() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.exec.Utilities.makeList() method and shows how it is used in practice. The examples come from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of Utilities.makeList() follow:
Package path: org.apache.hadoop.hive.ql.exec.Utilities
Class name: Utilities
Method name: makeList

Utilities.makeList overview

The upstream javadoc is empty. Judging from the call sites below, makeList copies its varargs arguments, in order, into a new mutable java.util.ArrayList; callers use the result to build the parent/child operator lists passed to setParentOperators() and setChildOperators(), and to assemble key/value expression lists for plan descriptors.
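A minimal, self-contained sketch of those semantics (an illustration under that assumption, not Hive's actual source; the MakeListDemo class name is made up). The point worth noting is that, unlike Arrays.asList(), the returned list is mutable, so callers can keep editing it:

import java.util.ArrayList;

public final class MakeListDemo {

  // Copies the varargs into a new, mutable ArrayList, mirroring how
  // Utilities.makeList is used in the examples below.
  @SafeVarargs
  public static <T> ArrayList<T> makeList(T... elements) {
    ArrayList<T> list = new ArrayList<>(elements.length);
    for (T element : elements) {
      list.add(element);
    }
    return list;
  }

  public static void main(String[] args) {
    ArrayList<String> cols = makeList("key", "value");
    cols.add("ds"); // Arrays.asList("key", "value") would throw here
    System.out.println(cols); // prints [key, value, ds]
  }
}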

Code examples

Code example source: origin: apache/hive

/**
  * In a bucket map join, ReduceSink operators mark the small-table parents (the ReduceSink on the big-table side has already been removed).
  * In an SMB join these are not expected above any parent, whether from the small tables or the big table.
  * @param mapJoinOp
  */
 @SuppressWarnings("unchecked")
 private void removeSmallTableReduceSink(MapJoinOperator mapJoinOp) {
  SMBJoinDesc smbJoinDesc = new SMBJoinDesc(mapJoinOp.getConf());
  List<Operator<? extends OperatorDesc>> parentOperators = mapJoinOp.getParentOperators();
  for (int i = 0; i < parentOperators.size(); i++) {
   Operator<? extends OperatorDesc> par = parentOperators.get(i);
   if (i != smbJoinDesc.getPosBigTable()) {
    if (par instanceof ReduceSinkOperator) {
     List<Operator<? extends OperatorDesc>> grandParents = par.getParentOperators();
     Preconditions.checkArgument(grandParents.size() == 1,
      "AssertionError: expect # of parents to be 1, but was " + grandParents.size());
     Operator<? extends OperatorDesc> grandParent = grandParents.get(0);
     grandParent.removeChild(par);
     grandParent.setChildOperators(Utilities.makeList(mapJoinOp));
     mapJoinOp.getParentOperators().set(i, grandParent);
    }
   }
  }
 }

Code example source: origin: apache/drill

/**
  * In a bucket map join, ReduceSink operators mark the small-table parents (the ReduceSink on the big-table side has already been removed).
  * In an SMB join these are not expected above any parent, whether from the small tables or the big table.
  * @param mapJoinOp
  */
 @SuppressWarnings("unchecked")
 private void removeSmallTableReduceSink(MapJoinOperator mapJoinOp) {
  SMBJoinDesc smbJoinDesc = new SMBJoinDesc(mapJoinOp.getConf());
  List<Operator<? extends OperatorDesc>> parentOperators = mapJoinOp.getParentOperators();
  for (int i = 0; i < parentOperators.size(); i++) {
   Operator<? extends OperatorDesc> par = parentOperators.get(i);
   if (i != smbJoinDesc.getPosBigTable()) {
    if (par instanceof ReduceSinkOperator) {
     List<Operator<? extends OperatorDesc>> grandParents = par.getParentOperators();
     Preconditions.checkArgument(grandParents.size() == 1,
      "AssertionError: expect # of parents to be 1, but was " + grandParents.size());
     Operator<? extends OperatorDesc> grandParent = grandParents.get(0);
     grandParent.removeChild(par);
     grandParent.setChildOperators(Utilities.makeList(mapJoinOp));
     mapJoinOp.getParentOperators().set(i, grandParent);
    }
   }
  }
 }

Code example source: origin: apache/hive

/**
 * Inserts newOperator between parent and child, which must form a sole
 * parent/child pair.
 * @param newOperator the operator to be inserted between parent and child
 * @param parent
 * @param child
 * @throws SemanticException if parent is not the single parent of child, or
 *           child is not the single child of parent
 */
protected static void insertOperatorBetween(
  Operator<?> newOperator, Operator<?> parent, Operator<?> child)
    throws SemanticException {
 isNullOperator(newOperator);
 isNullOperator(parent);
 isNullOperator(child);
 if (parent != getSingleParent(child)) {
  throw new SemanticException("Operator " + parent.getName() + " (ID: " +
    parent.getIdentifier() + ") is not the only parent of Operator " +
    child.getName() + " (ID: " + child.getIdentifier() + ")");
 }
 if (child != getSingleChild(parent)) {
  throw new SemanticException("Operator " + child.getName() + " (ID: " +
    child.getIdentifier() + ") is not the only child of Operator " +
    parent.getName() + " (ID: " + parent.getIdentifier() + ")");
 }
 newOperator.setParentOperators(Utilities.makeList(parent));
 newOperator.setChildOperators(Utilities.makeList(child));
 child.setParentOperators(Utilities.makeList(newOperator));
 parent.setChildOperators(Utilities.makeList(newOperator));
}
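The effect of those four makeList calls is easiest to see on a toy DAG. Below is a minimal sketch with a hypothetical Node class standing in for Hive's Operator (all names are made up for illustration): assigning a fresh single-element list on each side replaces the old parent/child edges wholesale.

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for Hive's Operator DAG, showing the rewiring
// pattern used by insertOperatorBetween above.
class Node {
  final String name;
  List<Node> parents = new ArrayList<>();
  List<Node> children = new ArrayList<>();

  Node(String name) { this.name = name; }

  @SafeVarargs
  static <T> ArrayList<T> makeList(T... elements) {
    ArrayList<T> list = new ArrayList<>();
    for (T e : elements) {
      list.add(e);
    }
    return list;
  }

  static void insertBetween(Node inserted, Node parent, Node child) {
    // Fresh single-element lists replace the old edge lists wholesale.
    inserted.parents = makeList(parent);
    inserted.children = makeList(child);
    child.parents = makeList(inserted);
    parent.children = makeList(inserted);
  }

  public static void main(String[] args) {
    Node scan = new Node("TS"), sink = new Node("FS"), select = new Node("SEL");
    scan.children = makeList(sink);
    sink.parents = makeList(scan);
    insertBetween(select, scan, sink);
    System.out.println(scan.children.get(0).name); // SEL
    System.out.println(sink.parents.get(0).name);  // SEL
  }
}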

Code example source: origin: apache/hive

for (Operator<?> sel : firstNodesOfPruningBranch) {
  SparkUtilities.collectOp(sinkSet, sel, SparkPartitionPruningSinkOperator.class);
  sel.setParentOperators(Utilities.makeList(newBranchingOp));
}

Code example source: origin: apache/drill

filterOp.setChildOperators(Utilities.makeList(selOp));

Code example source: origin: apache/hive

// Discontiguous excerpts from one file; elided code is marked "// ...".
  child.setParentOperators(parentsWithMultipleDemux);
} else {
  child.setParentOperators(Utilities.makeList(demuxOp));
}
// ...
parent.setChildOperators(Utilities.makeList(demuxOp));
// ...
Operator<? extends OperatorDesc> mux = OperatorFactory.get(
    childOP.getCompilationOpContext(), new MuxDesc(parentsOfMux));
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
childOP.setParentOperators(Utilities.makeList(mux));
parentOp.setChildOperators(Utilities.makeList(mux));
// ...
List<Operator<? extends OperatorDesc>> parentsOfMux = // ...
Operator<? extends OperatorDesc> mux = OperatorFactory.get(
    rsop.getCompilationOpContext(), muxDesc);
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
// ...
op.setChildOperators(Utilities.makeList(mux));
// ...
childOP.setParentOperators(Utilities.makeList(mux));

Code example source: origin: apache/drill

/**
 * Inserts newOperator between parent and child, which must form a sole
 * parent/child pair.
 * @param newOperator the operator to be inserted between parent and child
 * @param parent
 * @param child
 * @throws SemanticException if parent is not the single parent of child, or
 *           child is not the single child of parent
 */
protected static void insertOperatorBetween(
  Operator<?> newOperator, Operator<?> parent, Operator<?> child)
    throws SemanticException {
 isNullOperator(newOperator);
 isNullOperator(parent);
 isNullOperator(child);
 if (parent != getSingleParent(child)) {
  throw new SemanticException("Operator " + parent.getName() + " (ID: " +
    parent.getIdentifier() + ") is not the only parent of Operator " +
    child.getName() + " (ID: " + child.getIdentifier() + ")");
 }
 if (child != getSingleChild(parent)) {
  throw new SemanticException("Operator " + child.getName() + " (ID: " +
    child.getIdentifier() + ") is not the only child of Operator " +
    parent.getName() + " (ID: " + parent.getIdentifier() + ")");
 }
 newOperator.setParentOperators(Utilities.makeList(parent));
 newOperator.setChildOperators(Utilities.makeList(child));
 child.setParentOperators(Utilities.makeList(newOperator));
 parent.setChildOperators(Utilities.makeList(newOperator));
}

Code example source: origin: apache/drill

// Discontiguous excerpts from one file; elided code is marked "// ...".
  child.setParentOperators(parentsWithMultipleDemux);
} else {
  child.setParentOperators(Utilities.makeList(demuxOp));
}
// ...
parent.setChildOperators(Utilities.makeList(demuxOp));
// ...
Operator<? extends OperatorDesc> mux = OperatorFactory.get(
    childOP.getCompilationOpContext(), new MuxDesc(parentsOfMux));
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
childOP.setParentOperators(Utilities.makeList(mux));
parentOp.setChildOperators(Utilities.makeList(mux));
// ...
List<Operator<? extends OperatorDesc>> parentsOfMux = // ...
Operator<? extends OperatorDesc> mux = OperatorFactory.get(
    rsop.getCompilationOpContext(), muxDesc);
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
// ...
op.setChildOperators(Utilities.makeList(mux));
// ...
childOP.setParentOperators(Utilities.makeList(mux));

Code example source: origin: apache/hive

@SuppressWarnings("unchecked")
private void populateMapRedPlan1(Table src) throws SemanticException {
 ArrayList<String> outputColumns = new ArrayList<String>();
 for (int i = 0; i < 2; i++) {
  outputColumns.add("_col" + i);
 }
 // map-side work
 Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
   .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
   Utilities.makeList(getStringColumn("value")), outputColumns, true,
   -1, 1, -1, AcidUtils.Operation.NOT_ACID));
 addMapWork(mr, src, "a", op1);
 ReduceWork rWork = new ReduceWork();
 rWork.setNumReduceTasks(Integer.valueOf(1));
 rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
 rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 mr.setReduceWork(rWork);
 // reduce side work
 Operator<FileSinkDesc> op3 = OperatorFactory.get(ctx, new FileSinkDesc(new Path(tmpdir + File.separator
   + "mapredplan1.out"), Utilities.defaultTd, false));
 List<ExprNodeDesc> cols = new ArrayList<ExprNodeDesc>();
 cols.add(getStringColumn(Utilities.ReduceField.VALUE.toString()+"."+outputColumns.get(1)));
 List<String> colNames = new ArrayList<String>();
 colNames.add(HiveConf.getColumnInternalName(2));
 Operator<SelectDesc> op2 = OperatorFactory.get(new SelectDesc(cols, colNames), op3);
 rWork.setReducer(op2);
}
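These test snippets (here and in the following examples) call a getStringColumn helper from Hive's TestExecDriver that the excerpts omit. Presumably it just builds a string-typed column reference; a sketch under that assumption:

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

// Presumed shape of the omitted test helper: a string-typed column
// reference with no table alias ("") that is not a partition column (false).
private static ExprNodeColumnDesc getStringColumn(String columnName) {
  return new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, columnName, "", false);
}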

Code example source: origin: apache/hive

@SuppressWarnings("unchecked")
private void populateMapRedPlan5(Table src) throws SemanticException {
 // map-side work
 ArrayList<String> outputColumns = new ArrayList<String>();
 for (int i = 0; i < 2; i++) {
  outputColumns.add("_col" + i);
 }
 Operator<ReduceSinkDesc> op0 = OperatorFactory.get(ctx, PlanUtils
   .getReduceSinkDesc(Utilities.makeList(getStringColumn("0")), Utilities
   .makeList(getStringColumn("0"), getStringColumn("1")),
   outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
 Operator<SelectDesc> op4 = OperatorFactory.get(new SelectDesc(Utilities
   .makeList(getStringColumn("key"), getStringColumn("value")),
   outputColumns), op0);
 addMapWork(mr, src, "a", op4);
 ReduceWork rWork = new ReduceWork();
 mr.setReduceWork(rWork);
 rWork.setNumReduceTasks(Integer.valueOf(1));
 rWork.setKeyDesc(op0.getConf().getKeySerializeInfo());
 rWork.getTagToValueDesc().add(op0.getConf().getValueSerializeInfo());
 // reduce side work
 Operator<FileSinkDesc> op3 = OperatorFactory.get(ctx, new FileSinkDesc(new Path(tmpdir + File.separator
   + "mapredplan5.out"), Utilities.defaultTd, false));
 List<ExprNodeDesc> cols = new ArrayList<ExprNodeDesc>();
 cols.add(getStringColumn(Utilities.ReduceField.KEY + ".reducesinkkey" + 0));
 cols.add(getStringColumn(Utilities.ReduceField.VALUE.toString()+"."+outputColumns.get(1)));
 Operator<SelectDesc> op2 = OperatorFactory.get(new SelectDesc(cols, outputColumns), op3);
 rWork.setReducer(op2);
}

Code example source: origin: apache/hive

// Discontiguous excerpt; elided code is marked "// ...".
// ...
    .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
        Utilities.makeList(getStringColumn("value")), outputColumns, true,
        Byte.valueOf((byte) 0), 1, -1, AcidUtils.Operation.NOT_ACID));
// ...
    .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
        Utilities.makeList(getStringColumn("key")), outputColumns, true,
        Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1, AcidUtils.Operation.NOT_ACID));
// ...
    .makeList(new ExprNodeFieldDesc(TypeInfoFactory.stringTypeInfo,
        new ExprNodeColumnDesc(TypeInfoFactory.getListTypeInfo(TypeInfoFactory.stringTypeInfo),
            Utilities.ReduceField.VALUE.toString(), "", false), "0", false)),
    Utilities.makeList(outputColumns.get(0))), op4);

Code example source: origin: apache/hive

@SuppressWarnings("unchecked")
private void populateMapRedPlan2(Table src) throws Exception {
 ArrayList<String> outputColumns = new ArrayList<String>();
 for (int i = 0; i < 2; i++) {
  outputColumns.add("_col" + i);
 }
 // map-side work
 Operator<ReduceSinkDesc> op1 = OperatorFactory.get(ctx, PlanUtils
   .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")),
   Utilities
   .makeList(getStringColumn("key"), getStringColumn("value")),
   outputColumns, false, -1, 1, -1, AcidUtils.Operation.NOT_ACID));
 addMapWork(mr, src, "a", op1);
 ReduceWork rWork = new ReduceWork();
 rWork.setNumReduceTasks(Integer.valueOf(1));
 rWork.setKeyDesc(op1.getConf().getKeySerializeInfo());
 rWork.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo());
 mr.setReduceWork(rWork);
 // reduce side work
 Operator<FileSinkDesc> op4 = OperatorFactory.get(ctx, new FileSinkDesc(new Path(tmpdir + File.separator
   + "mapredplan2.out"), Utilities.defaultTd, false));
 Operator<FilterDesc> op3 = OperatorFactory.get(getTestFilterDesc("0"), op4);
 List<ExprNodeDesc> cols = new ArrayList<ExprNodeDesc>();
 cols.add(getStringColumn(Utilities.ReduceField.KEY + ".reducesinkkey" + 0));
 cols.add(getStringColumn(Utilities.ReduceField.VALUE.toString()+"."+outputColumns.get(1)));
 Operator<SelectDesc> op2 = OperatorFactory.get(new SelectDesc(cols, outputColumns), op3);
 rWork.setReducer(op2);
}

Code example source: origin: apache/hive

fileSinkOp.setParentOperators(Utilities.makeList(parent));
tableScanOp.setChildOperators(Utilities.makeList(child));
child.replaceParent(parent, tableScanOp);

Code example source: origin: apache/hive

// Discontiguous excerpt; elided code is marked "// ...".
// ...
    .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
        Utilities.makeList(getStringColumn("tkey"),
            getStringColumn("tvalue")), outputColumns, false, -1, 1, -1,
        AcidUtils.Operation.NOT_ACID));
// ...
    .makeList(getStringColumn("key"), getStringColumn("value")),
    outputColumns), op0);

Code example source: origin: apache/hive

// Discontiguous excerpt; elided code is marked "// ...".
// ...
    .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")),
        Utilities.makeList(getStringColumn("tkey"),
            getStringColumn("tvalue")), outputColumns, false, -1, 1, -1,
        AcidUtils.Operation.NOT_ACID));
// ...
    .makeList(getStringColumn("key"), getStringColumn("value")),
    outputColumns), op0);

Code example source: origin: apache/drill

fileSinkOp.setParentOperators(Utilities.makeList(parent));
tableScanOp.setChildOperators(Utilities.makeList(child));
child.replaceParent(parent, tableScanOp);

Code example source: origin: com.facebook.presto.hive/hive-apache

/**
  * In a bucket map join, ReduceSink operators mark the small-table parents (the ReduceSink on the big-table side has already been removed).
  * In an SMB join these are not expected above any parent, whether from the small tables or the big table.
  * @param mapJoinOp
  */
 @SuppressWarnings("unchecked")
 private void removeSmallTableReduceSink(MapJoinOperator mapJoinOp) {
  SMBJoinDesc smbJoinDesc = new SMBJoinDesc(mapJoinOp.getConf());
  List<Operator<? extends OperatorDesc>> parentOperators = mapJoinOp.getParentOperators();
  for (int i = 0; i < parentOperators.size(); i++) {
   Operator<? extends OperatorDesc> par = parentOperators.get(i);
   if (i != smbJoinDesc.getPosBigTable()) {
    if (par instanceof ReduceSinkOperator) {
     List<Operator<? extends OperatorDesc>> grandParents = par.getParentOperators();
     Preconditions.checkArgument(grandParents.size() == 1,
      "AssertionError: expect # of parents to be 1, but was " + grandParents.size());
     Operator<? extends OperatorDesc> grandParent = grandParents.get(0);
     grandParent.removeChild(par);
     grandParent.setChildOperators(Utilities.makeList(mapJoinOp));
     mapJoinOp.getParentOperators().set(i, grandParent);
    }
   }
  }
 }

Code example source: origin: com.facebook.presto.hive/hive-apache

/**
 * Inserts newOperator between parent and child, which must form a sole
 * parent/child pair.
 * @param newOperator the operator to be inserted between parent and child
 * @param parent
 * @param child
 * @throws SemanticException if parent is not the single parent of child, or
 *           child is not the single child of parent
 */
protected static void insertOperatorBetween(
  Operator<?> newOperator, Operator<?> parent, Operator<?> child)
    throws SemanticException {
 isNullOperator(newOperator);
 isNullOperator(parent);
 isNullOperator(child);
 if (parent != getSingleParent(child)) {
  throw new SemanticException("Operator " + parent.getName() + " (ID: " +
    parent.getIdentifier() + ") is not the only parent of Operator " +
    child.getName() + " (ID: " + child.getIdentifier() + ")");
 }
 if (child != getSingleChild(parent)) {
  throw new SemanticException("Operator " + child.getName() + " (ID: " +
    child.getIdentifier() + ") is not the only child of Operator " +
    parent.getName() + " (ID: " + parent.getIdentifier() + ")");
 }
 newOperator.setParentOperators(Utilities.makeList(parent));
 newOperator.setChildOperators(Utilities.makeList(child));
 child.setParentOperators(Utilities.makeList(newOperator));
 parent.setChildOperators(Utilities.makeList(newOperator));
}

Code example source: origin: com.facebook.presto.hive/hive-apache

// Discontiguous excerpts from one file; elided code is marked "// ...".
  child.setParentOperators(parentsWithMultipleDemux);
} else {
  child.setParentOperators(Utilities.makeList(demuxOp));
}
// ...
parent.setChildOperators(Utilities.makeList(demuxOp));
// ...
Operator<? extends OperatorDesc> mux = OperatorFactory.get(
    new MuxDesc(parentsOfMux));
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
childOP.setParentOperators(Utilities.makeList(mux));
parentOp.setChildOperators(Utilities.makeList(mux));
// ...
List<Operator<? extends OperatorDesc>> parentsOfMux = // ...
mux.setChildOperators(Utilities.makeList(childOP));
mux.setParentOperators(parentsOfMux);
// ...
op.setChildOperators(Utilities.makeList(mux));
// ...
childOP.setParentOperators(Utilities.makeList(mux));

Code example source: origin: com.facebook.presto.hive/hive-apache

fileSinkOp.setParentOperators(Utilities.makeList(parent));
tableScanOp.setChildOperators(Utilities.makeList(child));
child.replaceParent(parent, tableScanOp);
