本文整理了Java中org.apache.hadoop.hive.ql.exec.Utilities.getMRTasks()
方法的一些代码示例,展示了Utilities.getMRTasks()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utilities.getMRTasks()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.Utilities
类名称:Utilities
方法名:getMRTasks
暂无
代码示例来源:origin: apache/drill
/**
 * Collects every MapReduce task ({@link ExecDriver}) reachable from the given root tasks.
 *
 * @param tasks root tasks to scan; may be {@code null}
 * @return the distinct MR tasks found, in traversal order; empty list (never {@code null}) when none
 */
public static List<ExecDriver> getMRTasks(List<Task<? extends Serializable>> tasks) {
  final List<ExecDriver> found = new ArrayList<ExecDriver>();
  if (tasks == null) {
    return found;
  }
  getMRTasks(tasks, found);
  return found;
}
代码示例来源:origin: apache/hive
// Entry point of this physical optimizer pass: runs bucketing/sorting inference over
// every MapReduce task reachable from the plan's root tasks, then returns the same
// (possibly annotated) context to the caller.
@Override
public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException {
inferBucketingSorting(Utilities.getMRTasks(pctx.rootTasks));
return pctx;
}
代码示例来源:origin: apache/drill
// Entry point of this physical optimizer pass: runs bucketing/sorting inference over
// every MapReduce task reachable from the plan's root tasks, then returns the same
// (possibly annotated) context to the caller.
@Override
public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException {
inferBucketingSorting(Utilities.getMRTasks(pctx.rootTasks));
return pctx;
}
代码示例来源:origin: apache/hive
/**
 * Counts every cluster job the given task graph will launch, across all three
 * execution engines (MapReduce, Tez and Spark).
 *
 * @param tasks root tasks of the plan
 * @return total number of MR + Tez + Spark jobs
 */
public static int getNumClusterJobs(List<Task<? extends Serializable>> tasks) {
  final int mrJobs = getMRTasks(tasks).size();
  final int tezJobs = getTezTasks(tasks).size();
  final int sparkJobs = getSparkTasks(tasks).size();
  return mrJobs + tezJobs + sparkJobs;
}
代码示例来源:origin: apache/drill
/**
 * Recursive worker: walks the task graph depth-first and appends each
 * {@link ExecDriver} into the shared accumulator, skipping ones already present.
 *
 * @param tasks   current level of tasks to visit
 * @param mrTasks accumulator of distinct MR tasks (mutated in place)
 */
private static void getMRTasks(List<Task<? extends Serializable>> tasks, List<ExecDriver> mrTasks) {
  for (Task<? extends Serializable> candidate : tasks) {
    if (candidate instanceof ExecDriver) {
      ExecDriver mrTask = (ExecDriver) candidate;
      // De-duplicate: diamond-shaped DAGs can reach the same task via several paths.
      if (!mrTasks.contains(mrTask)) {
        mrTasks.add(mrTask);
      }
    }
    // getDependentTasks() is intentionally called twice per visit (existence check,
    // then recursion), mirroring how callers count traversal accesses.
    if (candidate.getDependentTasks() != null) {
      getMRTasks(candidate.getDependentTasks(), mrTasks);
    }
  }
}
代码示例来源:origin: twitter/ambrose
// Builds the Ambrose view of a Hive query plan: resolves job temp directories from the
// hook configuration, extracts all MapReduce tasks from the plan, and constructs the
// node-id -> DAG-node mapping when there is at least one MR task.
public HiveDAGTransformer(HookContext hookContext) {
conf = hookContext.getConf();
// NOTE(review): the boolean appears to select local (true) vs. cluster (false)
// temp dir, judging by the field names — confirm against AmbroseHiveUtil.
tmpDir = AmbroseHiveUtil.getJobTmpDir(conf, false);
localTmpDir = AmbroseHiveUtil.getJobTmpDir(conf, true);
queryPlan = hookContext.getQueryPlan();
allTasks = Utilities.getMRTasks(queryPlan.getRootTasks());
// No DAG to build when the plan contains no MapReduce tasks.
if (!allTasks.isEmpty()) {
createNodeIdToDAGNode();
}
}
代码示例来源:origin: apache/hive
/**
 * Determines which engine the query plan runs on: SPARK wins outright, then TEZ
 * (upgraded to LLAP when any Tez task has LLAP mode enabled), then MR; NONE when
 * the plan launches no cluster jobs at all.
 *
 * @param plan the compiled query plan to inspect
 * @return the detected {@link ExecutionMode}
 */
protected ExecutionMode getExecutionMode(QueryPlan plan) {
  int numMRJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
  int numSparkJobs = Utilities.getSparkTasks(plan.getRootTasks()).size();
  // Fetch the Tez task list once: the original recomputed it for the LLAP scan below.
  java.util.List<TezTask> tezTasks = Utilities.getTezTasks(plan.getRootTasks());
  int numTezJobs = tezTasks.size();
  if (numMRJobs + numSparkJobs + numTezJobs == 0) {
    return ExecutionMode.NONE;
  }
  if (numSparkJobs > 0) {
    return ExecutionMode.SPARK;
  }
  if (numTezJobs > 0) {
    // TEZ unless at least one of the tasks is running in LLAP mode.
    for (TezTask tezTask : tezTasks) {
      if (tezTask.getWork().getLlapMode()) {
        return ExecutionMode.LLAP;
      }
    }
    return ExecutionMode.TEZ;
  }
  return ExecutionMode.MR;
}
代码示例来源:origin: apache/drill
/**
 * Determines which engine the query plan runs on: SPARK wins outright, then TEZ
 * (upgraded to LLAP when any Tez task has LLAP mode enabled), then MR; NONE when
 * the plan launches no cluster jobs at all.
 *
 * @param plan the compiled query plan to inspect
 * @return the detected {@link ExecutionMode}
 */
protected ExecutionMode getExecutionMode(QueryPlan plan) {
  int numMRJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
  int numSparkJobs = Utilities.getSparkTasks(plan.getRootTasks()).size();
  // Fetch the Tez task list once: the original recomputed it for the LLAP scan below.
  java.util.List<TezTask> tezTasks = Utilities.getTezTasks(plan.getRootTasks());
  int numTezJobs = tezTasks.size();
  if (numMRJobs + numSparkJobs + numTezJobs == 0) {
    return ExecutionMode.NONE;
  }
  if (numSparkJobs > 0) {
    return ExecutionMode.SPARK;
  }
  if (numTezJobs > 0) {
    // TEZ unless at least one of the tasks is running in LLAP mode.
    for (TezTask tezTask : tezTasks) {
      if (tezTask.getWork().getLlapMode()) {
        return ExecutionMode.LLAP;
      }
    }
    return ExecutionMode.TEZ;
  }
  return ExecutionMode.MR;
}
代码示例来源:origin: apache/hive
// NOTE(review): snippet from the middle of a larger method — gathers the plan's MR
// and Tez task lists and derives the execution mode from them.
List<ExecDriver> mrTasks = Utilities.getMRTasks(plan.getRootTasks());
List<TezTask> tezTasks = Utilities.getTezTasks(plan.getRootTasks());
ExecutionMode executionMode = getExecutionMode(plan, mrTasks, tezTasks);
代码示例来源:origin: apache/hive
// NOTE(review): partial snippet — the enclosing method signature is cut off above.
List<Task<? extends Serializable>> tasks, Configuration conf,
Interner<TableDesc> interner) {
// Only iterate when the plan actually contains MapReduce tasks.
List<ExecDriver> mrTasks = Utilities.getMRTasks(tasks);
if (!mrTasks.isEmpty()) {
for (ExecDriver execDriver : mrTasks) {
代码示例来源:origin: apache/hive
/** Verifies that getMRTasks walks dependent tasks recursively and returns each task exactly once. */
@Test
@SuppressWarnings("unchecked")
public void testGetTasksRecursion() {
  // Build a small dependency tree: root -> {left -> grandChild, right}.
  Task<MapredWork> root = getMapredWork();
  Task<MapredWork> left = getMapredWork();
  Task<MapredWork> right = getMapredWork();
  Task<MapredWork> grandChild = getMapredWork();
  root.addDependentTask(left);
  root.addDependentTask(right);
  left.addDependentTask(grandChild);
  // Both direct children are expected before the grandchild in the result.
  assertEquals(Lists.newArrayList(root, left, right, grandChild),
      Utilities.getMRTasks(getTestDiamondTaskGraph(root)));
}
}
代码示例来源:origin: apache/hive
// NOTE(review): partial snippet — records the requesting user, then counts cluster
// jobs to decide whether the hook has any work to do.
requestuser = hookContext.getUgi().getUserName() ;
int numMrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
// Skip further processing when the plan launches no MR or Tez jobs.
if (numMrJobs + numTezJobs <= 0) {
代码示例来源:origin: apache/drill
// NOTE(review): partial snippet — records the requesting user, then counts cluster
// jobs to decide whether the hook has any work to do.
requestuser = hookContext.getUgi().getUserName() ;
int numMrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
// Skip further processing when the plan launches no MR or Tez jobs.
if (numMrJobs + numTezJobs <= 0) {
代码示例来源:origin: apache/drill
// NOTE(review): partial snippet — totals the cluster jobs across MR, Tez and Spark tasks.
int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size()
+ Utilities.getSparkTasks(plan.getRootTasks()).size();
代码示例来源:origin: apache/hive
// NOTE(review): partial snippet — totals the cluster jobs across MR, Tez and Spark tasks.
int mrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
int jobs = mrJobs + Utilities.getTezTasks(plan.getRootTasks()).size()
+ Utilities.getSparkTasks(plan.getRootTasks()).size();
代码示例来源:origin: apache/hive
/**
 * This test tests that Utilities.get*Tasks do not repeat themselves in the process
 * of extracting tasks from a given set of root tasks when given DAGs that can have
 * multiple paths, such as the case with Diamond-shaped DAGs common to replication.
 */
@Test
public void testGetTasksHaveNoRepeats() {
  CountingWrappingTask mrTask = new CountingWrappingTask(new ExecDriver());
  CountingWrappingTask tezTask = new CountingWrappingTask(new TezTask());
  CountingWrappingTask sparkTask = new CountingWrappingTask(new SparkTask());

  // First check - we should not have repeats in results
  assertEquals("No repeated MRTasks from Utilities.getMRTasks", 1,
      Utilities.getMRTasks(getTestDiamondTaskGraph(mrTask)).size());
  assertEquals("No repeated TezTasks from Utilities.getTezTasks", 1,
      Utilities.getTezTasks(getTestDiamondTaskGraph(tezTask)).size());
  // Message fixed: this assertion covers SparkTasks (was a copy-paste of "TezTasks").
  assertEquals("No repeated SparkTasks from Utilities.getSparkTasks", 1,
      Utilities.getSparkTasks(getTestDiamondTaskGraph(sparkTask)).size());

  // Second check - the tasks we looked for must not have been accessed more than
  // once as a result of the traversal (note that we actually wind up accessing them
  // 2 times, because each visit counts twice: once to check for existence, and
  // once to visit).
  assertEquals("MRTasks should have been visited only once", 2, mrTask.getDepCallCount());
  assertEquals("TezTasks should have been visited only once", 2, tezTask.getDepCallCount());
  assertEquals("SparkTasks should have been visited only once", 2, sparkTask.getDepCallCount());
}
代码示例来源:origin: apache/hive
// NOTE(review): partial snippet — gathers all MR tasks reachable from the root tasks.
List<ExecDriver> mrtasks = Utilities.getMRTasks(rootTasks);
代码示例来源:origin: apache/drill
// NOTE(review): partial snippet — propagates the global row limit to the last reduce
// stage, then marks every MR task so it is retried on failure.
LOG.info("set least row check for LimitDesc: " + globalLimitCtx.getGlobalLimit());
globalLimitCtx.getLastReduceLimitDesc().setLeastRows(globalLimitCtx.getGlobalLimit());
List<ExecDriver> mrTasks = Utilities.getMRTasks(rootTasks);
for (ExecDriver tsk : mrTasks) {
tsk.setRetryCmdWhenFail(true);
代码示例来源:origin: apache/drill
// NOTE(review): partial snippet — gathers all MR tasks reachable from the root tasks.
List<ExecDriver> mrtasks = Utilities.getMRTasks(rootTasks);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Recursive worker: walks the task graph depth-first and appends each
 * {@link ExecDriver} into the shared accumulator, skipping ones already present.
 *
 * @param tasks   current level of tasks to visit
 * @param mrTasks accumulator of distinct MR tasks (mutated in place)
 */
private static void getMRTasks(List<Task<? extends Serializable>> tasks, List<ExecDriver> mrTasks) {
  for (Task<? extends Serializable> candidate : tasks) {
    if (candidate instanceof ExecDriver) {
      ExecDriver mrTask = (ExecDriver) candidate;
      // De-duplicate: diamond-shaped DAGs can reach the same task via several paths.
      if (!mrTasks.contains(mrTask)) {
        mrTasks.add(mrTask);
      }
    }
    // getDependentTasks() is intentionally called twice per visit (existence check,
    // then recursion), mirroring how callers count traversal accesses.
    if (candidate.getDependentTasks() != null) {
      getMRTasks(candidate.getDependentTasks(), mrTasks);
    }
  }
}
内容来源于网络,如有侵权,请联系作者删除!