本文整理了Java中org.apache.hadoop.hive.ql.history.HiveHistory.setTaskProperty()
方法的一些代码示例,展示了HiveHistory.setTaskProperty()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HiveHistory.setTaskProperty()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.history.HiveHistory
类名称:HiveHistory
方法名:setTaskProperty
[英]Used to set task properties.
[中]用于设置任务属性。
代码示例来源:origin: apache/drill
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
代码示例来源:origin: apache/hive
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
代码示例来源:origin: apache/hive
/**
 * Reports where a submitted MapReduce job is running. For local-mode jobs it
 * only prints a notice; for cluster jobs it records the Hadoop job id in the
 * Hive query history (when a session is active) and prints the tracking URL
 * plus a ready-to-paste kill command. Originally adapted from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  // In-process (local Hadoop) jobs have no tracker to point at — say so and stop.
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
    return;
  }
  SessionState ss = SessionState.get();
  if (ss != null) {
    // Persist the Hadoop job id so the history file can correlate this task.
    ss.getHiveHistory().setTaskProperty(queryId, getId(),
        Keys.TASK_HADOOP_ID, rj.getID().toString());
  }
  console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = "
      + rj.getTrackingURL());
  console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPREDBIN)
      + " job -kill " + rj.getID());
}
代码示例来源:origin: apache/drill
/**
 * Prints status information for a submitted MapReduce job. Cluster jobs get
 * their Hadoop job id stored in the Hive query history (when a session exists)
 * along with a tracking URL and kill command; local-mode jobs get a short
 * in-process notice instead. Originally adapted from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  boolean localMode = ShimLoader.getHadoopShims().isLocalMode(job);
  if (!localMode) {
    if (SessionState.get() != null) {
      // Record the Hadoop job id so history entries can be tied to this task.
      SessionState.get().getHiveHistory().setTaskProperty(queryId,
          getId(), Keys.TASK_HADOOP_ID, rj.getID().toString());
    }
    console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = "
        + rj.getTrackingURL());
    console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)
        + " job -kill " + rj.getID());
  } else {
    console.printInfo("Job running in-process (local Hadoop)");
  }
}
代码示例来源:origin: apache/drill
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
代码示例来源:origin: apache/hive
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
/**
 * Prints status for a submitted job. When the configured job tracker is
 * "local" the job runs in-process and only a notice is printed; otherwise the
 * Hadoop job id is stored in the Hive query history (when a session exists)
 * and the tracking URL and a kill command — including the tracker address —
 * are printed. Originally adapted from StreamJob.java.
 */
private void jobInfo(RunningJob rj) {
  // A "local" tracker means an in-process job: nothing to track or kill.
  if (job.get("mapred.job.tracker", "local").equals("local")) {
    console.printInfo("Job running in-process (local Hadoop)");
    return;
  }
  String tracker = job.get("mapred.job.tracker");
  if (SessionState.get() != null) {
    // Tie the Hadoop job id to this task in the query history.
    SessionState.get().getHiveHistory().setTaskProperty(SessionState.get().getQueryId(),
        getId(), Keys.TASK_HADOOP_ID, rj.getJobID());
  }
  console.printInfo(ExecDriver.getJobStartMsg(rj.getJobID()) + ", Tracking URL = "
      + rj.getTrackingURL());
  // The kill command must target the same tracker the job was submitted to.
  console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)
      + " job -Dmapred.job.tracker=" + tracker + " -kill " + rj.getJobID());
}
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
if (ss != null) {
ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_HADOOP_PROGRESS, output);
ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
if (ss != null) {
ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Reports how a submitted job is running. Local-mode jobs produce only an
 * in-process notice; cluster jobs have their Hadoop job id recorded in the
 * Hive query history (when a session is active) and their tracking URL and
 * kill command printed. Originally adapted from StreamJob.java.
 */
public void jobInfo(RunningJob rj) {
  // Nothing to track or kill for an in-process (local Hadoop) job.
  if (ShimLoader.getHadoopShims().isLocalMode(job)) {
    console.printInfo("Job running in-process (local Hadoop)");
    return;
  }
  SessionState session = SessionState.get();
  if (session != null) {
    // Store the Hadoop job id so the history can correlate this task.
    session.getHiveHistory().setTaskProperty(session.getQueryId(),
        getId(), Keys.TASK_HADOOP_ID, rj.getID().toString());
  }
  console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = "
      + rj.getTrackingURL());
  console.printInfo("Kill Command = " + HiveConf.getVar(job, HiveConf.ConfVars.HADOOPBIN)
      + " job -kill " + rj.getID());
}
代码示例来源:origin: com.facebook.presto.hive/hive-apache
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
内容来源于网络,如有侵权,请联系作者删除!