本文整理了Java中org.apache.hadoop.hive.ql.history.HiveHistory
类的一些代码示例,展示了HiveHistory
类的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。HiveHistory
类的具体详情如下:
包路径:org.apache.hadoop.hive.ql.history.HiveHistory
类名称:HiveHistory
[英]HiveHistory. Logs information such as query, query plan, runtime statistics into a file. Each session uses a new object, which creates a new file.
[中]HiveHistory:将查询、查询计划、运行时统计信息等记录到文件中。每个会话使用一个新对象,该对象会创建一个新的历史文件。
代码示例来源:origin: apache/hive
/**
 * Swaps the session's history implementation to match the
 * {@code hive.session.history.enabled} setting.
 *
 * @param historyEnabled whether query-history logging is enabled for the session
 * @param ss the session whose {@code hiveHist} field is replaced in place
 */
public void updateHistory(boolean historyEnabled, SessionState ss) {
  if (historyEnabled) {
    // A no-op proxy reports a null history file name; replace it with a
    // real file-backed implementation.
    if (ss.hiveHist.getHistFileName() == null) {
      ss.hiveHist = new HiveHistoryImpl(ss);
    }
  } else {
    // A real implementation has a history file name; replace it with the
    // no-op proxy so nothing further is written.
    if (ss.hiveHist.getHistFileName() != null) {
      ss.hiveHist = HiveHistoryProxyHandler.getNoOpHiveHistoryProxy();
    }
  }
}
代码示例来源:origin: apache/hive
HiveHistory hiveHist = sessionState.getHiveHistory();
if (null != hiveHist) {
hiveHist.closeStream();
代码示例来源:origin: apache/hive
/**
 * Writes a snapshot of the current query plan's progress into the
 * session's history.
 *
 * @param ss session whose {@code HiveHistory} receives the plan snapshot
 * @throws IOException if writing to the history stream fails
 */
@Override
public void logPlanProgress(SessionState ss) throws IOException {
  ss.getHiveHistory().logPlanProgress(queryPlan);
}
代码示例来源:origin: apache/hive
SessionState.get().getHiveHistory().startQuery(queryStr, queryId);
SessionState.get().getHiveHistory().logPlanProgress(plan);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
String.valueOf(jobs));
SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(0));
SessionState.get().getHiveHistory().printRowCount(queryId);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(12));
SessionState.get().getHiveHistory().endQuery(queryId);
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_HADOOP_PROGRESS, output);
ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this);
ss.getHiveHistory().logPlanProgress(queryPlan);
ss.getHiveHistory().logPlanProgress(queryPlan);
代码示例来源:origin: apache/drill
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
ss.getHiveHistory().progressTask(queryId, this.task);
this.callBackObj.logPlanProgress(ss);
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
代码示例来源:origin: apache/hive
/**
 * Records a single query property in the session history, doing nothing
 * when no session is attached to the current thread.
 *
 * @param key history property key to set
 * @param value value to record for the current query id
 */
private void addToHistory(Keys key, String value) {
  SessionState session = SessionState.get();
  if (session != null) {
    session.getHiveHistory().setQueryProperty(queryState.getQueryId(), key, value);
  }
}
代码示例来源:origin: apache/drill
SessionState.get().getHiveHistory().startQuery(queryStr,
conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
SessionState.get().getHiveHistory().logPlanProgress(plan);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
String.valueOf(jobs));
SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(0));
SessionState.get().getHiveHistory().printRowCount(queryId);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(12));
SessionState.get().getHiveHistory().endQuery(queryId);
代码示例来源:origin: apache/hive
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
ss.getHiveHistory().setTaskProperty(queryId, getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
ss.getHiveHistory().progressTask(queryId, this.task);
this.callBackObj.logPlanProgress(ss);
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(queryId, getId(), ctrs);
代码示例来源:origin: apache/drill
/**
 * Announces the submitted Spark job on the console and, when a session is
 * active on this thread, records its id as a query property in the history.
 *
 * @param jobRef handle to the Spark job that was just started
 */
private void addToHistory(SparkJobRef jobRef) {
  console.printInfo("Starting Spark Job = " + jobRef.getJobId());
  SessionState session = SessionState.get();
  if (session != null) {
    session.getHiveHistory()
        .setQueryProperty(queryState.getQueryId(), Keys.SPARK_JOB_ID, jobRef.getJobId());
  }
}
代码示例来源:origin: org.apache.hadoop.hive/hive-exec
SessionState.get().getHiveHistory().startQuery(queryStr,
conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
SessionState.get().getHiveHistory().logPlanProgress(plan);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
String.valueOf(jobs));
SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(0));
SessionState.get().getHiveHistory().printRowCount(queryId);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(12));
} finally {
if (SessionState.get() != null) {
SessionState.get().getHiveHistory().endQuery(queryId);
SessionState.get().getHiveHistory().logPlanProgress(plan);
} catch (Exception e) {
代码示例来源:origin: com.facebook.presto.hive/hive-apache
numMap = mappers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_NUM_MAPPERS, Integer.toString(numMap));
numReduce = reducers.length;
if (ss != null) {
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_NUM_REDUCERS, Integer.toString(numReduce));
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
ss.getHiveHistory().setTaskProperty(SessionState.get().getQueryId(), getId(),
Keys.TASK_HADOOP_PROGRESS, output);
if (ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS)) {
ss.getHiveHistory().progressTask(SessionState.get().getQueryId(), this.task);
this.callBackObj.logPlanProgress(ss);
SessionState ss = SessionState.get();
if (ss != null) {
ss.getHiveHistory().setTaskCounters(SessionState.get().getQueryId(), getId(), ctrs);
代码示例来源:origin: apache/drill
/**
 * Swaps the session's history implementation to match the
 * {@code hive.session.history.enabled} setting.
 *
 * @param historyEnabled whether query-history logging is enabled for the session
 * @param ss the session whose {@code hiveHist} field is replaced in place
 */
public void updateHistory(boolean historyEnabled, SessionState ss) {
  if (historyEnabled) {
    // A no-op proxy reports a null history file name; replace it with a
    // real file-backed implementation.
    if (ss.hiveHist.getHistFileName() == null) {
      ss.hiveHist = new HiveHistoryImpl(ss);
    }
  } else {
    // A real implementation has a history file name; replace it with the
    // no-op proxy so nothing further is written.
    if (ss.hiveHist.getHistFileName() != null) {
      ss.hiveHist = HiveHistoryProxyHandler.getNoOpHiveHistoryProxy();
    }
  }
}
代码示例来源:origin: org.spark-project.hive/hive-service
HiveHistory hiveHist = sessionState.getHiveHistory();
if (null != hiveHist) {
hiveHist.closeStream();
代码示例来源:origin: apache/drill
/**
 * Writes a snapshot of the current query plan's progress into the
 * session's history.
 *
 * @param ss session whose {@code HiveHistory} receives the plan snapshot
 * @throws IOException if writing to the history stream fails
 */
@Override
public void logPlanProgress(SessionState ss) throws IOException {
  ss.getHiveHistory().logPlanProgress(queryPlan);
}
代码示例来源:origin: com.facebook.presto.hive/hive-apache
/**
 * Announces the submitted Spark job on the console and, when a session is
 * active on this thread, records its id as a query property in the history.
 *
 * @param jobRef handle to the Spark job that was just started
 */
private void addToHistory(SparkJobRef jobRef) {
  console.printInfo("Starting Spark Job = " + jobRef.getJobId());
  SessionState session = SessionState.get();
  if (session != null) {
    session.getHiveHistory()
        .setQueryProperty(session.getQueryId(), Keys.SPARK_JOB_ID, jobRef.getJobId());
  }
}
代码示例来源:origin: com.facebook.presto.hive/hive-apache
SessionState.get().getHiveHistory().startQuery(queryStr,
conf.getVar(HiveConf.ConfVars.HIVEQUERYID));
SessionState.get().getHiveHistory().logPlanProgress(plan);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_NUM_TASKS,
String.valueOf(jobs));
SessionState.get().getHiveHistory().setIdToTableMap(plan.getIdToTableNameMap());
SessionState.get().getHiveHistory().setTaskProperty(queryId, tsk.getId(),
Keys.TASK_RET_CODE, String.valueOf(exitVal));
SessionState.get().getHiveHistory().endTask(queryId, tsk);
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(0));
SessionState.get().getHiveHistory().printRowCount(queryId);
ctx.restoreOriginalTracker();
if (SessionState.get() != null) {
SessionState.get().getHiveHistory().setQueryProperty(queryId, Keys.QUERY_RET_CODE,
String.valueOf(12));
} finally {
if (SessionState.get() != null) {
SessionState.get().getHiveHistory().endQuery(queryId);
try {
SessionState.get().getLineageState().clear();
SessionState.get().getHiveHistory().logPlanProgress(plan);
} catch (Exception e) {
代码示例来源:origin: com.github.hyukjinkwon/hive-hwi
historyFile = SessionState.get().getHiveHistory().getHistFileName();
l4j.debug("HWISessionItem itemInit Complete " + getSessionName());
status = WebSessionItemStatus.READY;
代码示例来源:origin: com.github.hyukjinkwon/hive-service
HiveHistory hiveHist = sessionState.getHiveHistory();
if (null != hiveHist) {
hiveHist.closeStream();
代码示例来源:origin: apache/hive
/**
 * Driver-side entry point for running this task. Marks the task as started,
 * logs plan progress before and after execution (when a history sink is
 * supplied), and delegates the real work to {@code execute()}.
 *
 * @param hiveHistory history sink for plan-progress snapshots; may be null
 * @return the return value of {@code execute()}
 */
public int executeTask(HiveHistory hiveHistory) {
  try {
    setStarted();
    if (hiveHistory != null) {
      hiveHistory.logPlanProgress(queryPlan);
    }
    if (conf != null) {
      LOG.debug("Task getting executed using mapred tag : " + conf.get(MRJobConfig.JOB_TAGS));
    }
    final int exitCode = execute(driverContext);
    setDone();
    if (hiveHistory != null) {
      hiveHistory.logPlanProgress(queryPlan);
    }
    return exitCode;
  } catch (IOException e) {
    // History-logging I/O failures are not recoverable here; surface them
    // unchecked while preserving the original cause.
    throw new RuntimeException("Unexpected error: " + e.getMessage(), e);
  }
}
内容来源于网络,如有侵权,请联系作者删除!