Usage of the org.apache.hadoop.hive.ql.exec.Utilities.setReduceWork() method, with code examples

This article collects a number of Java code examples for the org.apache.hadoop.hive.ql.exec.Utilities.setReduceWork() method and shows how it is used in practice. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they should make a useful reference. Details of Utilities.setReduceWork():
Package: org.apache.hadoop.hive.ql.exec
Class: Utilities
Method: setReduceWork

Utilities.setReduceWork introduction

No official description is available. Judging from the examples below, setReduceWork publishes the reduce-side half of a Hive query plan (a ReduceWork) into a Hadoop Configuration: the four-argument overload setReduceWork(conf, work, scratchDir, useCache) serializes the plan under the given scratch directory, the two-argument overload caches an already-built plan in the JobConf, and passing null clears a previously set plan. Utilities.getReduceWork(conf) performs the reverse lookup.
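
Before the individual snippets, here is a minimal sketch tying together the three call shapes the examples use. It is for orientation only, not taken from any of the projects below; the class and the helper names publish/read/clear are invented.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.ReduceWork;
import org.apache.hadoop.mapred.JobConf;

public class SetReduceWorkSketch {

 // Planning side: serialize the reduce plan under the scratch directory
 // and cache it in the configuration (useCache = true).
 static void publish(JobConf conf, ReduceWork work, Path scratchDir) {
  Utilities.setReduceWork(conf, work, scratchDir, true);
 }

 // Task side: read the previously published plan back.
 static ReduceWork read(JobConf conf) {
  return Utilities.getReduceWork(conf);
 }

 // Cleanup/error path: drop the cached plan, as some fragments below do.
 static void clear(JobConf conf) {
  Utilities.setReduceWork(conf, null);
 }
}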

Code examples

Code example source: apache/hive

public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
 String useName = conf.get(INPUT_NAME);
 if (useName == null) {
  useName = "mapreduce:" + hiveScratchDir;
 }
 conf.set(INPUT_NAME, useName);
 setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
 if (w.getReduceWork() != null) {
  conf.set(INPUT_NAME, useName);
  setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true);
 }
}

Code example source: apache/drill (an identical copy ships in com.facebook.presto.hive/hive-apache)

public static void setMapRedWork(Configuration conf, MapredWork w, Path hiveScratchDir) {
 String useName = conf.get(INPUT_NAME);
 if (useName == null) {
  useName = "mapreduce";
 }
 conf.set(INPUT_NAME, useName);
 setMapWork(conf, w.getMapWork(), hiveScratchDir, true);
 if (w.getReduceWork() != null) {
  conf.set(INPUT_NAME, useName);
  setReduceWork(conf, w.getReduceWork(), hiveScratchDir, true);
 }
}
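
A small difference between the two copies: the apache/hive version defaults INPUT_NAME to "mapreduce:" + hiveScratchDir, while the apache/drill (and Presto) version defaults to the plain string "mapreduce". Both re-set INPUT_NAME before configuring the reduce side, and both publish the map and reduce halves of the plan with useCache = true.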

Code example source: apache/hive

public ReduceRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception {
 super(jconf, context);
 String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID);
 cache = ObjectCacheFactory.getCache(jconf, queryId, true);
 dynamicValueCache = ObjectCacheFactory.getCache(jconf, queryId, false, true);
 String cacheKey = processorContext.getTaskVertexName() + REDUCE_PLAN_KEY;
 cacheKeys = Lists.newArrayList(cacheKey);
 dynamicValueCacheKeys = new ArrayList<String>();
 reduceWork = (ReduceWork) cache.retrieve(cacheKey, new Callable<Object>() {
  @Override
  public Object call() {
   return Utilities.getReduceWork(jconf);
  }
 });
 Utilities.setReduceWork(jconf, reduceWork);
 mergeWorkList = getMergeWorkList(jconf, cacheKey, queryId, cache, cacheKeys);
}
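
The pattern here is worth spelling out: the reduce plan is fetched through the per-query ObjectCache, with Utilities.getReduceWork(jconf) acting as the loader that deserializes the plan on a cache miss; the plan is then re-published into the JobConf via the two-argument setReduceWork, presumably so that later code running in the same task can find it without going back to the cache.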

Code example source: apache/drill (the same fragment appears verbatim in apache/hive and com.facebook.presto.hive/hive-apache)

} else if (work instanceof ReduceWork) {
 cloned.setBoolean("mapred.task.is.map", false);
 Utilities.setReduceWork(cloned, (ReduceWork) work, scratchDir, false);
 Utilities.createTmpDirs(cloned, (ReduceWork) work);
 cloned.set(Utilities.MAPRED_REDUCER_CLASS, ExecReducer.class.getName());
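
This fragment is cut from the middle of a larger dispatch method (hence the leading close brace): for a reduce-side plan it flags the cloned JobConf as a non-map task, publishes the plan without caching (useCache = false), creates the temporary directories that FileSinkOperators need, and registers ExecReducer as the reducer class.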

Code example source: apache/drill

public ReduceRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception {
 super(jconf, context);
 String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID);
 cache = ObjectCacheFactory.getCache(jconf, queryId, true);
 dynamicValueCache = ObjectCacheFactory.getCache(jconf, queryId, false);
 String cacheKey = processorContext.getTaskVertexName() + REDUCE_PLAN_KEY;
 cacheKeys = Lists.newArrayList(cacheKey);
 dynamicValueCacheKeys = new ArrayList<String>();
 reduceWork = (ReduceWork) cache.retrieve(cacheKey, new Callable<Object>() {
  @Override
  public Object call() {
   return Utilities.getReduceWork(jconf);
  }
 });
 Utilities.setReduceWork(jconf, reduceWork);
 mergeWorkList = getMergeWorkList(jconf, cacheKey, queryId, cache, cacheKeys);
}

Code example source: apache/hive (the same fragment appears verbatim in apache/drill and com.facebook.presto.hive/hive-apache)

Utilities.setReduceWork(jc, null);
if (e instanceof OutOfMemoryError) {
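
Passing null as the second argument clears any previously set reduce plan; the if (e instanceof OutOfMemoryError) line suggests this fragment sits in an error-handling path that tears the plan down before deciding how to handle the failure.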

Code example source: apache/drill

private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
  LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
  Path mrScratchDir, Context ctx) throws Exception {
 // set up operator plan
 conf.set(Utilities.INPUT_NAME, reduceWork.getName());
 Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
 // create the directories FileSinkOperators need
 Utilities.createTmpDirs(conf, reduceWork);
 VertexExecutionContext vertexExecutionContext = createVertexExecutionContext(reduceWork);
 // create the vertex
 Vertex reducer = Vertex.create(reduceWork.getName(),
   ProcessorDescriptor.create(ReduceTezProcessor.class.getName()).
     setUserPayload(TezUtils.createUserPayloadFromConf(conf)),
   reduceWork.isAutoReduceParallelism() ?
     reduceWork.getMaxReduceTasks() :
     reduceWork.getNumReduceTasks(), getContainerResource(conf));
 reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
 reducer.setExecutionContext(vertexExecutionContext);
 reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
 Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
 localResources.put(getBaseName(appJarLr), appJarLr);
 for (LocalResource lr: additionalLr) {
  localResources.put(getBaseName(lr), lr);
 }
 reducer.addTaskLocalFiles(localResources);
 return reducer;
}
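
Note how the vertex parallelism is chosen: with auto reduce parallelism enabled the vertex is created with getMaxReduceTasks(), presumably so Tez can scale the reducer count down at runtime, and with getNumReduceTasks() otherwise. setReduceWork is called with useCache = false, and it runs before TezUtils.createUserPayloadFromConf(conf) so that the settings it writes into conf are captured in the vertex payload.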

Code example source: apache/hive

private Vertex createVertex(JobConf conf, ReduceWork reduceWork, FileSystem fs,
  Path mrScratchDir, Context ctx, Map<String, LocalResource> localResources)
    throws Exception {
 // set up operator plan
 conf.set(Utilities.INPUT_NAME, reduceWork.getName());
 Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
 // create the directories FileSinkOperators need
 Utilities.createTmpDirs(conf, reduceWork);
 VertexExecutionContext vertexExecutionContext = createVertexExecutionContext(reduceWork);
 // create the vertex
 Vertex reducer = Vertex.create(reduceWork.getName(),
   ProcessorDescriptor.create(ReduceTezProcessor.class.getName()).
     setUserPayload(TezUtils.createUserPayloadFromConf(conf)),
   reduceWork.isAutoReduceParallelism() ?
     reduceWork.getMaxReduceTasks() :
     reduceWork.getNumReduceTasks(), getContainerResource(conf));
 reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
 reducer.setExecutionContext(vertexExecutionContext);
 reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
 reducer.addTaskLocalFiles(localResources);
 return reducer;
}
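
Compared with the apache/drill variant above, this apache/hive version takes the localResources map as a parameter instead of assembling it from appJarLr and additionalLr; the setReduceWork call and the rest of the vertex setup are unchanged.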

Code example source: com.facebook.presto.hive/hive-apache

public ReduceRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception {
 super(jconf, context);
 ObjectCache cache = ObjectCacheFactory.getCache(jconf);
 String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID);
 cacheKey = queryId + REDUCE_PLAN_KEY;
 cacheKeys = new ArrayList<String>();
 cacheKeys.add(cacheKey);
 reduceWork = (ReduceWork) cache.retrieve(cacheKey, new Callable<Object>() {
  @Override
  public Object call() {
   return Utilities.getReduceWork(jconf);
  }
 });
 Utilities.setReduceWork(jconf, reduceWork);
 mergeWorkList = getMergeWorkList(jconf, cacheKey, queryId, cache, cacheKeys);
}
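
This older variant bundled with Presto keys the cached plan on queryId + REDUCE_PLAN_KEY rather than on the task vertex name, and has no dynamic-value cache; the setReduceWork call at the end plays the same role as in the newer versions above.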

Code example source: com.facebook.presto.hive/hive-apache

private Vertex createVertex(JobConf conf, ReduceWork reduceWork,
  LocalResource appJarLr, List<LocalResource> additionalLr, FileSystem fs,
  Path mrScratchDir, Context ctx) throws Exception {
 // set up operator plan
 conf.set(Utilities.INPUT_NAME, reduceWork.getName());
 Utilities.setReduceWork(conf, reduceWork, mrScratchDir, false);
 // create the directories FileSinkOperators need
 Utilities.createTmpDirs(conf, reduceWork);
 // create the vertex
 Vertex reducer = Vertex.create(reduceWork.getName(),
   ProcessorDescriptor.create(ReduceTezProcessor.class.getName()).
   setUserPayload(TezUtils.createUserPayloadFromConf(conf)),
     reduceWork.isAutoReduceParallelism() ? reduceWork.getMaxReduceTasks() : reduceWork
       .getNumReduceTasks(), getContainerResource(conf));
 reducer.setTaskEnvironment(getContainerEnvironment(conf, false));
 reducer.setTaskLaunchCmdOpts(getContainerJavaOpts(conf));
 Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
 localResources.put(getBaseName(appJarLr), appJarLr);
 for (LocalResource lr: additionalLr) {
  localResources.put(getBaseName(lr), lr);
 }
 reducer.addTaskLocalFiles(localResources);
 return reducer;
}
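
This is essentially the apache/drill createVertex shown above, apparently predating the VertexExecutionContext handling seen in the other two variants; the INPUT_NAME / setReduceWork / createTmpDirs sequence is identical.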
