Usage of the org.apache.hadoop.mapred.Task.setConf() method, with code examples


This article collects Java code examples of the org.apache.hadoop.mapred.Task.setConf() method and shows how Task.setConf() is used in practice. The examples come mainly from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as solid references. Details of Task.setConf() are as follows:

Package: org.apache.hadoop.mapred
Class: Task
Method: setConf

About Task.setConf

No description is provided by the source.
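
Although the source gives no description, the examples collected below all follow the same pattern: construct a Task subclass (MapTask, ReduceTask, or a test dummy), call setConf() to attach a Configuration/JobConf, and only then initialize or run the task. The following minimal sketch assembles that sequence from the snippets below; the attempt-ID values and constructor arguments are placeholders, and the class sits in the org.apache.hadoop.mapred package because Hadoop's own tests use these constructors from that package:

// A minimal sketch of the setConf() call sequence, assuming the
// package-level visibility used by Hadoop's own tests.
package org.apache.hadoop.mapred;

import org.apache.hadoop.mapreduce.TaskType;

public class SetConfSketch {
 public static void main(String[] args) throws Exception {
  JobConf conf = new JobConf();
  // placeholder attempt ID, mirroring the test snippets below
  TaskAttemptID attemptID = new TaskAttemptID("demo", 0, TaskType.MAP, 0, 0);
  // MapTask(jobFile, taskId, partition, splitInfo, numSlotsRequired)
  Task task = new MapTask(null, attemptID, 0, null, 1);
  task.setConf(conf); // attach the configuration first...
  // ...then initialize (and later run) the task
  task.initialize(conf, attemptID.getJobID(), Reporter.NULL, false);
 }
}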

Code examples

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

// Mockito-based fixture: task, umbilical, and feedback are fields of the
// enclosing test class.
private void setupTest(boolean uberized)
  throws IOException, InterruptedException {
 Configuration conf = new Configuration(false);
 conf.setBoolean("mapreduce.task.uberized", uberized);
 task.setConf(conf);
 when(umbilical.statusUpdate(any(TaskAttemptID.class),
   any(TaskStatus.class))).thenReturn(feedback);
 // to avoid possible infinite loop
 when(feedback.getTaskFound()).thenReturn(false, true);
}

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

@Test (timeout=10000)
public void testTaskProgress() throws Exception {
 JobConf job = new JobConf();
 // report progress every 1000 ms
 job.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 1000);
 Task task = new DummyTask();
 task.setConf(job);
 DummyTaskReporter reporter = new DummyTaskReporter(task);
 Thread t = new Thread(reporter);
 t.start();
 // sleeping 2100 ms lets exactly two 1000 ms report intervals elapse
 Thread.sleep(2100);
 task.setTaskDone();
 reporter.resetDoneFlag();
 t.join();
 Assert.assertEquals(2, statusUpdateTimes);
}

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

// fragment: attach the conf, then drive a DummyTaskReporter on its own thread
task.setConf(conf);
DummyTaskReporter reporter = new DummyTaskReporter(task);
Thread t = new Thread(reporter);

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

public void testScratchDirLimit(boolean fastFail, JobConf conf)
    throws Exception {
 ExitUtil.disableSystemExit();
 threadExited = false;
 // record whether the reporter thread died with an ExitException
 Thread.UncaughtExceptionHandler h = new Thread.UncaughtExceptionHandler() {
  public void uncaughtException(Thread th, Throwable ex) {
   if (ex instanceof ExitUtil.ExitException) {
    threadExited = true;
    th.interrupt();
   }
  }
 };
 Task task = new DummyTask();
 task.setConf(conf);
 DummyTaskReporter reporter = new DummyTaskReporter(task);
 reporter.startDiskLimitCheckerThreadIfNeeded();
 Thread t = new Thread(reporter);
 t.setUncaughtExceptionHandler(h);
 reporter.setProgressFlag();
 t.start();
 // wait until the disk-limit check has actually run
 while (!reporter.taskLimitIsChecked) {
  Thread.yield();
 }
 task.done(fakeUmbilical, reporter);
 reporter.resetDoneFlag();
 t.join(1000L);
 // with fastFail, the limit breach should have exited the reporter thread
 Assert.assertEquals(fastFail, threadExited);
}

Code example source: origin: com.facebook.hadoop/hadoop-core

@Override
public void setConf(Configuration conf) {
 super.setConf(conf);
 // read a subclass-specific setting once the conf is attached
 this.maxCopyBackoff = conf.getInt("mapred.reduce.copy.backoff.max", 10);
}
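
Note the shape of this override: it calls super.setConf(conf) first, so the base Task still receives and stores the configuration, and only afterwards reads the subclass-specific setting from it.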

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

private void createTask()
  throws IOException {
 task = new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1);
 task.setConf(jobConf); // Set conf. Set user name in particular.
 task.setUser(jobConf.getUser());
}

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

// fragment from a TaskTracker test loop (i is the loop index, tt the TaskTracker)
TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0);
Task task = new MapTask(null, attemptID, i, null, 1);
task.setConf(taskConf);
TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
File pidFile = new File(TEST_DIR, "pid_" + i);

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

private Task createDummyTask(TaskType type) throws IOException, ClassNotFoundException,
InterruptedException {
 JobConf conf = new JobConf();
 conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
 Path outDir = new Path(rootDir, "output"); 
 FileOutputFormat.setOutputPath(conf, outDir);
 JobID jobId = JobID.forName("job_201002121132_0001");
 Task testTask;
 if (type == TaskType.MAP) {
  testTask = new MapTask();
 } else {
  testTask = new ReduceTask();
 }
 testTask.setConf(conf);
 testTask.initialize(conf, jobId, Reporter.NULL, false);
 return testTask;
}

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

// fragment: create a ReduceTask, attach the conf, then run it against a fake umbilical
task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps);
task.setConf(conf);
task.run(conf, new FakeUmbilical());

Code example source: origin: com.facebook.hadoop/hadoop-core

1, conf.getUser());
task.setConf(conf);
task.run(conf, new FakeUmbilical());

Code example source: origin: com.facebook.hadoop/hadoop-core

new File(userLogsDir, "stderr"))));
task.setConf(job);

Code example source: origin: org.apache.hadoop/hadoop-mapred

task.setConf(localJobConf);

Code example source: origin: org.apache.hadoop/hadoop-mapred

cleanupTasks.put(taskid, taskTracker);
t.setConf(conf);
t.setUser(getUser());
if (LOG.isDebugEnabled()) {

Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core

jobtracker.removeTaskEntry(taskid);
t.setConf(conf);
LOG.debug("Launching task with skipRanges:"+failedRanges.getSkipRanges());
t.setSkipRanges(failedRanges.getSkipRanges());

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-app

// fragment: persist the job conf locally, point the task at that file, then attach the conf
writeLocalJobFile(localTaskFile, job);
task.setJobFile(localTaskFile.toString());
task.setConf(job);

Code example source: origin: org.apache.hadoop/hadoop-mapred-test

// fragment: the full setup sequence: setConf, localizeConfiguration, then initialize
attemptID = TaskAttemptID.forName(dummyAttemptID);
Task task = new MapTask(null, attemptID, 0, null, 1);
task.setConf(job);
task.localizeConfiguration(job);
task.initialize(job, attemptID.getJobID(), Reporter.NULL, false);
