This article collects code examples of the Java method org.apache.hadoop.mapred.Task.isTaskCleanupTask() and shows how Task.isTaskCleanupTask() is used in practice. The examples are extracted from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should make useful references. Details of the Task.isTaskCleanupTask() method:

Package path: org.apache.hadoop.mapred.Task
Class name: Task
Method name: isTaskCleanupTask
Method description: none is given in the source; judging from the usages below, it returns true when this attempt is a task-cleanup attempt, i.e. one launched only to run the cleanup of a failed or killed attempt of the same task.
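Before the examples, a minimal sketch of the call itself. This is a sketch only: the Task instance would be handed to you by the framework, the helper name is hypothetical, and the ".cleanup" literal is assumed to match the TaskTracker.TASK_CLEANUP_SUFFIX constant seen in the examples below.

import org.apache.hadoop.mapred.Task;

// Hypothetical helper: derive a per-attempt directory name. A task-cleanup
// attempt gets a suffixed name so its files stay separate from those of the
// original (failed or killed) attempt.
static String attemptDirName(Task task) {
  String name = task.getTaskID().toString();
  if (task.isTaskCleanupTask()) {
    name = name + ".cleanup"; // assumed value of TaskTracker.TASK_CLEANUP_SUFFIX
  }
  return name;
}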
Code example source: origin: org.apache.hadoop/hadoop-mapred
private void setupLog4jProperties(Vector<String> vargs, TaskAttemptID taskid,
    long logSize) {
  vargs.add("-Dhadoop.log.dir=" +
      new File(System.getProperty("hadoop.log.dir")).getAbsolutePath());
  vargs.add("-Dhadoop.root.logger=" + getLogLevel(conf).toString() + ",TLA");
  vargs.add("-D" + TaskLogAppender.TASKID_PROPERTY + "=" + taskid);
  vargs.add("-D" + TaskLogAppender.ISCLEANUP_PROPERTY +
      "=" + t.isTaskCleanupTask());
  vargs.add("-D" + TaskLogAppender.LOGSIZE_PROPERTY + "=" + logSize);
}
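TaskLogAppender reads these system properties inside the child JVM; forwarding t.isTaskCleanupTask() as ISCLEANUP_PROPERTY lets it keep the cleanup attempt's logs apart from those of the original attempt (the same flag is passed to TaskLog in getAllLogsFileDetails further below).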
Code example source: origin: com.facebook.hadoop/hadoop-core

private String getTaskCacheDirectory(TaskControllerContext context) {
  // In the case of JVM reuse, the task-specific directory differs from what
  // is set in env.workDir, hence it is rebuilt from the taskId every time.
  String taskId = context.task.getTaskID().toString();
  File cacheDirForJob = context.env.workDir.getParentFile().getParentFile();
  if (context.task.isTaskCleanupTask()) {
    taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX;
  }
  return new File(cacheDirForJob, taskId).getAbsolutePath();
}
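This is the pattern that recurs throughout the remaining examples: when isTaskCleanupTask() returns true, the attempt ID is suffixed with TaskTracker.TASK_CLEANUP_SUFFIX, or the boolean is forwarded to a helper such as TaskTracker.getLocalTaskDir(), so the cleanup attempt gets its own directory.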
Code example source: origin: com.facebook.hadoop/hadoop-core
private List<String> buildTaskCleanupArgs(
    TaskControllerPathDeletionContext context) {
  List<String> commandArgs = new ArrayList<String>(3);
  commandArgs.add(context.mapredLocalDir.toUri().getPath());
  commandArgs.add(context.task.getJobID().toString());
  String workDir = "";
  if (context.isWorkDir) {
    workDir = "/work";
  }
  if (context.task.isTaskCleanupTask()) {
    commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX
        + workDir);
  } else {
    commandArgs.add(context.task.getTaskID() + workDir);
  }
  return commandArgs;
}
Code example source: origin: org.apache.hadoop/hadoop-mapred-test
void validateNumSlotsUsedForTaskCleanup(TaskTrackerStatus ttStatus)
    throws IOException {
  List<Task> tasks = jobTracker.getSetupAndCleanupTasks(ttStatus);
  assertEquals("Actual number of taskCleanup tasks is not same as expected",
      1, tasks.size());
  LOG.info("taskCleanup task is " + tasks.get(0));
  assertTrue(tasks.get(0).isTaskCleanupTask());
  // slots needed for a taskCleanup task should be 1 (even for high-RAM jobs)
  assertEquals("TaskCleanup task should not need more than 1 slot.",
      1, tasks.get(0).getNumSlotsRequired());
}
Code example source: origin: com.facebook.hadoop/hadoop-core
private String getDirectoryChosenForTask(File directory,
    TaskControllerContext context) {
  String jobId = getJobId(context);
  String taskId = context.task.getTaskID().toString();
  for (String dir : mapredLocalDirs) {
    File mapredDir = new File(dir);
    File taskDir = new File(mapredDir, TaskTracker.getLocalTaskDir(
        jobId, taskId, context.task.isTaskCleanupTask()));
    if (directory.equals(taskDir)) {
      return dir;
    }
  }
  LOG.error("Couldn't parse task cache directory correctly");
  throw new IllegalArgumentException("invalid task cache directory "
      + directory.getAbsolutePath());
}
Code example source: origin: com.facebook.hadoop/hadoop-core

/**
 * Get the log-file details for all the attempts passed in.
 *
 * @param allAttempts the list of attempts
 * @return a map of task to the log-file detail
 * @throws IOException
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(
    final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails =
      new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    Map<LogName, LogFileDetail> allLogsFileDetails;
    allLogsFileDetails =
        TaskLog.getAllLogsFileDetails(task.getTaskID(),
            task.isTaskCleanupTask());
    taskLogFileDetails.put(task, allLogsFileDetails);
  }
  return taskLogFileDetails;
}
Code example source: origin: com.facebook.hadoop/hadoop-core

/**
 * Builds the path of the taskAttemptDir OR taskWorkDir based on
 * mapredLocalDir, jobId, taskId, etc.
 */
String buildPathForDeletion() {
  String subDir = TaskTracker.getLocalTaskDir(task.getJobID().toString(),
      task.getTaskID().toString(), task.isTaskCleanupTask());
  if (isWorkDir) {
    subDir = subDir + Path.SEPARATOR + "work";
  }
  return mapredLocalDir.toUri().getPath() + Path.SEPARATOR + subDir;
}
Code example source: origin: com.facebook.hadoop/hadoop-core

/**
 * Returns the list of arguments to be passed while launching the task VM.
 * See {@code buildTaskControllerExecutor(TaskCommands,
 * String, List<String>, JvmEnv)} documentation.
 * @param context
 * @return the arguments to be used while launching the task VM
 */
private List<String> buildLaunchTaskArgs(TaskControllerContext context) {
  List<String> commandArgs = new ArrayList<String>(3);
  String taskId = context.task.getTaskID().toString();
  String jobId = getJobId(context);
  LOG.debug("getting the task directory as: "
      + getTaskCacheDirectory(context));
  commandArgs.add(getDirectoryChosenForTask(
      new File(getTaskCacheDirectory(context)),
      context));
  commandArgs.add(jobId);
  if (!context.task.isTaskCleanupTask()) {
    commandArgs.add(taskId);
  } else {
    commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX);
  }
  return commandArgs;
}
Code example source: origin: com.facebook.hadoop/hadoop-core

private void setupTaskCacheFileAccess(TaskControllerContext context) {
  String taskId = context.task.getTaskID().toString();
  JobID jobId = JobID.forName(getJobId(context));
  // Change permissions for the task across all the disks
  for (String localDir : mapredLocalDirs) {
    File f = new File(localDir);
    File taskCacheDir = new File(f, TaskTracker.getLocalTaskDir(
        jobId.toString(), taskId, context.task.isTaskCleanupTask()));
    if (taskCacheDir.exists()) {
      changeDirectoryPermissions(taskCacheDir.getPath(),
          FILE_PERMISSIONS, true);
    }
  } // end of local-directory iteration
}
Code example source: origin: io.hops/hadoop-mapreduce-client-common

static void setupChildMapredLocalDirs(Task t, JobConf conf) {
  String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR);
  String jobId = t.getJobID().toString();
  String taskId = t.getTaskID().toString();
  boolean isCleanup = t.isTaskCleanupTask();
  String user = t.getUser();
  StringBuffer childMapredLocalDir =
      new StringBuffer(localDirs[0] + Path.SEPARATOR
          + getLocalTaskDir(user, jobId, taskId, isCleanup));
  for (int i = 1; i < localDirs.length; i++) {
    childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
        + getLocalTaskDir(user, jobId, taskId, isCleanup));
  }
  LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
  conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
}

The hadoop-mapreduce-client-common artifacts from com.github.jiayuhan-it, org.apache.hadoop, and ch.cern.hadoop ship this same method verbatim.
Code example source: origin: org.apache.hadoop/hadoop-mapred

/**
 * Prepare the Configs.LOCAL_DIR for the child. The child is sand-boxed now.
 * Whenever it uses LocalDirAllocator from now on inside the child, it will
 * only see files inside the attempt directory. This is done in the child's
 * process space.
 */
static void setupChildMapredLocalDirs(Task t, JobConf conf) {
  String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR);
  String jobId = t.getJobID().toString();
  String taskId = t.getTaskID().toString();
  boolean isCleanup = t.isTaskCleanupTask();
  String user = t.getUser();
  StringBuffer childMapredLocalDir =
      new StringBuffer(localDirs[0] + Path.SEPARATOR
          + TaskTracker.getLocalTaskDir(user, jobId, taskId, isCleanup));
  for (int i = 1; i < localDirs.length; i++) {
    childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
        + TaskTracker.getLocalTaskDir(user, jobId, taskId, isCleanup));
  }
  LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
  conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
}
Code example source: origin: org.jvnet.hudson.hadoop/hadoop-core
public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
  this.task = task;
  this.launcher = launcher;
  this.lastProgressReport = System.currentTimeMillis();
  this.defaultJobConf = conf;
  localJobConf = null;
  taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
      0.0f,
      task.getState(),
      diagnosticInfo.toString(),
      "initializing",
      getName(),
      task.isTaskCleanupTask() ? TaskStatus.Phase.CLEANUP :
          task.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.SHUFFLE,
      task.getCounters());
  taskTimeout = (10 * 60 * 1000);
}
Code example source: origin: org.apache.hadoop/hadoop-mapred
public TaskInProgress(Task task, JobConf conf, TaskLauncher launcher) {
  this.task = task;
  this.launcher = launcher;
  this.lastProgressReport = System.currentTimeMillis();
  this.defaultJobConf = conf;
  localJobConf = null;
  taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
      0.0f,
      task.getNumSlotsRequired(),
      task.getState(),
      diagnosticInfo.toString(),
      "initializing",
      getName(),
      task.isTaskCleanupTask() ? TaskStatus.Phase.CLEANUP :
          task.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.SHUFFLE,
      task.getCounters());
  taskTimeout = (10 * 60 * 1000);
}
Code example source: origin: com.facebook.hadoop/hadoop-core
public TaskInProgress(Task task, JobConf conf,
    TaskLauncher launcher, Writable extensible) {
  this.task = task;
  this.launcher = launcher;
  this.lastProgressReport = System.currentTimeMillis();
  this.defaultJobConf = conf;
  this.extensible = extensible;
  localJobConf = null;
  taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(),
      0.0f,
      task.getNumSlotsRequired(),
      task.getState(),
      diagnosticInfo.toString(),
      "initializing",
      getName(),
      task.isTaskCleanupTask() ? TaskStatus.Phase.CLEANUP :
          task.isMapTask() ? TaskStatus.Phase.MAP : TaskStatus.Phase.SHUFFLE,
      task.getCounters());
  taskTimeout = (10 * 60 * 1000);
}
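All three constructors pick the initial phase with the same ternary: a task-cleanup attempt starts directly in TaskStatus.Phase.CLEANUP, while a regular attempt starts in MAP for a map task or SHUFFLE for a reduce task.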
Code example source: origin: org.apache.hadoop/hadoop-mapred

/**
 * Write the child's configuration to disk and set it in the configuration so
 * that the child can pick it up from there.
 *
 * @param lDirAlloc
 * @throws IOException
 */
void setupChildTaskConfiguration(LocalDirAllocator lDirAlloc)
    throws IOException {
  Path localTaskFile =
      lDirAlloc.getLocalPathForWrite(TaskTracker.getTaskConfFile(
          t.getUser(), t.getJobID().toString(), t.getTaskID().toString(),
          t.isTaskCleanupTask()), conf);
  // write the child's task configuration file to the local disk
  writeLocalTaskFile(localTaskFile.toString(), conf);
  // Set the final job file in the task. The child needs to know the correct
  // path to job.xml, so set this path accordingly.
  t.setJobFile(localTaskFile.toString());
}
Code example source: origin: org.apache.hadoop/hadoop-mapred
/**
 * Returns the taskWorkDir or taskLocalDir based on whether
 * {@link TaskControllerTaskPathDeletionContext} is configured to delete
 * the workDir.
 */
@Override
protected String getPath() {
  String subDir = (isWorkDir)
      ? TaskTracker.getTaskWorkDir(task.getUser(),
          task.getJobID().toString(), task.getTaskID().toString(),
          task.isTaskCleanupTask())
      : TaskTracker.getLocalTaskDir(task.getUser(),
          task.getJobID().toString(), task.getTaskID().toString(),
          task.isTaskCleanupTask());
  return subDir;
}
Code example source: origin: com.facebook.hadoop/hadoop-core

/**
 * The primary public method that should be called to 'run' a task. Handles
 * both map and reduce tasks and marks them as completed after the configured
 * time interval.
 * @param tip
 */
public void launchTask(TaskInProgress tip) throws IOException {
  LOG.info("Launching simulated task " + tip.getTask().getTaskID() +
      " for job " + tip.getTask().getJobID());
  TaskUmbilicalProtocol umbilicalProtocol = taskTracker.getUmbilical(tip);
  // Map tasks can simply be finished after some time; the same goes for
  // cleanup and setup tasks, since there is no need to wait for mappers.
  if (tip.getTask().isMapTask() || tip.getTask().isTaskCleanupTask() ||
      tip.getTask().isJobCleanupTask() || tip.getTask().isJobSetupTask()) {
    addTipToFinish(tip, umbilicalProtocol);
  } else {
    MapperWaitThread mwt =
        new MapperWaitThread(tip, this, umbilicalProtocol);
    // Save a reference to the mapper wait thread so that we can stop it if
    // the task gets killed
    mapperWaitThreadMap.put(tip, mwt);
    mwt.start();
  }
}