本文整理了Java中org.apache.hadoop.hive.ql.exec.Utilities.toTempPath()
方法的一些代码示例,展示了Utilities.toTempPath()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utilities.toTempPath()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.Utilities
类名称:Utilities
方法名:toTempPath
[英]Given a path, convert to a temporary path.
[中]给定路径,转换为临时路径。
代码示例来源:origin: apache/hive
/**
 * Returns this task's in-progress output file: a temp-named file for
 * {@code taskId} placed under the temp equivalent of {@code specPath}.
 */
private Path getOperatorOutputPath(Path specPath) throws IOException {
  Path tempParent = Utilities.toTempPath(specPath);
  Path tempChild = Utilities.toTempPath(taskId);
  return new Path(tempParent, tempChild);
}
代码示例来源:origin: apache/hive
/**
 * Given a path, convert to a temporary path.
 *
 * @param orig the original path as a string
 * @return the temporary {@code Path} corresponding to {@code orig}
 */
public static Path toTempPath(String orig) {
  Path original = new Path(orig);
  return toTempPath(original);
}
代码示例来源:origin: apache/drill
/**
 * Builds the operator's temporary output location: both the spec
 * directory and the task id are mapped to their temp-path forms.
 */
private Path getOperatorOutputPath(Path specPath) throws IOException {
  final Path tmpDir = Utilities.toTempPath(specPath);
  return new Path(tmpDir, Utilities.toTempPath(taskId));
}
代码示例来源:origin: apache/drill
/**
 * Given a path, convert to a temporary path.
 *
 * @param orig string form of the path to convert
 * @return the converted temporary path
 */
public static Path toTempPath(String orig) {
  final Path asPath = new Path(orig);
  return toTempPath(asPath);
}
代码示例来源:origin: apache/hive
/**
 * Returns the final (non-temp-named) output file for this task: the plain
 * {@code taskId} under the temp equivalent of {@code specPath}.
 */
private Path getOperatorFinalPath(Path specPath) throws IOException {
  Path tempParent = Utilities.toTempPath(specPath);
  return new Path(tempParent, taskId);
}
代码示例来源:origin: apache/drill
/**
 * Update OutPath according to tmpPath.
 *
 * @param taskId id of the task whose output path is wanted
 * @return a temp-named path for {@code taskId} under {@code taskOutputTempPath}
 */
public Path getTaskOutPath(String taskId) {
  Path tempName = Utilities.toTempPath(taskId);
  return new Path(this.taskOutputTempPath, tempName);
}
代码示例来源:origin: apache/drill
/**
 * Computes the committed output file name for the task: the bare task id
 * inside the temp form of the spec directory.
 */
private Path getOperatorFinalPath(Path specPath) throws IOException {
  final Path tempDir = Utilities.toTempPath(specPath);
  return new Path(tempDir, taskId);
}
代码示例来源:origin: apache/hive
/**
 * Records the temp and task-temp roots and derives this task's final and
 * in-progress output paths from them.
 *
 * NOTE(review): assumes {@code MRJobConfig.MAP_INPUT_FILE} is set in the
 * job conf — verify against callers.
 */
private void updatePaths(Path tmpPath, Path taskTmpPath) {
  this.tmpPath = tmpPath;
  this.taskTmpPath = taskTmpPath;
  // Final file is named after the base name of the map input file.
  String mapInput = jc.get(MRJobConfig.MAP_INPUT_FILE);
  int nameStart = mapInput.lastIndexOf(Path.SEPARATOR) + 1;
  String baseName = mapInput.substring(nameStart);
  finalPath = new Path(tmpPath, baseName);
  // In-progress output is a temp-named file in the task temp dir.
  String attemptId = Utilities.getTaskId(jc);
  outPath = new Path(taskTmpPath, Utilities.toTempPath(attemptId));
}
代码示例来源:origin: apache/drill
/**
 * Stores the temp roots and points finalPath/outPath at this task's
 * committed and in-progress files respectively.
 */
private void updatePaths(Path tp, Path ttp) {
  tmpPath = tp;
  taskTmpPath = ttp;
  String id = Utilities.getTaskId(jc);
  finalPath = new Path(tp, id);
  outPath = new Path(ttp, Utilities.toTempPath(id));
}
代码示例来源:origin: apache/drill
/**
 * Saves the temp roots and derives the output file names: the final file
 * keeps the input file's base name, the working file is temp-named.
 */
private void updatePaths(Path tmpPath, Path taskTmpPath) {
  String attempt = Utilities.getTaskId(jc);
  this.tmpPath = tmpPath;
  this.taskTmpPath = taskTmpPath;
  String input = jc.get(MRJobConfig.MAP_INPUT_FILE);
  // Strip everything up to and including the last path separator.
  String baseName = input.substring(input.lastIndexOf(Path.SEPARATOR) + 1);
  finalPath = new Path(tmpPath, baseName);
  outPath = new Path(taskTmpPath, Utilities.toTempPath(attempt));
}
代码示例来源:origin: apache/hive
/**
 * Sets up tmp/final/out paths for this merge task. For MM tables the
 * merged file goes directly under tmpPath (no task-temp staging);
 * otherwise the usual temp/task-temp layout is used.
 */
private void updatePaths(Path tp, Path ttp) {
  if (taskId == null) {
    taskId = Utilities.getTaskId(jc);
  }
  tmpPath = tp;
  if (isMmTable) {
    // No task-temp staging area for MM tables.
    taskTmpPath = null;
    // Make sure we don't collide with the source.
    finalPath = new Path(tmpPath, taskId + ".merged");
    outPath = finalPath;
  } else {
    taskTmpPath = ttp;
    finalPath = new Path(tp, taskId);
    outPath = new Path(ttp, Utilities.toTempPath(taskId));
  }
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    Utilities.FILE_OP_LOGGER.trace("Paths for merge " + taskId + ": tmp " + tmpPath + ", task "
        + taskTmpPath + ", final " + finalPath + ", out " + outPath);
  }
}
代码示例来源:origin: apache/drill
/**
 * Initializes the per-FileSink path bookkeeping for {@code specPath}:
 * derives the temp roots and allocates one slot per output file.
 */
public FSPaths(Path specPath) {
  // Temp roots derived from the final destination directory.
  tmpPath = Utilities.toTempPath(specPath);
  taskOutputTempPath = Utilities.toTaskTempPath(specPath);
  // One entry per output file.
  finalPaths = new Path[numFiles];
  outPaths = new Path[numFiles];
  outWriters = new RecordWriter[numFiles];
  updaters = new RecordUpdater[numFiles];
  stat = new Stat();
  if (isDebugEnabled) {
    LOG.debug("Created slots for " + numFiles);
  }
}
代码示例来源:origin: apache/hive
// (fragment from a larger method — enclosing context not visible)
// Temp-path form of the final spec path.
Path tmpPath = Utilities.toTempPath(specPath);
// Sibling directory named "<tmpName>.intermediate" next to the temp path;
// presumably a staging location before the final move — TODO confirm.
Path intermediatePath = new Path(tmpPath.getParent(), tmpPath.getName()
+ ".intermediate");
代码示例来源:origin: apache/drill
/**
 * Walks the operator tree and creates the temp output directory for every
 * {@link FileSinkOperator} encountered.
 *
 * NOTE: consumes the passed-in {@code ops} list, using it as the
 * traversal worklist (children are appended as they are discovered).
 */
private static void createTmpDirs(Configuration conf,
    List<Operator<? extends OperatorDesc>> ops) throws IOException {
  while (!ops.isEmpty()) {
    Operator<? extends OperatorDesc> current = ops.remove(0);
    if (current instanceof FileSinkOperator) {
      FileSinkDesc sinkDesc = ((FileSinkOperator) current).getConf();
      Path outDir = sinkDesc.getDirName();
      if (outDir != null) {
        Path tmpDir = Utilities.toTempPath(outDir);
        tmpDir.getFileSystem(conf).mkdirs(tmpDir);
      }
    }
    List<Operator<? extends OperatorDesc>> children = current.getChildOperators();
    if (children != null) {
      ops.addAll(children);
    }
  }
}
代码示例来源:origin: apache/hive
/**
 * Walks the operator tree and creates the temp output directory for each
 * {@link FileSinkOperator}, skipping MM tables entirely.
 *
 * NOTE: consumes the passed-in {@code ops} list as the traversal worklist.
 */
private static void createTmpDirs(Configuration conf,
    List<Operator<? extends OperatorDesc>> ops) throws IOException {
  while (!ops.isEmpty()) {
    Operator<? extends OperatorDesc> current = ops.remove(0);
    if (current instanceof FileSinkOperator) {
      FileSinkDesc sinkDesc = ((FileSinkOperator) current).getConf();
      if (sinkDesc.isMmTable()) {
        // No need to create for MM tables; note this also skips the
        // operator's children, matching the original control flow.
        continue;
      }
      Path outDir = sinkDesc.getDirName();
      if (outDir != null) {
        Path tmpDir = Utilities.toTempPath(outDir);
        FileSystem fs = tmpDir.getFileSystem(conf);
        fs.mkdirs(tmpDir);
      }
    }
    if (current.getChildOperators() != null) {
      ops.addAll(current.getChildOperators());
    }
  }
}
代码示例来源:origin: apache/hive
/**
 * Initializes this task from the job conf: resolves the output dir,
 * derives the temp/task-temp paths, and registers the in-progress output
 * file for deletion on exit.
 */
@Override
public void configure(JobConf job) {
  jc = job;
  work = (ColumnTruncateWork) Utilities.getMapWork(job);
  Path specPath = work.getOutputDir();
  updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath));
  try {
    fs = specPath.getFileSystem(job);
    autoDelete = fs.deleteOnExit(outPath);
  } catch (IOException e) {
    this.exception = true;
    throw new RuntimeException(e);
  }
}
代码示例来源:origin: apache/drill
/**
 * Sets up paths and the filesystem for this task from the job conf.
 * The in-progress output file is marked delete-on-exit so failed
 * attempts leave no residue.
 */
@Override
public void configure(JobConf job) {
  jc = job;
  work = (ColumnTruncateWork) Utilities.getMapWork(job);
  Path outputDir = work.getOutputDir();
  Path tmp = Utilities.toTempPath(outputDir);
  Path taskTmp = Utilities.toTaskTempPath(outputDir);
  updatePaths(tmp, taskTmp);
  try {
    fs = outputDir.getFileSystem(job);
    autoDelete = fs.deleteOnExit(outPath);
  } catch (IOException e) {
    this.exception = true;
    throw new RuntimeException(e);
  }
}
代码示例来源:origin: apache/hive
/**
 * Initializes path bookkeeping for {@code specPath}. MM tables write
 * straight under the spec path into a transaction subdirectory; all other
 * tables stage output under derived temp / task-temp roots.
 */
public FSPaths(Path specPath, boolean isMmTable) {
  this.isMmTable = isMmTable;
  if (isMmTable) {
    tmpPathRoot = specPath;
    taskOutputTempPathRoot = null; // Should not be used.
    subdirForTxn = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(),
        conf.getTableWriteId(), conf.getTableWriteId(), conf.getStatementId());
  } else {
    tmpPathRoot = Utilities.toTempPath(specPath);
    taskOutputTempPathRoot = Utilities.toTaskTempPath(specPath);
    subdirForTxn = null;
  }
  if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
    Utilities.FILE_OP_LOGGER.trace("new FSPaths for " + numFiles
        + " files, dynParts = " + bDynParts + " (spec path " + specPath + ")");
  }
  // One slot per output file.
  finalPaths = new Path[numFiles];
  outPaths = new Path[numFiles];
  outWriters = new RecordWriter[numFiles];
  updaters = new RecordUpdater[numFiles];
  stat = new Stat();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created slots for " + numFiles);
  }
}
代码示例来源:origin: apache/hive
/**
 * Asserts that each candidate task-id string resolves to the expected
 * attempt id, and that the task id can be recovered from plain,
 * compressed, and temp-path-decorated file names.
 */
private void testTaskIds(String [] taskIds, String expectedAttemptId, String expectedTaskId) {
  Configuration conf = new JobConf(TestOperators.class);
  for (String candidate : taskIds) {
    conf.set("mapred.task.id", candidate);
    String attemptId = Utilities.getTaskId(conf);
    assertEquals(expectedAttemptId, attemptId);
    // The task id must survive file-name decorations.
    assertEquals(Utilities.getTaskIdFromFilename(attemptId), expectedTaskId);
    assertEquals(Utilities.getTaskIdFromFilename(attemptId + ".gz"), expectedTaskId);
    String tempName = Utilities.toTempPath(new Path(attemptId + ".gz")).toString();
    assertEquals(Utilities.getTaskIdFromFilename(tempName), expectedTaskId);
  }
}
代码示例来源:origin: apache/hive
// (fragment — the surrounding if/else condition is not visible)
// This branch writes directly to the final spec path with no task-temp dir;
// presumably the MM-table case — TODO confirm against the full method.
updatePaths(specPath, null);
} else {
// Otherwise stage output under the temp / task-temp forms of specPath.
updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath));
内容来源于网络,如有侵权,请联系作者删除!