org.apache.hadoop.hive.ql.exec.Utilities.toTaskTempPath()方法的使用及代码示例

x33g5p2x  于2022-02-01 转载在 其他  
字(5.0k)|赞(0)|评价(0)|浏览(76)

本文整理了Java中org.apache.hadoop.hive.ql.exec.Utilities.toTaskTempPath()方法的一些代码示例,展示了Utilities.toTaskTempPath()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Utilities.toTaskTempPath()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.exec.Utilities
类名称:Utilities
方法名:toTaskTempPath

Utilities.toTaskTempPath介绍

暂无

代码示例

代码示例来源:origin: apache/drill

public FSPaths(Path specPath) {
 // Derive the two staging locations from the final output path:
 // a shared temp dir and a per-task-attempt temp dir.
 tmpPath = Utilities.toTempPath(specPath);
 taskOutputTempPath = Utilities.toTaskTempPath(specPath);
 // Allocate one slot per output file for paths, writers and updaters.
 finalPaths = new Path[numFiles];
 outPaths = new Path[numFiles];
 outWriters = new RecordWriter[numFiles];
 updaters = new RecordUpdater[numFiles];
 stat = new Stat();
 if (isDebugEnabled) {
  LOG.debug("Created slots for  " + numFiles);
 }
}

代码示例来源:origin: apache/hive

@Override
public void configure(JobConf job) {
 // Keep the job conf and resolve the column-truncate work for this task.
 jc = job;
 work = (ColumnTruncateWork) Utilities.getMapWork(job);
 Path specPath = work.getOutputDir();
 // Point the writer at the shared temp dir and the per-task temp dir.
 updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath));
 try {
  fs = specPath.getFileSystem(job);
  // Ask the FS to clean up the task output if the JVM exits early.
  autoDelete = fs.deleteOnExit(outPath);
 } catch (IOException e) {
  this.exception = true;
  throw new RuntimeException(e);
 }
}

代码示例来源:origin: apache/drill

@Override
public void configure(JobConf job) {
 // Cache the job conf and look up the map work describing this truncate task.
 jc = job;
 work = (ColumnTruncateWork) Utilities.getMapWork(job);
 Path outputDir = work.getOutputDir();
 Path stagingDir = Utilities.toTempPath(outputDir);
 Path taskStagingDir = Utilities.toTaskTempPath(outputDir);
 updatePaths(stagingDir, taskStagingDir);
 try {
  fs = outputDir.getFileSystem(job);
  // Register the task output for deletion on JVM exit (best-effort cleanup).
  autoDelete = fs.deleteOnExit(outPath);
 } catch (IOException e) {
  this.exception = true;
  throw new RuntimeException(e);
 }
}

代码示例来源:origin: apache/hive

public FSPaths(Path specPath, boolean isMmTable) {
 this.isMmTable = isMmTable;
 if (isMmTable) {
  // MM tables write straight under the spec path, into a base/delta
  // subdirectory derived from the table write id and statement id.
  tmpPathRoot = specPath;
  taskOutputTempPathRoot = null; // Should not be used.
  subdirForTxn = AcidUtils.baseOrDeltaSubdir(conf.getInsertOverwrite(),
    conf.getTableWriteId(), conf.getTableWriteId(), conf.getStatementId());
 } else {
  // Non-MM tables stage output in shared and per-task temp dirs.
  tmpPathRoot = Utilities.toTempPath(specPath);
  taskOutputTempPathRoot = Utilities.toTaskTempPath(specPath);
  subdirForTxn = null;
 }
 if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
  Utilities.FILE_OP_LOGGER.trace("new FSPaths for " + numFiles
    + " files, dynParts = " + bDynParts + " (spec path " + specPath + ")");
 }
 // One slot per output file for paths, writers and updaters.
 finalPaths = new Path[numFiles];
 outPaths = new Path[numFiles];
 outWriters = new RecordWriter[numFiles];
 updaters = new RecordUpdater[numFiles];
 stat = new Stat();
 if (LOG.isDebugEnabled()) {
  LOG.debug("Created slots for  " + numFiles);
 }
}

代码示例来源:origin: apache/hive

boolean isBlobStorage = BlobStorageUtils.isBlobStorageFileSystem(hconf, fs);
Path tmpPath = Utilities.toTempPath(specPath);
Path taskTmpPath = Utilities.toTaskTempPath(specPath);
if (success) {
 if (!isBlobStorage && fs.exists(tmpPath)) {

代码示例来源:origin: apache/drill

Path taskTmpPath = Utilities.toTaskTempPath(specPath);
if (success) {
 FileStatus[] statuses = HiveStatsUtils.getFileStatusRecurse(

代码示例来源:origin: apache/drill

Path specPath = conf.getOutputPath();
updatePaths(Utilities.toTempPath(specPath),
  Utilities.toTaskTempPath(specPath));
try {
 fs = specPath.getFileSystem(hconf);

代码示例来源:origin: apache/hive

updatePaths(specPath, null);
} else {
 updatePaths(Utilities.toTempPath(specPath), Utilities.toTaskTempPath(specPath));

代码示例来源:origin: com.facebook.presto.hive/hive-apache

/**
 * Commits or discards the staged output under {@code specPath}.
 *
 * On success, removes temp/duplicate files from the shared temp dir,
 * creates any required empty buckets, and moves the temp dir's contents
 * to the final destination. On failure, the temp dir is deleted instead.
 * The per-task-attempt temp dir is deleted in both cases.
 *
 * @param specPath final output directory (its FS is used for all ops)
 * @param hconf    configuration used to resolve the file system
 * @param success  whether the producing job/task succeeded
 * @param log      logger for the move message
 * @param dpCtx    dynamic-partition context (may drive bucket handling)
 * @param conf     file sink descriptor used when creating empty buckets
 * @param reporter progress reporter passed to bucket creation
 * @throws IOException   on file system errors
 * @throws HiveException on Hive-level errors during bucket creation
 */
public static void mvFileToFinalPath(Path specPath, Configuration hconf,
  boolean success, Log log, DynamicPartitionCtx dpCtx, FileSinkDesc conf,
  Reporter reporter) throws IOException,
  HiveException {
 FileSystem fs = specPath.getFileSystem(hconf);
 Path tmpPath = Utilities.toTempPath(specPath);
 Path taskTmpPath = Utilities.toTaskTempPath(specPath);
 if (success) {
  if (fs.exists(tmpPath)) {
   // remove any tmp file or double-committed output files
   // (program to the List interface rather than ArrayList)
   List<String> emptyBuckets =
     Utilities.removeTempOrDuplicateFiles(fs, tmpPath, dpCtx);
   // create empty buckets if necessary
   if (!emptyBuckets.isEmpty()) {
    createEmptyBuckets(hconf, emptyBuckets, conf, reporter);
   }
   // move to the file destination
   log.info("Moving tmp dir: " + tmpPath + " to: " + specPath);
   Utilities.renameOrMoveFiles(fs, tmpPath, specPath);
  }
 } else {
  // the job failed: discard whatever was staged
  fs.delete(tmpPath, true);
 }
 // the per-task-attempt temp dir is always cleaned up
 fs.delete(taskTmpPath, true);
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

public FSPaths(Path specPath) {
 // Staging roots derived from the final output location.
 tmpPath = Utilities.toTempPath(specPath);
 taskOutputTempPath = Utilities.toTaskTempPath(specPath);
 // Per-file bookkeeping arrays, sized by the number of output files.
 outPaths = new Path[numFiles];
 finalPaths = new Path[numFiles];
 updaters = new RecordUpdater[numFiles];
 outWriters = new RecordWriter[numFiles];
 if (isDebugEnabled) {
  LOG.debug("Created slots for  " + numFiles);
 }
 stat = new Stat();
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

@Override
public void configure(JobConf job) {
 // Remember the job conf and fetch the truncate work attached to it.
 jc = job;
 work = (ColumnTruncateWork) Utilities.getMapWork(job);
 final Path target = work.getOutputDir();
 updatePaths(Utilities.toTempPath(target), Utilities.toTaskTempPath(target));
 try {
  fs = target.getFileSystem(job);
  // Best-effort cleanup: remove the task output when the JVM exits.
  autoDelete = fs.deleteOnExit(outPath);
 } catch (IOException e) {
  this.exception = true;
  throw new RuntimeException(e);
 }
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

Path specPath = conf.getOutputPath();
updatePaths(Utilities.toTempPath(specPath),
  Utilities.toTaskTempPath(specPath));
try {
 fs = specPath.getFileSystem(hconf);

相关文章

微信公众号

最新文章

更多

Utilities类方法