Usage and code examples of the org.apache.hadoop.fs.Path.getName() method


This article collects Java code examples for the org.apache.hadoop.fs.Path.getName() method and shows how Path.getName() is used in practice. The examples are taken from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as useful references. Details of the Path.getName() method:
Package path: org.apache.hadoop.fs.Path
Class name: Path
Method name: getName

About Path.getName

Returns the final component of this path.
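
Before the project examples, here is a minimal sketch of what getName() actually returns (illustrative only; the class and paths below are not from any of the quoted projects, but follow standard Hadoop Path semantics):

import org.apache.hadoop.fs.Path;

public class PathGetNameDemo {
  public static void main(String[] args) {
    // getName() returns only the last path component, with no parent directories.
    System.out.println(new Path("/user/hive/warehouse/t1/part-00000").getName()); // part-00000
    // The scheme and authority are ignored as well.
    System.out.println(new Path("hdfs://nn:8020/tmp/data.csv").getName());        // data.csv
    // For a single-component relative path, getName() is the path itself.
    System.out.println(new Path("data.csv").getName());                           // data.csv
  }
}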

Code Examples

Code example source: apache/hbase

/**
 * Returns the {@link org.apache.hadoop.hbase.TableName} object representing
 * the table directory under path rootdir.
 *
 * @param tablePath path of the table directory
 * @return {@link org.apache.hadoop.hbase.TableName} for the table
 */
public static TableName getTableName(Path tablePath) {
 return TableName.valueOf(tablePath.getParent().getName(), tablePath.getName());
}
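
Note how getName() supplies the table name while getParent().getName() supplies the namespace; this relies on HBase's on-disk layout, where a table directory sits directly under its namespace directory.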

Code example source: apache/hive

public static Path toTempPath(Path orig) {
 if (orig.getName().indexOf(tmpPrefix) == 0) {
  return orig;
 }
 return new Path(orig.getParent(), tmpPrefix + orig.getName());
}
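
This is a recurring pattern in these examples: getParent() plus a prefixed getName() builds a sibling path next to the original, and the indexOf(...) == 0 check (equivalent to startsWith) makes the call idempotent, so an already-temporary path passes through unchanged.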

Code example source: apache/storm

@Override
  public void execute(FileSystem fileSystem, Path filePath) throws IOException {
    Path destPath = new Path(destination, filePath.getName());
    LOG.info("Moving file {} to {}", filePath, destPath);
    // Check the rename result instead of discarding it in an unused variable.
    boolean success = fileSystem.rename(filePath, destPath);
    if (!success) {
      LOG.warn("Rename failed: {} was not moved to {}", filePath, destPath);
    }
  }
}

Code example source: apache/hbase

private Path getReplSyncUpPath(Path path) throws IOException {
 FileStatus[] rss = fs.listStatus(manager.getLogDir());
 for (FileStatus rs : rss) {
  Path p = rs.getPath();
  FileStatus[] logs = fs.listStatus(p);
  for (FileStatus log : logs) {
   // Use a fresh variable per log file; reassigning p here would corrupt the
   // directory path for later iterations of the inner loop.
   Path logPath = new Path(p, log.getPath().getName());
   if (logPath.getName().equals(path.getName())) {
    LOG.info("Log " + logPath.getName() + " found at " + logPath);
    return logPath;
   }
  }
 }
 LOG.error("Didn't find path for: " + path.getName());
 return path;
}

Code example source: apache/hive

public static Path backupOutputPath(FileSystem fs, Path outpath, JobConf job)
  throws IOException, HiveException {
 if (fs.exists(outpath)) {
  Path backupPath = new Path(outpath.getParent(), BACKUP_PREFIX
    + outpath.getName());
  Utilities.rename(fs, outpath, backupPath);
  return backupPath;
 } else {
  return null;
 }
}

Code example source: apache/storm

private Path renameCompletedFile(Path file) throws IOException {
  String fileName = file.toString();
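    // Assumes fileName contains inprogress_suffix; if the suffix were absent,
    // indexOf() would return -1 and the substring() call below would throw.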
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String newName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(archiveDirPath + Path.SEPARATOR + newName);
  LOG.info("Completed consuming file {}", fileNameMinusSuffix);
  if (!hdfs.rename(file, newFile)) {
    throw new IOException("Rename failed for file: " + file);
  }
  LOG.debug("Renamed file {} to {} ", file, newFile);
  return newFile;
}

Code example source: apache/storm

/**
 * Returns the corresponding input file in the 'sourceDirPath' for the specified lock file.
 * If no such file is found, returns null.
 */
private Path getFileForLockFile(Path lockFile, Path sourceDirPath)
  throws IOException {
  String lockFileName = lockFile.getName();
  Path dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName + inprogress_suffix);
  if (hdfs.exists(dataFile)) {
    return dataFile;
  }
  dataFile = new Path(sourceDirPath + Path.SEPARATOR + lockFileName);
  if (hdfs.exists(dataFile)) {
    return dataFile;
  }
  return null;
}
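
The lookup works only because of a naming convention: a lock file carries the same base name as the data file it guards, so lockFile.getName() is enough to reconstruct both candidate data paths, first with the in-progress suffix and then without it.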

Code example source: apache/storm

protected Iterator<String> listKeys(Path path) throws IOException {
  ArrayList<String> ret = new ArrayList<String>();
  FileStatus[] files = _fs.listStatus(new Path[]{path});
  if (files != null) {
    for (FileStatus sub : files) {
      try {
        ret.add(sub.getPath().getName());
      } catch (IllegalArgumentException e) {
        // Ignored: the file name did not match the expected key format
        LOG.debug("Found an unexpected file in {} {}", path, sub.getPath().getName());
      }
    }
  }
  return ret.iterator();
}

Code example source: apache/incubator-gobblin

/**
 * Calculate the target filePath of the jar file to be copied on HDFS,
 * given the {@link FileStatus} of a jarFile and the path of directory that contains jar.
 */
private Path calculateDestJarFile(FileStatus status, Path jarFileDir) {
 // SNAPSHOT jars should not be shared, as different jobs may be using different versions of it
 Path baseDir = status.getPath().getName().contains("SNAPSHOT") ? this.unsharedJarsDir : jarFileDir;
 // DistributedCache requires absolute path, so we need to use makeQualified.
 return new Path(this.fs.makeQualified(baseDir), status.getPath().getName());
}
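
The design choice: keying off getName().contains("SNAPSHOT") matches Maven's version naming, so snapshot jars, whose contents may differ between builds that share a version string, land in a per-job directory instead of the shared one.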

Code example source: apache/nifi

@Override
  public boolean accept(Path path) {
    return !(path == null || (toIgnoreHiddenFiles && path.getName().startsWith(".")))
        && watchDirectory.matcher(path.toString()).matches();
  }
}

Code example source: apache/hbase

@Override
 public int compare(FileStatus a, FileStatus b) {
  final long aId = getLogIdFromName(a.getPath().getName());
  final long bId = getLogIdFromName(b.getPath().getName());
  return Long.compare(aId, bId);
 }
};
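
Comparing the numeric ids parsed out of getName() rather than the raw names matters here: lexicographic ordering would sort, say, a log with id 10 before one with id 9, while Long.compare() on the extracted ids orders them correctly.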

Code example source: apache/incubator-gobblin

public String getStagingPartitionLocation() {
 Path originalPartitionLocation = getLocation();
 if (PartitionUtils.isUnixTimeStamp(originalPartitionLocation.getName())) {
  return StringUtils.join(Arrays.asList(getLocation().getParent().toString(), this.timeStamp), '/');
 } else {
  return StringUtils.join(Arrays.asList(getLocation().toString(), this.timeStamp), '/');
 }
}

Code example source: apache/hbase

/**
 * @return a path and a writer for that path; the caller should close the writer.
 */
WriterAndPath createWAP(byte[] region, Entry entry) throws IOException {
 String tmpDirName = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
  HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
 Path regionedits = getRegionSplitEditsPath(entry,
   fileBeingSplit.getPath().getName(), tmpDirName, conf);
 if (regionedits == null) {
  return null;
 }
 FileSystem walFs = FSUtils.getWALFileSystem(conf);
 if (walFs.exists(regionedits)) {
  LOG.warn("Found old edits file. It could be the "
    + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
    + walFs.getFileStatus(regionedits).getLen());
  if (!walFs.delete(regionedits, false)) {
   LOG.warn("Failed delete of old {}", regionedits);
  }
 }
 Writer w = createWriter(regionedits);
 LOG.debug("Creating writer path={}", regionedits);
 return new WriterAndPath(regionedits, w, entry.getKey().getSequenceId());
}

Code example source: apache/hive

private FileStatus[] listFilesInDir(Path path) throws IOException {
 return dataFileSystem.listStatus(path, p -> {
  String name = p.getName();
  return !name.startsWith("_") && !name.startsWith(".");
 });
}
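
The filter follows the common Hadoop convention that names starting with "_" or "." are metadata rather than data, such as the _SUCCESS marker written by MapReduce jobs and hidden .crc checksum files.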

Code example source: apache/incubator-pinot

protected void moveToOutputDirectory(FileSystem fs)
  throws Exception {
 LOGGER.info("Moving Segment Tar files from {} to: {}", _stagingDir + "/output/segmentTar", _outputDir);
 FileStatus[] segmentArr = fs.listStatus(new Path(_stagingDir + "/output/segmentTar"));
 for (FileStatus segment : segmentArr) {
  fs.rename(segment.getPath(), new Path(_outputDir, segment.getPath().getName()));
 }
}

Code example source: apache/hive

private Path backupOutputPath(FileSystem fs, Path outpath)
  throws IOException, HiveException {
 if (fs.exists(outpath)) {
  Path backupPath = new Path(outpath.getParent(),
    BACKUP_PREFIX + outpath.getName());
  Utilities.rename(fs, outpath, backupPath);
  return backupPath;
 } else {
  return null;
 }
}

Code example source: apache/hive

public static Path toTaskTempPath(Path orig) {
 if (orig.getName().indexOf(taskTmpPrefix) == 0) {
  return orig;
 }
 return new Path(orig.getParent(), taskTmpPrefix + orig.getName());
}

Code example source: apache/storm

private void markFileAsBad(Path file) {
  String fileName = file.toString();
  String fileNameMinusSuffix = fileName.substring(0, fileName.indexOf(inprogress_suffix));
  String originalName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(badFilesDirPath + Path.SEPARATOR + originalName);
  LOG.info("Moving bad file {} to {}. Processed it till offset {}. SpoutID= {}", originalName, newFile, tracker.getCommitPosition(),
       spoutId);
  try {
    if (!hdfs.rename(file, newFile)) { // seems this can fail by returning false or throwing exception
      throw new IOException("Move failed for bad file: " + file); // convert false ret value to exception
    }
  } catch (IOException e) {
    LOG.warn("Error moving bad file: " + file + " to destination " + newFile + " SpoutId =" + spoutId, e);
  }
  closeReaderAndResetTrackers();
}

Code example source: apache/hive

private void initReplLogger() {
 try {
  Path dbDumpPath = currentDatabaseIterator.dbLevelPath();
  FileSystem fs = dbDumpPath.getFileSystem(hiveConf);
  long numTables = getSubDirs(fs, dbDumpPath).length;
  long numFunctions = 0;
  Path funcPath = new Path(dbDumpPath, ReplicationSemanticAnalyzer.FUNCTIONS_ROOT_DIR_NAME);
  if (fs.exists(funcPath)) {
   numFunctions = getSubDirs(fs, funcPath).length;
  }
  String dbName = StringUtils.isBlank(dbNameToLoadIn) ? dbDumpPath.getName() : dbNameToLoadIn;
  replLogger = new BootstrapLoadLogger(dbName, dumpDirectory, numTables, numFunctions);
  replLogger.startLog();
 } catch (IOException e) {
  // Ignore the exception
 }
}

Code example source: apache/hive

/**
 * Chooses one representative file from {@code baseOrDeltaDir}.
 * This assumes that all files in the dir are of the same type: either written by an acid
 * write or Load Data. This should always be the case for an Acid table.
 */
private static Path chooseFile(Path baseOrDeltaDir, FileSystem fs) throws IOException {
 if(!(baseOrDeltaDir.getName().startsWith(BASE_PREFIX) ||
   baseOrDeltaDir.getName().startsWith(DELTA_PREFIX))) {
  throw new IllegalArgumentException(baseOrDeltaDir + " is not a base/delta");
 }
 FileStatus[] dataFiles = fs.listStatus(new Path[] {baseOrDeltaDir}, originalBucketFilter);
 return dataFiles != null && dataFiles.length > 0 ? dataFiles[0].getPath() : null;
}
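
In Hive's ACID layout, base and delta directories carry fixed name prefixes (base_<writeId> and delta_<startWriteId>_<endWriteId>), so checking getName() against BASE_PREFIX and DELTA_PREFIX is enough to validate the argument before listing its files.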
