org.apache.hadoop.fs.Path.getParent()方法的使用及代码示例

x33g5p2x  于2022-01-25 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(153)

本文整理了Java中org.apache.hadoop.fs.Path.getParent()方法的一些代码示例,展示了Path.getParent()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Path.getParent()方法的具体详情如下:
包路径:org.apache.hadoop.fs.Path
类名称:Path
方法名:getParent

Path.getParent介绍

[英]Returns the parent of a path or null if at root.
[中]返回路径的父级,如果为根,则返回null。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Appends the given suffix to the last component of this path.
 *
 * @param suffix the string appended to the final path segment
 * @return a new Path whose last segment is the original name plus the suffix
 */
public Path suffix(String suffix) {
 String suffixedName = getName() + suffix;
 return new Path(getParent(), suffixedName);
}

代码示例来源:origin: apache/hbase

/**
 * Builds a key from the last N components of the given path, where N comes
 * from the NUMBER_OF_LEVELS_TO_PRESERVE_KEY configuration (default 1).
 * The result always starts with a path separator.
 */
private Text getKey(Path path) {
 final int levels = conf.getInt(NUMBER_OF_LEVELS_TO_PRESERVE_KEY, 1);
 StringBuilder relPath = new StringBuilder();
 Path current = path;
 for (int i = 0; i < levels; i++) {
  // Prepend each component so the deepest name ends up last.
  relPath.insert(0, Path.SEPARATOR + current.getName());
  current = current.getParent();
 }
 return new Text(relPath.toString());
}

代码示例来源:origin: apache/hive

/**
 * Resolves the root location for dynamic partitions by walking up one path
 * component per partition key. Returns null when a custom dynamic location
 * is in use. The result is cached: every partition in this job shares it.
 */
private String getPartitionRootLocation(String ptnLocn, int numPtnKeys) {
  if (customDynamicLocationUsed) {
   return null;
  }
  if (ptnRootLocation == null) {
   Path root = new Path(ptnLocn);
   // Strip one path component per partition key to reach the table root.
   for (int key = 0; key < numPtnKeys; key++) {
    root = root.getParent();
   }
   ptnRootLocation = root.toString();
  }
  return ptnRootLocation;
 }

代码示例来源:origin: apache/incubator-gobblin

/**
 * Makes sure the parent directory of the staging data location exists,
 * creating it when missing. Filesystem failures are rethrown unchecked.
 */
protected void ensureParentOfStagingPathExists() {
 Path parentStagingPath = new Path(this.stagingDataLocation).getParent();
 try {
  if (!this.fs.exists(parentStagingPath)) {
   this.fs.mkdirs(parentStagingPath);
  }
 } catch (IOException ioe) {
  throw new RuntimeException(ioe);
 }
}

代码示例来源:origin: apache/hbase

FileSystem outputFs = FileSystem.get(new Path(backupInfo.getBackupRootDir()).toUri(), conf);
    new Path(getTableBackupDir(backupInfo.getBackupRootDir(), backupInfo.getBackupId(),
     table));
  if (outputFs.delete(targetDirPath, true)) {
   LOG.info("Cleaning up backup data at " + targetDirPath.toString() + " done.");
  } else {
   LOG.info("No data has been found in " + targetDirPath.toString() + ".");
  Path tableDir = targetDirPath.getParent();
  FileStatus[] backups = listStatus(outputFs, tableDir, null);
  if (backups == null || backups.length == 0) {
   outputFs.delete(tableDir, true);
   LOG.debug(tableDir.toString() + " is empty, remove it.");
 outputFs.delete(new Path(targetDir, backupInfo.getBackupId()), true);
} catch (IOException e1) {
 LOG.error("Cleaning up backup data of " + backupInfo.getBackupId() + " at "

代码示例来源:origin: apache/hbase

/**
 * Moves {@code path} into the sideline directory, preserving its position
 * relative to {@code hbaseRoot}.
 *
 * @param fs        filesystem holding both source and sideline dir
 * @param hbaseRoot root the source path must live under
 * @param path      file to sideline
 * @return true if the file was renamed into the sideline dir; false when the
 *         path is not under hbaseRoot or the destination dir couldn't be made
 * @throws IOException on filesystem errors
 */
private boolean sidelineFile(FileSystem fs, Path hbaseRoot, Path path) throws IOException {
 URI uri = hbaseRoot.toUri().relativize(path.toUri());
 if (uri.isAbsolute()) {
  // relativize() returns the input unchanged when path is not under hbaseRoot
  return false;
 }
 String relativePath = uri.getPath();
 Path rootDir = getSidelineDir();
 Path dst = new Path(rootDir, relativePath);
 // Compute the parent once; it is needed for both mkdirs and the error log.
 Path dstParent = dst.getParent();
 boolean pathCreated = fs.mkdirs(dstParent);
 if (!pathCreated) {
  LOG.error("Failed to create path: " + dstParent);
  return false;
 }
 LOG.info("Trying to sideline file " + path + " to " + dst);
 return fs.rename(path, dst);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Replaces targetDir with sourceDir: removes any existing target, ensures the
 * target's parent chain exists (with recursive permissions), then renames.
 *
 * @throws IOException when the rename fails or the filesystem errors out
 */
private void moveDirectory(String sourceDir, String targetDir) throws IOException {
 Path source = new Path(sourceDir);
 Path target = new Path(targetDir);

 // A pre-existing target would make the rename fail, so remove it first.
 if (this.fs.exists(target)) {
  deleteDirectory(targetDir);
 }

 // Make sure the target's parent directories exist before renaming into them.
 WriterUtils.mkdirsWithRecursivePermission(this.fs, target.getParent(),
   FsPermission.getCachePoolDefault());

 log.info("Moving directory: " + sourceDir + " to: " + targetDir);
 if (!this.fs.rename(source, target)) {
  throw new IOException(String.format("Unable to move %s to %s", sourceDir, targetDir));
 }
}

代码示例来源:origin: apache/hbase

/**
 * Writes one cell, flushes it to a mob file, then replaces that mob file
 * in place with a truncated (corrupt) copy carrying the same name.
 */
private void createRecordAndCorruptMobFile(TableName tn, byte[] row, byte[] family, byte[] qf,
 byte[] value) throws IOException {
 Put put = new Put(row);
 put.addColumn(family, qf, value);
 table.put(put);
 admin.flush(tn);
 Path mobFile = getFlushedMobFile(conf, fs, tn, Bytes.toString(family));
 Assert.assertNotNull(mobFile);
 // Produce a truncated copy next to the original, then swap it into place.
 Path truncated = new Path(mobFile.getParent(), "dummy");
 TestHFile.truncateFile(fs, mobFile, truncated);
 fs.delete(mobFile, true);
 fs.rename(truncated, mobFile);
}

代码示例来源:origin: apache/incubator-gobblin

/**
  * Get all the unrenamed directories from the given paths.
  * They are the deepest-level containing directories whose name doesn't end with
  * the suffix {@link MRCompactor#COMPACTION_RENAME_SOURCE_DIR_SUFFIX}.
  * Each directory must contain at least one file, so empty directories are
  * excluded from the result.
  */
public static Set<Path> getDeepestLevelUnrenamedDirsWithFileExistence (FileSystem fs, Set<Path> paths) throws IOException {
 Set<Path> unrenamed = Sets.newHashSet();
 for (FileStatus status : FileListUtils.listFilesRecursively(fs, paths)) {
  // Only directories reached via a contained file are considered (no empties).
  Path containingDir = status.getPath().getParent();
  if (!containingDir.toString().endsWith(MRCompactor.COMPACTION_RENAME_SOURCE_DIR_SUFFIX)) {
   unrenamed.add(containingDir);
  }
 }
 return unrenamed;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Opens a BufferedWriter on the given error-file path, creating parent
 * directories as needed. Appends when the file already exists, otherwise
 * creates it fresh.
 *
 * @param errFilePath path of the error file to write
 * @throws IOException on filesystem errors
 */
public void open(Path errFilePath) throws IOException {
 this.fs.mkdirs(errFilePath.getParent());
 boolean appendExisting = this.fs.exists(errFilePath);
 OutputStream os = this.closer.register(
   appendExisting ? this.fs.append(errFilePath) : this.fs.create(errFilePath));
 OutputStreamWriter encoded = new OutputStreamWriter(os, ConfigurationKeys.DEFAULT_CHARSET_ENCODING);
 this.writer = this.closer.register(new BufferedWriter(encoded));
}

代码示例来源:origin: apache/storm

/**
 * Atomically promotes the temporary blob file to its final data-file name.
 * Uses FileContext because, unlike FileSystem, it supports atomic rename.
 */
@Override
public void commit() throws IOException {
    checkIsNotTmp();
    FileContext fc = FileContext.getFileContext(_hadoopConf);
    Path dest = new Path(_path.getParent(), BLOBSTORE_DATA_FILE);
    if (!_mustBeNew) {
      // Existing data may be replaced.
      fc.rename(_path, dest, Options.Rename.OVERWRITE);
    } else {
      fc.rename(_path, dest);
    }
    // Note, we could add support for setting the replication factor
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns the staging location for this partition: the timestamp is appended
 * to the parent directory when the partition's own directory name is a unix
 * timestamp, otherwise to the partition location itself.
 */
public String getStagingPartitionLocation() {
 Path location = getLocation();
 String base = PartitionUtils.isUnixTimeStamp(location.getName())
     ? location.getParent().toString()
     : location.toString();
 return StringUtils.join(Arrays.asList(base, this.timeStamp), '/');
}

代码示例来源:origin: apache/incubator-gobblin

@AfterClass
 public void tearDown() throws IOException {
  // Recursively remove the directory holding the token file, if present.
  Path tokenDir = this.tokenFilePath.getParent();
  if (this.fileSystem.exists(tokenDir)) {
   this.fileSystem.delete(tokenDir, true);
  }
 }
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Deletes {@code f}, then walks up the directory tree deleting each ancestor
 * left empty, stopping at the first ancestor that is non-empty or missing.
 *
 * @param recursive whether the initial delete of {@code f} is recursive
 */
public static void deletePathAndEmptyAncestors(FileSystem fs, Path f, boolean recursive) throws IOException {
 deletePath(fs, f, recursive);
 for (Path dir = f.getParent(); dir != null; dir = dir.getParent()) {
  boolean presentAndEmpty = fs.exists(dir) && fs.listStatus(dir).length == 0;
  if (!presentAndEmpty) {
   break;
  }
  deletePath(fs, dir, true);
 }
}

代码示例来源:origin: h2oai/h2o-2

@Override public Object call() throws Exception {
  FileSystem fs = FileSystem.get(path.toUri(), CONF);
  fs.mkdirs(path.getParent());
  FSDataOutputStream s = fs.create(path);
  try {
   s.write(data);
  } finally {
   s.close();
  }
  return null;
 }
}, false, data.length);

代码示例来源:origin: alibaba/mdrill

/**
 * Removes {@code target} (recursively) when it exists, then recreates its
 * parent directory so the target path can be rewritten from scratch.
 */
public static void truncate(FileSystem lfs, Path target) throws IOException
{
  LOG.info("truncate "+target.toString());
  Path parent = target.getParent();
  if (lfs.exists(target)) {
    lfs.delete(target, true);
  }
  lfs.mkdirs(parent);
}
public static String readVertify(FileSystem fs, Path file) throws IOException {

代码示例来源:origin: apache/hive

/**
 * Returns a temp-prefixed version of the given path: {@code tmpPrefix} is
 * prepended to the final name component. A path already carrying the prefix
 * is returned unchanged.
 *
 * @param orig the original path
 * @return {@code orig} itself when already prefixed, otherwise a sibling path
 *         named {@code tmpPrefix + orig.getName()}
 */
public static Path toTempPath(Path orig) {
 // startsWith is the idiomatic (and cheaper) form of indexOf(...) == 0
 if (orig.getName().startsWith(tmpPrefix)) {
  return orig;
 }
 return new Path(orig.getParent(), tmpPrefix + orig.getName());
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns, as a string, the parent directory of the HDFS file path
 * configured on this instance.
 */
private String getDirInHdfs() {
 Path hdfsFile = new Path(this.filePathInHdfs);
 return hdfsFile.getParent().toString();
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Lays out empty writer-output files for each relative path of the dataset,
 * creating parent directories as needed.
 *
 * @throws IOException on filesystem errors
 */
private void createDatasetFiles() throws IOException {
 // Create writer output files
 Path datasetWriterOutputPath =
   new Path(writerOutputPath, copyEntity.getDatasetAndPartition(this.metadata).identifier());
 Path outputPathWithCurrentDirectory = new Path(datasetWriterOutputPath,
   PathUtils.withoutLeadingSeparator(this.targetPath));
 for (String path : relativeFilePaths) {
  Path pathToCreate = new Path(outputPathWithCurrentDirectory, path);
  fs.mkdirs(pathToCreate.getParent());
  // Close the stream returned by create(); leaving it open leaks a handle
  // and may keep the file unflushed on some filesystems.
  fs.create(pathToCreate).close();
 }
}

代码示例来源:origin: apache/incubator-druid

/**
 * Returns the latest modified file at the uri of interest.
 *
 * @param uri     Either a directory or a file on HDFS. If it is a file, the parent directory will be searched.
 * @param pattern A pattern matcher for file names in the directory of interest. Passing `null` results in matching any file in the directory.
 *
 * @return The URI of the file with the most recent modified timestamp.
 */
@Override
public URI getLatestVersion(final URI uri, final @Nullable Pattern pattern)
{
 final Path path = new Path(uri);
 try {
  return RetryUtils.retry(
    () -> {
     final FileSystem fs = path.getFileSystem(config);
     if (!fs.exists(path)) {
      return null;
     }
     // A plain file is resolved to its containing directory before scanning.
     return mostRecentInDir(fs.isDirectory(path) ? path : path.getParent(), pattern);
    },
    shouldRetryPredicate(),
    DEFAULT_RETRY_COUNT
  );
 }
 catch (Exception e) {
  // Throwables.propagate is deprecated; this is its documented replacement:
  // rethrow unchecked exceptions as-is, wrap checked ones.
  Throwables.throwIfUnchecked(e);
  throw new RuntimeException(e);
 }
}

相关文章