org.apache.hadoop.fs.Path.toString()方法的使用及代码示例

x33g5p2x  于2022-01-25 转载在 其他  
字(10.8k)|赞(0)|评价(0)|浏览(166)

本文整理了Java中org.apache.hadoop.fs.Path.toString()方法的一些代码示例,展示了Path.toString()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Path.toString()方法的具体详情如下:
包路径:org.apache.hadoop.fs.Path
类名称:Path
方法名:toString

Path.toString介绍

暂无

代码示例

代码示例来源:origin: apache/storm

/** Builds the path of the directory-lock marker file that lives directly inside {@code dir}. */
private static Path getDirLockFile(Path dir) {
  String lockFilePath = dir.toString() + Path.SEPARATOR_CHAR + DIR_LOCK_FILE;
  return new Path(lockFilePath);
}

代码示例来源:origin: apache/storm

/**
 * Moves a fully-consumed file into the archive directory, dropping the
 * in-progress suffix from its name.
 *
 * @param file path of the completed file (expected to contain {@code inprogress_suffix})
 * @return the path of the renamed file inside {@code archiveDirPath}
 * @throws IOException if the suffix is missing or the HDFS rename fails
 */
private Path renameCompletedFile(Path file) throws IOException {
  String fileName = file.toString();
  // Use lastIndexOf: the suffix belongs at the end of the name; an accidental
  // earlier occurrence (e.g. inside a directory name) must not truncate the path.
  // Previously indexOf() was used, and a missing suffix produced an unhelpful
  // StringIndexOutOfBoundsException from substring(0, -1).
  int suffixAt = fileName.lastIndexOf(inprogress_suffix);
  if (suffixAt < 0) {
    throw new IOException("File does not carry the in-progress suffix: " + file);
  }
  String fileNameMinusSuffix = fileName.substring(0, suffixAt);
  String newName = new Path(fileNameMinusSuffix).getName();
  Path newFile = new Path(archiveDirPath + Path.SEPARATOR + newName);
  LOG.info("Completed consuming file {}", fileNameMinusSuffix);
  if (!hdfs.rename(file, newFile)) {
    throw new IOException("Rename failed for file: " + file);
  }
  LOG.debug("Renamed file {} to {} ", file, newFile);
  return newFile;
}

代码示例来源:origin: apache/hive

/**
 * Returns the fully qualified (scheme + authority) string form of {@code path},
 * or {@code null} when no path was supplied.
 */
static String getQualifiedPath(Configuration conf, Path path) throws IOException {
 if (path == null) {
  return null;
 }
 FileSystem fs = path.getFileSystem(conf);
 return fs.makeQualified(path).toString();
}

代码示例来源:origin: apache/hbase

/**
 * Ensures the bulk-load staging directory exists and carries the special
 * hidden permissions: creates it with those permissions when absent, and
 * re-applies them when it already exists.
 * @throws IOException if the directory cannot be created or its permissions set
 */
private void checkStagingDir() throws IOException {
 Path stagingDir = new Path(this.rootdir, HConstants.BULKLOAD_STAGING_DIR_NAME);
 try {
  if (this.fs.exists(stagingDir)) {
   this.fs.setPermission(stagingDir, HiddenDirPerms);
  } else if (!this.fs.mkdirs(stagingDir, HiddenDirPerms)) {
   throw new IOException("Failed to create staging directory " + stagingDir.toString());
  }
 } catch (IOException e) {
  LOG.error("Failed to create or set permission on staging directory " + stagingDir.toString());
  throw new IOException("Failed to create or set permission on staging directory "
    + stagingDir.toString(), e);
 }
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Collects the locations of every version that must be kept: all versions not
 * contained in {@code deletableVersions}, plus this partition's own location.
 */
private List<String> getNonDeletableVersionLocations(List<HivePartitionVersion> versions,
  List<HivePartitionVersion> deletableVersions) {
 List<String> keptLocations = new ArrayList<>();
 for (HivePartitionVersion candidate : versions) {
  if (deletableVersions.contains(candidate)) {
   continue;
  }
  keptLocations.add(candidate.getLocation().toString());
 }
 keptLocations.add(this.getLocation().toString());
 return keptLocations;
}

代码示例来源:origin: apache/hive

/**
 * Invokes yarn/package.py with the staged input directory and the requested
 * output directory, forwarding any extra arguments, and waits for it to finish.
 * stdout/stderr of the child are inherited by this JVM.
 *
 * @return the exit code of the python process
 */
private int runPackagePy(String[] args, Path tmpDir, Path scriptParent,
  String version, String outputDir) throws IOException, InterruptedException {
 Path scriptPath = new Path(new Path(scriptParent, "yarn"), "package.py");
 List<String> command = new ArrayList<>(args.length + 7);
 command.add("python");
 command.add(scriptPath.toString());
 command.add("--input");
 command.add(tmpDir.toString());
 command.add("--output");
 command.add(outputDir);
 command.add("--javaChild");
 for (int i = 0; i < args.length; i++) {
  command.add(args[i]);
 }
 LOG.debug("Calling package.py via: " + command);
 ProcessBuilder pb = new ProcessBuilder(command);
 pb.redirectError(ProcessBuilder.Redirect.INHERIT);
 pb.redirectOutput(ProcessBuilder.Redirect.INHERIT);
 pb.environment().put("HIVE_VERSION", version);
 return pb.start().waitFor();
}

代码示例来源:origin: apache/hbase

/** Maps each WAL {@link FileStatus} to its path string, logging every entry at debug level. */
private List<String> convert(List<FileStatus> walFiles) {
 List<String> walPaths = new ArrayList<String>();
 for (FileStatus status : walFiles) {
  String walPath = status.getPath().toString();
  LOG.debug("+++WAL: " + walPath);
  walPaths.add(walPath);
 }
 return walPaths;
}

代码示例来源:origin: apache/drill

/**
 * Looks up the {@link PartitionDesc} registered for {@code dir}, retrying with
 * the scheme and authority stripped before giving up.
 *
 * @throws IOException when the directory is not present in the map at all
 */
protected static PartitionDesc getPartitionDescFromPath(
  Map<Path, PartitionDesc> pathToPartitionInfo, Path dir)
  throws IOException {
 PartitionDesc desc = pathToPartitionInfo.get(dir);
 if (desc != null) {
  return desc;
 }
 desc = pathToPartitionInfo.get(Path.getPathWithoutSchemeAndAuthority(dir));
 if (desc != null) {
  return desc;
 }
 throw new IOException("cannot find dir = " + dir.toString()
   + " in " + pathToPartitionInfo);
}

代码示例来源:origin: apache/kylin

/**
 * Opens a stream over a big resource stored on the push-down file system.
 * Any failure (including a missing file) is surfaced as an IOException
 * that names the requested resource and carries the original cause.
 */
protected InputStream openPushdown(String resPath) throws IOException {
  try {
    Path p = pushdownPath(resPath);
    FileSystem fs = pushdownFS();
    if (!fs.exists(p)) {
      throw new FileNotFoundException(p.toString() + "  (FS: " + fs + ")");
    }
    return fs.open(p);
  } catch (Exception ex) {
    throw new IOException("Failed to read big resource " + resPath, ex);
  }
}

代码示例来源:origin: prestodb/presto

Path file = tempFile.getPath();
OrcDataSource dataSource = new HdfsOrcDataSource(
    new OrcDataSourceId(file.toString()),
    fileSystem.getFileStatus(file).getLen(),
    new DataSize(1, MEGABYTE),
fileSystem.delete(file, false);
if (fileSystem.exists(file)) {
  throw new IOException("Failed to delete temporary file: " + file);

代码示例来源:origin: apache/hive

/**
 * Resolves (creating it if necessary) the per-user HDFS directory that holds
 * the installed hive jars: {@code <hive.user.install.dir>/<user>/.hiveJars}.
 *
 * @param conf configuration used to resolve the file system and install dir
 * @return path to destination directory on hdfs
 * @throws LoginException if we are unable to figure user information
 * @throws IOException when any dfs operation fails, or the user dir exists but is not a directory
 */
public Path getDefaultDestDir(Configuration conf) throws LoginException, IOException {
 UserGroupInformation ugi = Utils.getUGI();
 String userName = ugi.getShortUserName();
 String userPathStr = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_USER_INSTALL_DIR);
 Path userPath = new Path(userPathStr);
 FileSystem fs = userPath.getFileSystem(conf);
 // Parent/child Path constructor instead of manual "/" string concatenation.
 Path hdfsDirPath = new Path(userPath, userName);
 try {
  FileStatus fstatus = fs.getFileStatus(hdfsDirPath);
  // isDirectory() replaces the deprecated isDir(), so the former
  // @SuppressWarnings("deprecation") is no longer needed.
  if (!fstatus.isDirectory()) {
   throw new IOException(ErrorMsg.INVALID_DIR.format(hdfsDirPath.toString()));
  }
 } catch (FileNotFoundException e) {
  // directory does not exist, create it
  fs.mkdirs(hdfsDirPath);
 }
 Path retPath = new Path(hdfsDirPath, ".hiveJars");
 fs.mkdirs(retPath);
 return retPath;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns the path of the first non-hidden data file directly under
 * {@code dirInHdfs}; entries that are directories or whose name starts
 * with "_" are skipped.
 *
 * @throws RuntimeException when the directory holds no valid data file
 */
public static String getFirstDataFilePathInDir(String dirInHdfs) throws IOException {
  FileStatus[] entries = getFileSystem().listStatus(new Path(dirInHdfs));
  for (FileStatus entry : entries) {
   if (entry.isDirectory()) {
    continue;
   }
   Path candidate = entry.getPath();
   if (!candidate.getName().startsWith("_")) {
    return candidate.toString();
   }
  }
  String message = dirInHdfs + " does not contain a valid data file.";
  LOG.error(message);
  throw new RuntimeException(message);
 }
}

代码示例来源:origin: apache/hive

/**
 * Opens a writer over the export file list ({@code EximUtil.FILES_NAME})
 * under the export root; refuses to overwrite an existing file.
 */
private BufferedWriter writer() throws IOException {
 Path exportToFile = new Path(exportRootDataDir, EximUtil.FILES_NAME);
 if (exportFileSystem.exists(exportToFile)) {
  String message = exportToFile.toString() + " already exists and cant export data from path(dir) "
    + dataPathList;
  throw new IllegalArgumentException(message);
 }
 logger.debug("exporting data files in dir : " + dataPathList + " to " + exportToFile);
 OutputStreamWriter rawWriter = new OutputStreamWriter(exportFileSystem.create(exportToFile));
 return new BufferedWriter(rawWriter);
}

代码示例来源:origin: apache/hive

@Override
 /**
  * Copies the tez tarball configured via tez.lib.uris into the local lib dir,
  * untars it into tezDir, and removes the local tarball afterwards.
  * Serialized on {@code fs} because concurrent callers share the file system.
  */
 public Void call() throws Exception {
  synchronized (fs) {
   String tezLibs = conf.get(TezConfiguration.TEZ_LIB_URIS);
   if (tezLibs == null) {
    // Previously this only warned and then dereferenced the null below,
    // failing with an unhelpful exception from new Path(null); fail fast instead.
    LOG.warn("Missing tez.lib.uris in tez-site.xml");
    throw new IOException("Missing " + TezConfiguration.TEZ_LIB_URIS + " in tez-site.xml");
   }
   if (LOG.isDebugEnabled()) {
    LOG.debug("Copying tez libs from " + tezLibs);
   }
   lfs.mkdirs(tezDir);
   Path localTarball = new Path(libDir, "tez.tar.gz");
   fs.copyToLocalFile(new Path(tezLibs), localTarball);
   CompressionUtils.unTar(localTarball.toString(), tezDir.toString(), true);
   lfs.delete(localTarball, false);
  }
  return null;
 }
};

代码示例来源:origin: apache/incubator-gobblin

/**
 * Creates a temporary job directory under the configured compaction tmp base,
 * named after the job id (or a random UUID when no JobState is available),
 * and records it in the state under {@code MRCompactor.COMPACTION_JOB_DIR}.
 */
private void initJobDir (SourceState state) throws IOException {
 String tmpBase = state.getProp(MRCompactor.COMPACTION_TMP_DEST_DIR, MRCompactor.DEFAULT_COMPACTION_TMP_DEST_DIR);
 String jobId = (state instanceof JobState)
   ? ((JobState) state).getJobId()
   : UUID.randomUUID().toString();
 this.tmpJobDir = new Path(tmpBase, jobId);
 this.fs.mkdirs(this.tmpJobDir);
 state.setProp(MRCompactor.COMPACTION_JOB_DIR, this.tmpJobDir.toString());
 log.info("Job dir is created under {}", this.tmpJobDir);
}

代码示例来源:origin: apache/hive

/**
 * Compares the single output file under {@code testdir} (relative to tmppath)
 * against the golden file {@code datafile}, failing the test on any mismatch.
 * Fix: both input streams are now closed via try-with-resources (they were
 * previously leaked).
 */
private static void fileDiff(String datafile, String testdir) throws Exception {
 String testFileDir = conf.get("test.data.files");
 // inbuilt assumption that the testdir has only one output file.
 Path di_test = new Path(tmppath, testdir);
 if (!fs.exists(di_test)) {
  throw new RuntimeException(tmpdir + File.separator + testdir + " does not exist");
 }
 if (!ShimLoader.getHadoopShims().isDirectory(fs.getFileStatus(di_test))) {
  throw new RuntimeException(tmpdir + File.separator + testdir + " is not a directory");
 }
 try (FSDataInputStream fi_test = fs.open((fs.listStatus(di_test))[0].getPath());
   FileInputStream fi_gold = new FileInputStream(new File(testFileDir, datafile))) {
  if (!Utilities.contentsEqual(fi_gold, fi_test, false)) {
   LOG.error(di_test.toString() + " does not match " + datafile);
   assertEquals(false, true);
  }
 }
}

代码示例来源:origin: apache/hive

/**
 * Prepares the RCFile/SequenceFile benchmark: picks a file system (local or
 * default), sizes the RCFile column buffer to 1 MB, derives the two target
 * paths (from {@code file} when given, otherwise under test.tmp.dir), and
 * removes any leftovers from earlier runs.
 */
public PerformTestRCFileAndSeqFile(boolean local, String file)
  throws IOException {
 fs = local ? FileSystem.getLocal(conf) : FileSystem.get(conf);
 conf.setInt(RCFile.Writer.COLUMNS_BUFFER_SIZE_CONF_STR, 1 * 1024 * 1024);
 if (file != null) {
  testRCFile = new Path(file + "-rcfile");
  testSeqFile = new Path(file + "-seqfile");
 } else {
  Path dir = new Path(System.getProperty("test.tmp.dir", ".") + "/mapred");
  testRCFile = new Path(dir, "test_rcfile");
  testSeqFile = new Path(dir, "test_seqfile");
 }
 fs.delete(testRCFile, true);
 fs.delete(testSeqFile, true);
 System.out.println("RCFile:" + testRCFile.toString());
 System.out.println("SequenceFile:" + testSeqFile.toString());
}

代码示例来源:origin: apache/hive

/**
 * Computes the replica location for an external table: the original path is
 * re-rooted under the configured REPL_EXTERNAL_TABLE_BASE_DIR and fully
 * qualified against that base's file system.
 *
 * @throws SemanticException when the rewritten path cannot be qualified
 */
public static String externalTableLocation(HiveConf hiveConf, String location) throws SemanticException {
 String baseDir = hiveConf.get(HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname);
 Path basePath = new Path(baseDir);
 Path currentPath = new Path(location);
 // Re-root: append the table's scheme-less path onto the base's path.
 String targetPathWithoutSchemeAndAuth = basePath.toUri().getPath() + currentPath.toUri().getPath();
 Path dataLocation;
 try {
  FileSystem baseFs = basePath.getFileSystem(hiveConf);
  dataLocation = PathBuilder.fullyQualifiedHDFSUri(new Path(targetPathWithoutSchemeAndAuth), baseFs);
 } catch (IOException e) {
  throw new SemanticException(ErrorMsg.INVALID_PATH.getMsg(), e);
 }
 LOG.info("Incoming external table location: {} , new location: {}", location, dataLocation.toString());
 return dataLocation.toString();
}

代码示例来源:origin: apache/hbase

/** Wraps each WAL that is still being split into a SplitWALProcedure targeting the crashed server. */
@VisibleForTesting
List<Procedure> createSplitWALProcedures(List<FileStatus> splittingWALs,
  ServerName crashedServer) {
 return splittingWALs.stream()
   .map(FileStatus::getPath)
   .map(Path::toString)
   .map(walPath -> new SplitWALProcedure(walPath, crashedServer))
   .collect(Collectors.toList());
}

代码示例来源:origin: apache/hive

/**
 * Loads the Druid segment metadata found under {@code segmentDescriptorDir},
 * returning an empty list (with an informational log) when the directory is
 * absent — e.g. for CREATE statements or zero-row inserts.
 */
private List<DataSegment> fetchSegmentsMetadata(Path segmentDescriptorDir) throws IOException {
 FileSystem descriptorFs = segmentDescriptorDir.getFileSystem(getConf());
 if (descriptorFs.exists(segmentDescriptorDir)) {
  return DruidStorageHandlerUtils.getCreatedSegments(segmentDescriptorDir, getConf());
 }
 LOG.info("Directory {} does not exist, ignore this if it is create statement or inserts of 0 rows,"
     + " no Druid segments to move, cleaning working directory {}",
   segmentDescriptorDir.toString(),
   getStagingWorkingDir().toString());
 return Collections.emptyList();
}

相关文章