Usage of the org.apache.hadoop.hdfs.server.common.Util class, with code examples


This article collects Java code examples for the org.apache.hadoop.hdfs.server.common.Util class and shows how the class is used in practice. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they reflect real-world usage and should make useful references. Details of the Util class:

Package: org.apache.hadoop.hdfs.server.common
Class name: Util

About Util

No description available.

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Returns edit directories that are shared between primary and secondary.
 * @param conf configuration
 * @return collection of edit directories from {@code conf}
 */
public static List<URI> getSharedEditsDirs(Configuration conf) {
 // don't use getStorageDirs here, because we want an empty default
 // rather than the dir in /tmp
 Collection<String> dirNames = conf.getTrimmedStringCollection(
   DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
 return Util.stringCollectionAsURIs(dirNames);
}
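A minimal usage sketch (the class name SharedEditsDemo and the journal address are made-up illustrations; in upstream Hadoop this helper is the public static FSNamesystem.getSharedEditsDirs):

import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class SharedEditsDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();
  // HA shared-edits location; an empty list comes back if the key is unset
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
    "qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");
  List<URI> dirs = FSNamesystem.getSharedEditsDirs(conf);
  System.out.println(dirs);
 }
}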

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Interprets the passed string as a URI. In case of error it 
 * assumes the specified string is a file.
 *
 * @param s the string to interpret
 * @return the resulting URI
 */
static URI stringAsURI(String s) throws IOException {
 URI u = null;
 // try to make a URI
 try {
  u = new URI(s);
 } catch (URISyntaxException e){
  LOG.error("Syntax error in URI " + s
    + ". Please check hdfs configuration.", e);
 }
 // if URI is null or scheme is undefined, then assume it's file://
 if(u == null || u.getScheme() == null){
  LOG.info("Assuming 'file' scheme for path " + s + " in configuration.");
  u = fileAsURI(new File(s));
 }
 return u;
}
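The fallback branch is easy to see in a quick sketch (assuming a call site in the same package, since this overload is package-private):

URI withScheme = Util.stringAsURI("file:///data/dfs/name"); // parsed directly
URI bare = Util.stringAsURI("/data/dfs/name"); // no scheme: logged, then routed through fileAsURI()
// both results are absolute file: URIs; fileAsURI canonicalizes the path first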

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Converts a collection of strings into a collection of URIs.
 * @param names collection of strings to convert to URIs
 * @return collection of URIs
 */
public static List<URI> stringCollectionAsURIs(
                Collection<String> names) {
 List<URI> uris = new ArrayList<>(names.size());
 for(String name : names) {
  try {
   uris.add(stringAsURI(name));
  } catch (IOException e) {
   LOG.error("Error while processing URI: " + name, e);
  }
 }
 return uris;
}
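Note that a name which fails to convert is only logged and skipped, so the returned list can be shorter than the input collection; callers see no exception.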

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Load an edit log, and apply the changes to the in-memory structure
 * This is where we apply edits that we've been writing to disk all
 * along.
 */
int loadFSEdits(EditLogInputStream edits, long expectedStartingTxId)
throws IOException {
 long startTime = now();
 currentTxId = expectedStartingTxId;
 int numEdits = loadFSEdits(edits, true);
 FSImage.LOG.info("Edits file " + edits.getName() 
   + " of size " + edits.length() + " edits # " + numEdits 
   + " loaded in " + (now()-startTime)/1000 + " seconds.");
 return numEdits;
}
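Util.now() returns wall-clock time in milliseconds, so the (now()-startTime)/1000 in the log message reports the load time in whole seconds.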

Code example source: org.apache.hadoop/hadoop-hdfs

This excerpt shows the verification step after a file download: on a length or digest mismatch the temporary files are deleted and an IOException is thrown (a reconstructed sketch; the guard expressions are assumptions, as the excerpt preserves only the cleanup and throw statements):

if (advertisedSize != -1 && received != advertisedSize) {
 deleteTmpFiles(localPaths); // discard the partial download
 throw new IOException("File " + url + " received length " + received +
   " is not of the advertised size " + advertisedSize);
}
if (advertisedDigest != null && !computedDigest.equals(advertisedDigest)) {
 deleteTmpFiles(localPaths); // discard the corrupted download
 throw new IOException("File " + url + " computed digest " +
   computedDigest + " does not match advertised digest " +
   advertisedDigest);
}

Code example source: com.facebook.hadoop/hadoop-core

private long dispatchBlockMoves() throws InterruptedException {
 long bytesLastMoved = bytesMoved.get();
 Future<?>[] futures = new Future<?>[sources.size()];
 int i=0;
 for (Source source : sources) {
  futures[i++] = dispatcherExecutor.submit(
           source.new BlockMoveDispatcher(Util.now()));
 }
 // wait for all dispatcher threads to finish
 for (Future<?> future : futures) {
  try {
   future.get();
  } catch (ExecutionException e) {
   LOG.warn("Dispatcher thread failed", e.getCause());
  }
 }
 // wait for all block moving to be done
 waitForMoveCompletion();
 return bytesMoved.get()-bytesLastMoved;
}
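Here future.get() is used purely as a join: the dispatcher tasks return no value, and a failed task only has its cause logged, so one failing dispatcher does not abort the rest.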

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Return the storage directory corresponding to the passed URI.
 * @param uri URI of a storage directory
 * @return The matching storage directory or null if none found
 */
public StorageDirectory getStorageDirectory(URI uri) {
 try {
  uri = Util.fileAsURI(new File(uri));
  Iterator<StorageDirectory> it = dirIterator();
  while (it.hasNext()) {
   StorageDirectory sd = it.next();
   if (Util.fileAsURI(sd.getRoot()).equals(uri)) {
    return sd;
   }
  }
 } catch (IOException ioe) {
  LOG.warn("Error converting file to URI", ioe);
 }
 return null;
}
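Since Util.fileAsURI canonicalizes a file before converting it, the argument and each directory root are normalized the same way, so paths that differ only in ".." segments or symlinks still compare equal.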

Code example source: org.apache.hadoop/hadoop-hdfs

static List<URI> getCheckpointEditsDirs(Configuration conf,
  String defaultName) {
 Collection<String> dirNames = conf.getTrimmedStringCollection(
   DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
 if (dirNames.size() == 0 && defaultName != null) {
  dirNames.add(defaultName);
 }
 return Util.stringCollectionAsURIs(dirNames);
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

/**
 * Test for a relative path, os independent
 * @throws IOException 
 */
public void testRelativePathAsURI() throws IOException {
 URI u = Util.stringAsURI(RELATIVE_FILE_PATH);
 LOG.info("Uri: " + u);
 assertNotNull(u);
}

Code example source: com.facebook.hadoop/hadoop-core

public int run(String[] args) throws Exception {
 final long startTime = Util.now();
 try {
  checkReplicationPolicyCompatibility(conf);
  final List<InetSocketAddress> namenodes = DFSUtil.getClientRpcAddresses(conf, null);
  parse(args);
  return Balancer.run(namenodes, conf);
 } catch (IOException e) {
  System.out.println(e + ".  Exiting ...");
  return IO_EXCEPTION;
 } catch (InterruptedException e) {
  System.out.println(e + ".  Exiting ...");
  return INTERRUPTED;
 } catch (Exception e) {
  e.printStackTrace();
  return ILLEGAL_ARGS; 
 } finally {
  System.out.println("Balancing took " + time2Str(Util.now()-startTime));
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Return the list of locations being used for a specific purpose.
 * i.e. Image or edit log storage.
 *
 * @param dirType Purpose of locations requested.
 * @throws IOException
 */
Collection<URI> getDirectories(NameNodeDirType dirType)
  throws IOException {
 ArrayList<URI> list = new ArrayList<>();
 Iterator<StorageDirectory> it = (dirType == null) ? dirIterator() :
                 dirIterator(dirType);
 for ( ; it.hasNext();) {
  StorageDirectory sd = it.next();
  try {
   list.add(Util.fileAsURI(sd.getRoot()));
  } catch (IOException e) {
   throw new IOException("Exception while processing " +
     "StorageDirectory " + sd.getRoot(), e);
  }
 }
 return list;
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Retrieve checkpoint dirs from configuration.
 *
 * @param conf the Configuration
 * @param defaultValue a default value for the attribute, if null
 * @return a Collection of URIs representing the values in 
 * dfs.namenode.checkpoint.dir configuration property
 */
static Collection<URI> getCheckpointDirs(Configuration conf,
  String defaultValue) {
 Collection<String> dirNames = conf.getTrimmedStringCollection(
   DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
 if (dirNames.size() == 0 && defaultValue != null) {
  dirNames.add(defaultValue);
 }
 return Util.stringCollectionAsURIs(dirNames);
}
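A usage sketch (the paths are made up, and since the method is package-private the call is assumed to sit inside the declaring class's package):

Configuration conf = new Configuration();
// two comma-separated checkpoint directories; bare paths become file: URIs
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
  "/data/dfs/checkpoint1,/data/dfs/checkpoint2");
Collection<URI> dirs = getCheckpointDirs(conf, null);
// if the key were unset, a non-null defaultValue would be used instead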

Code example source: ch.cern.hadoop/hadoop-hdfs

/**
  * Converts a collection of strings into a collection of URIs.
  * @param names collection of strings to convert to URIs
  * @return collection of URIs
  */
 public static List<URI> stringCollectionAsURIs(
                 Collection<String> names) {
  List<URI> uris = new ArrayList<URI>(names.size());
  for(String name : names) {
   try {
    uris.add(stringAsURI(name));
   } catch (IOException e) {
    LOG.error("Error while processing URI: " + name, e);
   }
  }
  return uris;
 }

Code example source: org.jvnet.hudson.hadoop/hadoop-core

private void dispatchBlocks() {
 long startTime = Util.now();
 this.blocksToReceive = 2*scheduledSize;
 boolean isTimeUp = false;
 // sketch: the excerpt elides the dispatch loop that the continue belongs to
 while (!isTimeUp && blocksToReceive > 0) {
  // ... choose a proxy source and dispatch one block move ...
  if (Util.now()-startTime > MAX_ITERATION_TIME) {
   isTimeUp = true; // stop scheduling new moves for this iteration
   continue;
  }
 }
}

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Interprets the passed string as a URI. In case of error it 
 * assumes the specified string is a file.
 *
 * @param s the string to interpret
 * @return the resulting URI 
 * @throws IOException 
 */
public static URI stringAsURI(String s) throws IOException {
 URI u = null;
 // try to make a URI
 try {
  u = new URI(s);
 } catch (URISyntaxException e){
  LOG.error("Syntax error in URI " + s
    + ". Please check hdfs configuration.", e);
 }
 // if URI is null or scheme is undefined, then assume it's file://
 if(u == null || u.getScheme() == null){
  LOG.warn("Path " + s + " should be specified as a URI "
    + "in configuration files. Please update hdfs configuration.");
  u = fileAsURI(new File(s));
 }
 return u;
}

Code example source: org.apache.hadoop/hadoop-hdfs

private static Collection<URI> getStorageDirs(Configuration conf,
                       String propertyName) {
 Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
 StartupOption startOpt = NameNode.getStartupOption(conf);
 if(startOpt == StartupOption.IMPORT) {
  // In case of IMPORT this will get rid of default directories 
  // but will retain directories specified in hdfs-site.xml
  // When importing image from a checkpoint, the name-node can
  // start with empty set of storage directories.
  Configuration cE = new HdfsConfiguration(false);
  cE.addResource("core-default.xml");
  cE.addResource("core-site.xml");
  cE.addResource("hdfs-default.xml");
  Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
  dirNames.removeAll(dirNames2);
  if(dirNames.isEmpty())
   LOG.warn("!!! WARNING !!!" +
    "\n\tThe NameNode currently runs without persistent storage." +
    "\n\tAny changes to the file system meta-data may be lost." +
    "\n\tRecommended actions:" +
    "\n\t\t- shutdown and restart NameNode with configured \"" 
    + propertyName + "\" in hdfs-site.xml;" +
    "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
    "of the file system meta-data.");
 } else if (dirNames.isEmpty()) {
  dirNames = Collections.singletonList(
    DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
 }
 return Util.stringCollectionAsURIs(dirNames);
}
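The IMPORT branch works by subtraction: it loads only the default resources into a fresh HdfsConfiguration and removes those values from the configured set, so that only directories explicitly listed in hdfs-site.xml survive; if nothing explicit remains, the NameNode starts without persistent storage and logs the warning above.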

Code example source: com.facebook.hadoop/hadoop-core

/**
  * Converts a collection of strings into a collection of URIs.
  * @param names collection of strings to convert to URIs
  * @return collection of URIs
  */
 public static Collection<URI> stringCollectionAsURIs(
                 Collection<String> names) {
  Collection<URI> uris = new ArrayList<URI>(names.size());
  for(String name : names) {
   try {
    uris.add(stringAsURI(name));
   } catch (IOException e) {
    LOG.error("Error while processing URI: " + name, e);
   }
  }
  return uris;
 }

Code example source: org.apache.hadoop/hadoop-hdfs-test

public void testThrottler() throws IOException {
 Configuration conf = new HdfsConfiguration();
 FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
 long bandwidthPerSec = 1024*1024L; // cap at 1 MB/s
 final long TOTAL_BYTES = 6*bandwidthPerSec;
 long bytesToSend = TOTAL_BYTES;
 long start = Util.now();
 DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
 long bytesSent = 1024*512L; // 0.5MB
 throttler.throttle(bytesSent);
 bytesToSend -= bytesSent;
 bytesSent = 1024*768L; // 0.75MB
 throttler.throttle(bytesSent);
 bytesToSend -= bytesSent;
 try {
  Thread.sleep(1000);
 } catch (InterruptedException ignored) {}
 throttler.throttle(bytesToSend);
 long end = Util.now();
 // average rate over the whole run must not exceed the configured bandwidth
 assertTrue(TOTAL_BYTES*1000/(end-start) <= bandwidthPerSec);
}
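With the cap at 1 MB/s, pushing all 6 MB through the throttler has to take at least roughly six seconds, so the measured average rate TOTAL_BYTES*1000/(end-start) should not exceed bandwidthPerSec.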


Code example source: org.apache.hadoop/hadoop-hdfs

// fragment from NameNodeResourceChecker: read the reserved-space threshold,
// then turn the extra checked-volumes property into URIs
duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
  DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);
Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
  .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));
