Usage of org.apache.hadoop.hdfs.protocol.Block.setNumBytes(), with code examples

Reposted by x33g5p2x on 2022-01-17

This article collects Java code examples for the org.apache.hadoop.hdfs.protocol.Block.setNumBytes() method and shows how it is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as a useful reference. Details of Block.setNumBytes() are as follows:

Package path: org.apache.hadoop.hdfs.protocol.Block
Class name: Block
Method name: setNumBytes

About Block.setNumBytes

Sets the block's length in bytes (its numBytes field). A Block carries a block ID, a length, and a generation stamp; as several examples below show, callers sometimes store sentinel values such as BlockCommand.NO_ACK or BlockFlags.DELETED in the length field to flag special block states.
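
Before the project examples, here is a minimal usage sketch. It is a hypothetical snippet, not taken from any of the sources below; the block id, length, and generation-stamp values are made up for illustration.

// Minimal sketch: create a Block and set its length in bytes.
// All numeric values are illustrative only.
import org.apache.hadoop.hdfs.protocol.Block;

public class SetNumBytesExample {
 public static void main(String[] args) {
  // Block(blockId, numBytes, generationStamp)
  Block blk = new Block(1073741825L, 0L, 1001L);
  blk.setNumBytes(134217728L); // record a length of 128 MiB
  System.out.println(blk.getNumBytes()); // prints 134217728
 }
}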

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

// Derive the internal block that a given storage holds inside an
// erasure-coded block group: the internal block id is the group id plus
// the block's index in the group, and its length depends on that index.
public DBlock getInternalBlock(StorageGroup storage) {
 int idxInLocs = locations.indexOf(storage);
 if (idxInLocs == -1) {
  return null;
 }
 byte idxInGroup = indices[idxInLocs];
 long blkId = getBlock().getBlockId() + idxInGroup;
 long numBytes = getInternalBlockLength(getNumBytes(), cellSize,
   dataBlockNum, idxInGroup);
 Block blk = new Block(getBlock());
 blk.setBlockId(blkId);
 blk.setNumBytes(numBytes);
 DBlock dblk = new DBlock(blk);
 dblk.addLocation(storage);
 return dblk;
}

Code example source: linkedin/dynamometer, org.apache.hadoop/hadoop-hdfs-test, ch.cern.hadoop/hadoop-hdfs

@Override
synchronized public void setNumBytes(long length) {
 if (!finalized) {
  // still being written: only track the bytes received so far
  bytesRcvd = length;
 } else {
  // finalized: update the stored block's length directly
  theBlock.setNumBytes(length);
 }
}

Code example source: io.prestosql.hadoop/hadoop-apache, ch.cern.hadoop/hadoop-hdfs, org.apache.hadoop/hadoop-hdfs-client

// a thin wrapper (likely ExtendedBlock) delegating to the underlying Block
public void setNumBytes(final long len) {
 block.setNumBytes(len);
}

Code example source: com.facebook.hadoop/hadoop-core

public void setBlockSize(long size) {
 b.setNumBytes(size);
}

Code example source: com.facebook.hadoop/hadoop-core

// Flag a block as deleted by storing the BlockFlags.DELETED sentinel
// in its length field.
public static void markAsDeleted(Block block) {
 block.setNumBytes(BlockFlags.DELETED);
}

Code example source: org.apache.hadoop/hadoop-hdfs

// Excerpt (not a complete method): compute an internal block of a striped
// block group (group id + index) before building a BlockMovingInfo that
// schedules its move between storage types.
long blkId = blk.getBlockId() + blkIndex;
blk.setBlockId(blkId);
blk.setNumBytes(numBytes);
BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blk, sourceNode,
  targetNode, sourceStorageType, targetStorageType);

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Set the last block's size to the file's preferred block size.
 * @param pendingFile file inode under construction
 */
private void setLastBlockSize(INodeFileUnderConstruction pendingFile) {
 Block block = pendingFile.getLastBlock();
 if (block != null) {
  block.setNumBytes(pendingFile.getPreferredBlockSize());
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

private void corruptBlockLen(final Block block)
  throws IOException {
 if (block == null) {
  throw new IOException("Block isn't supposed to be null");
 }
 long oldLen = block.getNumBytes();
 // perturb the length by a random delta; the assert below guards the
 // (unlikely) case that the new length equals the old one
 long newLen = oldLen - rand.nextLong();
 assertTrue("Old and new length shouldn't be the same",
   block.getNumBytes() != newLen);
 block.setNumBytes(newLen);
 if (LOG.isDebugEnabled()) {
  LOG.debug("Length of " + block.getBlockName() +
    " is changed to " + newLen + " from " + oldLen);
 }
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

@Override
public Block getStoredBlock(long blkid) throws IOException {
 Block b = new Block(blkid);
 BInfo binfo = blockMap.get(b);
 if (binfo == null) {
  return null;
 }
 // rebuild the Block with the stored generation stamp and length
 b.setGenerationStamp(binfo.getGenerationStamp());
 b.setNumBytes(binfo.getNumBytes());
 return b;
}

Code example source: ch.cern.hadoop/hadoop-hdfs, io.prestosql.hadoop/hadoop-apache

public void removeBlock(Block block) {
 assert namesystem.hasWriteLock();
 // No need to ACK blocks that are being removed entirely
 // from the namespace, since the removal of the associated
 // file already removes them from the block map below.
 block.setNumBytes(BlockCommand.NO_ACK);
 addToInvalidates(block);
 removeBlockFromMap(block);
 // Remove the block from pendingReplications and neededReplications
 pendingReplications.remove(block);
 neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
 postponedMisreplicatedBlocks.remove(block);
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

BInfo(Block b, boolean forWriting) throws IOException {
 theBlock = new Block(b);
 if (theBlock.getNumBytes() < 0) {
  theBlock.setNumBytes(0);
 }
 // expected length; the actual length may be more - we find out at finalize
 if (!storage.alloc(theBlock.getNumBytes())) {
  DataNode.LOG.warn("Lack of free storage on a block alloc");
  throw new IOException("Creating block, no free space available");
 }
 if (forWriting) {
  finalized = false;
  oStream = new SimulatedOutputStream();
 } else {
  finalized = true;
  oStream = null;
 }
}

Code example source: linkedin/dynamometer

BInfo(String bpid, Block b, boolean forWriting) throws IOException {
 theBlock = new Block(b);
 if (theBlock.getNumBytes() < 0) {
  theBlock.setNumBytes(0);
 }
 // expected length; the actual length may be more - we find out at finalize
 if (!getStorage(theBlock).alloc(bpid, theBlock.getNumBytes())) {
  DataNode.LOG.warn("Lack of free storage on a block alloc");
  throw new IOException("Creating block, no free space available");
 }
 if (forWriting) {
  finalized = false;
  oStream = new SimulatedOutputStream();
 } else {
  finalized = true;
  oStream = null;
 }
}

Code example source: com.facebook.hadoop/hadoop-core

// Unpack a block's id, length, and generation stamp from a flat long[]
// layout, addressed via the index2* helper functions.
public static void getBlockInfo(Block b, long[] blockArray, int index) {
 b.setBlockId(blockArray[index2BlockId(index)]);
 b.setNumBytes(blockArray[index2BlockLen(index)]);
 b.setGenerationStamp(blockArray[index2BlockGenStamp(index)]);
}

Code example source: com.facebook.hadoop/hadoop-core

public Boolean call() throws Exception {
 try {
  // copy the block file locally; across datanodes the source file is
  // given explicitly, otherwise it is resolved from the local dataset
  if (crossDatanode) {
   data.copyBlockLocal(srcFileSystem, srcBlockFile,
     srcNamespaceId, srcBlock, dstNamespaceId, dstBlock);
  } else {
   data.copyBlockLocal(srcFileSystem,
     data.getBlockFile(srcNamespaceId, srcBlock),
     srcNamespaceId, srcBlock, dstNamespaceId, dstBlock);
  }
  // carry the source block's length over to the destination block
  dstBlock.setNumBytes(srcBlock.getNumBytes());
  notifyNamenodeReceivedBlock(dstNamespaceId, dstBlock, null);
  blockScanner.addBlock(dstNamespaceId, dstBlock);
 } catch (Exception e) {
  LOG.warn("Local block copy for src : " + srcBlock.getBlockName()
    + ", dst : " + dstBlock.getBlockName() + " failed", e);
  throw e;
 }
 return true;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

BInfo(String bpid, Block b, boolean forWriting) throws IOException {
 theBlock = new Block(b);
 if (theBlock.getNumBytes() < 0) {
  theBlock.setNumBytes(0);
 }
 // expected length; the actual length may be more - we find out at finalize
 if (!storage.alloc(bpid, theBlock.getNumBytes())) {
  DataNode.LOG.warn("Lack of free storage on a block alloc");
  throw new IOException("Creating block, no free space available");
 }
 if (forWriting) {
  finalized = false;
  oStream = new SimulatedOutputStream();
 } else {
  finalized = true;
  oStream = null;
 }
}
