Usage and code examples of org.apache.hadoop.hdfs.protocol.Block.setBlockId()


This article collects a number of Java code examples for the org.apache.hadoop.hdfs.protocol.Block.setBlockId() method and shows how Block.setBlockId() is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. The details of Block.setBlockId() are as follows:
Package: org.apache.hadoop.hdfs.protocol
Class: Block
Method: setBlockId

Block.setBlockId overview

The upstream JavaDoc provides no description for this method. As the examples below show, setBlockId(long) is a plain setter that assigns a new numeric block ID to an existing Block object, leaving its length and generation stamp unchanged.
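
Before the collected examples, here is a minimal, self-contained sketch of the setter in isolation; the block ID, length, and generation stamp values are made up purely for illustration.

import org.apache.hadoop.hdfs.protocol.Block;

public class SetBlockIdSketch {
  public static void main(String[] args) {
    // Construct a block with an arbitrary ID, length and generation stamp
    // (all three values are made up for this sketch).
    Block b = new Block(1073741825L, 134217728L, 1001L);

    // Re-point the same Block object at a different block ID; the length
    // and generation stamp are left untouched.
    b.setBlockId(b.getBlockId() + 1);

    System.out.println(b.getBlockId());         // 1073741826
    System.out.println(b.getNumBytes());        // 134217728
    System.out.println(b.getGenerationStamp()); // 1001
  }
}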

Code examples

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * @param b A block object whose id is set to the starting point for check
 * @return true if any ID in the range
 *     {id, id+HdfsConstants.MAX_BLOCKS_IN_GROUP} is pointed-to by a stored
 *     block.
 */
private boolean hasValidBlockInRange(Block b) {
  final long id = b.getBlockId();
  for (int i = 0; i < MAX_BLOCKS_IN_GROUP; i++) {
    b.setBlockId(id + i);
    if (blockManager.getStoredBlock(b) != null) {
      return true;
    }
  }
  return false;
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Identify the block stored in the given datanode storage. Note that
 * the returned block has the same block ID as the one seen/reported by the
 * DataNode.
 */
Block getBlockOnStorage(DatanodeStorageInfo storage) {
  int index = getStorageBlockIndex(storage);
  if (index < 0) {
    return null;
  } else {
    Block block = new Block(this);
    block.setBlockId(this.getBlockId() + index);
    return block;
  }
}
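
For striped (erasure-coded) files, the block reported by a DataNode is an internal block whose ID is the block group's base ID plus that storage's index in the group, which is exactly what getBlockOnStorage() reconstructs above. A standalone sketch of that arithmetic, with a made-up group ID and index:

import org.apache.hadoop.hdfs.protocol.Block;

public class InternalBlockIdSketch {
  public static void main(String[] args) {
    // Both the group ID and the storage index are assumptions for this sketch.
    Block groupBlock = new Block(-9223372036854775792L, 0L, 1001L); // made-up block group ID
    int storageIndex = 3;                                           // made-up index within the group

    Block internalBlock = new Block(groupBlock);  // copies ID, length and generation stamp
    internalBlock.setBlockId(groupBlock.getBlockId() + storageIndex);

    System.out.println(internalBlock.getBlockId()); // group ID + 3
  }
}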

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
  Block b = new Block(super.nextValue());
  // There may be an occasional conflict with randomly generated
  // block IDs. Skip over the conflicts.
  while (isValidBlock(b)) {
    b.setBlockId(super.nextValue());
  }
  if (b.getBlockId() < 0) {
    throw new IllegalStateException("All positive block IDs are used, " +
      "wrapping to negative IDs, " +
      "which might conflict with erasure coded block groups.");
  }
  return b.getBlockId();
}

Code example source: org.apache.hadoop/hadoop-hdfs

/**
 * Returns the internal block of this erasure-coded block group that is
 * stored on the given storage group, or null if that storage holds none.
 * The internal block's ID is the group's base ID plus its index in the group.
 */
public DBlock getInternalBlock(StorageGroup storage) {
  int idxInLocs = locations.indexOf(storage);
  if (idxInLocs == -1) {
    return null;
  }
  byte idxInGroup = indices[idxInLocs];
  long blkId = getBlock().getBlockId() + idxInGroup;
  long numBytes = getInternalBlockLength(getNumBytes(), cellSize,
      dataBlockNum, idxInGroup);
  Block blk = new Block(getBlock());
  blk.setBlockId(blkId);
  blk.setNumBytes(numBytes);
  DBlock dblk = new DBlock(blk);
  dblk.addLocation(storage);
  return dblk;
}

Code example source: org.apache.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
  skipTo((getCurrentValue() & ~BLOCK_GROUP_INDEX_MASK) + MAX_BLOCKS_IN_GROUP);
  // Make sure there's no conflict with existing random block IDs
  final Block b = new Block(getCurrentValue());
  while (hasValidBlockInRange(b)) {
    skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
    b.setBlockId(getCurrentValue());
  }
  if (b.getBlockId() >= 0) {
    throw new IllegalStateException("All negative block group IDs are used, "
      + "growing into positive IDs, "
      + "which might conflict with non-erasure coded blocks.");
  }
  return getCurrentValue();
}
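
The generator above reserves the low bits of each block group ID for the index of the internal block inside the group. MAX_BLOCKS_IN_GROUP is 16 in current Hadoop releases, so BLOCK_GROUP_INDEX_MASK covers the low 4 bits; treat these constants as assumptions if you are on a different version. A standalone sketch of that bit arithmetic:

public class BlockGroupIdArithmeticSketch {
  // Assumed to mirror the constants used by SequentialBlockGroupIdGenerator.
  static final long MAX_BLOCKS_IN_GROUP = 16;
  static final long BLOCK_GROUP_INDEX_MASK = MAX_BLOCKS_IN_GROUP - 1;

  public static void main(String[] args) {
    long internalBlockId = Long.MIN_VALUE + 15;                   // made-up internal block ID
    long groupId = internalBlockId & ~BLOCK_GROUP_INDEX_MASK;     // base ID of its block group
    long indexInGroup = internalBlockId & BLOCK_GROUP_INDEX_MASK; // position within the group

    System.out.println(groupId == Long.MIN_VALUE); // true
    System.out.println(indexInGroup);              // 15
  }
}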

Code example source: io.prestosql.hadoop/hadoop-apache

public void setBlockId(final long bid) {
 block.setBlockId(bid);
}

Code example source: org.apache.hadoop/hadoop-hdfs-client

public void setBlockId(final long bid) {
 block.setBlockId(bid);
}

Code example source: ch.cern.hadoop/hadoop-hdfs

public void setBlockId(final long bid) {
 block.setBlockId(bid);
}

Code example source: org.apache.hadoop/hadoop-hdfs

Block blk = new Block(ExtendedBlock.getLocalBlock(extBlock));
long blkId = blk.getBlockId() + blkIndex;
blk.setBlockId(blkId);
blk.setNumBytes(numBytes);
BlockMovingInfo blkMovingInfo = new BlockMovingInfo(blk, sourceNode,

Code example source: org.apache.hadoop/hadoop-hdfs

Block bi = new Block(storedBlock);
if (storedBlock.isStriped()) {
 bi.setBlockId(bi.getBlockId() + i);

Code example source: org.apache.hadoop/hadoop-hdfs

corrupted.setBlockId(b.getStored().getBlockId());

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Allocate a block at the given pending filename
 *
 * @param src    path to the file
 * @param inodes INode representing each of the components of src.
 *               <code>inodes[inodes.length-1]</code> is the INode for the file.
 */
private Block allocateBlock(String src, INode[] inodes) throws IOException {
  Block b = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0);
  while (isValidBlock(b)) {
    b.setBlockId(FSNamesystem.randBlockId.nextLong());
  }
  b.setGenerationStamp(getGenerationStamp());
  b = dir.addBlock(src, inodes, b);
  return b;
}

Code example source: ch.cern.hadoop/hadoop-hdfs

@Override // NumberGenerator
public long nextValue() {
  Block b = new Block(super.nextValue());
  // There may be an occasional conflict with randomly generated
  // block IDs. Skip over the conflicts.
  while (isValidBlock(b)) {
    b.setBlockId(super.nextValue());
  }
  return b.getBlockId();
}

Code example source: com.facebook.hadoop/hadoop-core

/**
 * Populates Block b from a flattened long[] in which each block's ID,
 * length and generation stamp are stored at computed indices.
 */
public static void getBlockInfo(Block b, long[] blockArray, int index) {
  b.setBlockId(blockArray[index2BlockId(index)]);
  b.setNumBytes(blockArray[index2BlockLen(index)]);
  b.setGenerationStamp(blockArray[index2BlockGenStamp(index)]);
}
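
The helper above reads a block's fields back out of a flattened long[]. The index2BlockId/index2BlockLen/index2BlockGenStamp helpers are not shown in the snippet, so the sketch below assumes a hypothetical layout of three consecutive longs per block; the real layout in hadoop-core may differ.

import org.apache.hadoop.hdfs.protocol.Block;

public class PackedBlockArraySketch {
  // Hypothetical index helpers: three consecutive longs per block
  // (ID, length, generation stamp). Not taken from the original code.
  static int index2BlockId(int i)       { return 3 * i;     }
  static int index2BlockLen(int i)      { return 3 * i + 1; }
  static int index2BlockGenStamp(int i) { return 3 * i + 2; }

  public static void main(String[] args) {
    long[] packed = { 42L, 1024L, 1001L }; // one block: ID, length, generation stamp

    Block b = new Block();
    b.setBlockId(packed[index2BlockId(0)]);
    b.setNumBytes(packed[index2BlockLen(0)]);
    b.setGenerationStamp(packed[index2BlockGenStamp(0)]);

    System.out.println(b); // prints the block as blk_<id>_<genstamp>
  }
}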

Code example source: io.prestosql.hadoop/hadoop-apache

@Override // NumberGenerator
public long nextValue() {
  Block b = new Block(super.nextValue());
  // There may be an occasional conflict with randomly generated
  // block IDs. Skip over the conflicts.
  while (isValidBlock(b)) {
    b.setBlockId(super.nextValue());
  }
  return b.getBlockId();
}

Code example source: org.apache.hadoop/hadoop-hdfs-test

blk.setBlockId(blkid-1);
DataTransferProtocol.Sender.opReadBlock(sendOut, blk, 0L, fileLen, "cl",
   BlockTokenSecretManager.DUMMY_TOKEN);
blk.setBlockId(blkid);
DataTransferProtocol.Sender.opReadBlock(sendOut, blk, -1L, fileLen, "cl",
   BlockTokenSecretManager.DUMMY_TOKEN);

Code example source: org.apache.hadoop/hadoop-hdfs-test

b.setBlockId(0);
assertNull(map.get(b));

Code example source: org.apache.hadoop/hadoop-hdfs-test

@Test
public void testRemove() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.remove(null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) { }

  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(b));

  // Test 3: remove failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(b));

  // Test 4: remove success
  assertNotNull(map.remove(block));

  // Test 5: remove failure - invalid blockID
  assertNull(map.remove(0));

  // Test 6: remove success
  map.add(new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(block.getBlockId()));
}

Code example source: ch.cern.hadoop/hadoop-hdfs

b.setBlockId(0);
assertNull(map.get(bpid, b));

Code example source: ch.cern.hadoop/hadoop-hdfs

@Test
public void testRemove() {
  // Test 1: null argument throws invalid argument exception
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) { }

  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(bpid, b));

  // Test 3: remove failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(bpid, b));

  // Test 4: remove success
  assertNotNull(map.remove(bpid, block));

  // Test 5: remove failure - invalid blockID
  assertNull(map.remove(bpid, 0));

  // Test 6: remove success
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
