Usage of the org.apache.hadoop.io.BoundedByteArrayOutputStream class, with code examples


This article collects a number of Java code examples for the org.apache.hadoop.io.BoundedByteArrayOutputStream class, showing how the class is used in practice. The examples are drawn from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, so they are reasonably representative references. Details of the class:
Package path: org.apache.hadoop.io.BoundedByteArrayOutputStream
Class name: BoundedByteArrayOutputStream

Introduction to BoundedByteArrayOutputStream

A byte-array-backed output stream with a limit. The limit should be smaller than the buffer capacity. The object can be reused through the reset API, choosing a different limit in each round.
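To make that contract concrete, here is a minimal usage sketch (written for this article, not taken from the projects below; the capacity of 16 and the byte values are arbitrary):

import java.io.IOException;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class BoundedStreamDemo {
  public static void main(String[] args) throws IOException {
    // Capacity 16; the initial limit defaults to the full capacity.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(16);
    out.write(new byte[16], 0, 16);    // exactly fills the stream to its limit
    // out.write(0);                   // one more byte would throw (limit reached)

    // Round two: reuse the same backing array with a smaller limit.
    out.reset(4);                      // size() drops to 0, getLimit() == 4
    out.write(new byte[] { 1, 2, 3, 4 }, 0, 4);
    System.out.println(out.getLimit() + " " + out.size());  // prints "4 4"
    byte[] backing = out.getBuffer();  // direct access to the backing array
  }
}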

Code examples

Code example source: origin: org.apache.hadoop/hadoop-common (an identical snippet also appears in io.hops/hadoop-common and ch.cern.hadoop/hadoop-common)

// From the TFile writer setup: both the current-key and last-key buffers
// are bounded by MAX_KEY_SIZE.
currentKeyBufferOS = new BoundedByteArrayOutputStream(MAX_KEY_SIZE);
lastKeyBufferOS = new BoundedByteArrayOutputStream(MAX_KEY_SIZE);
this.conf = conf;

Code example source: origin: org.apache.hadoop/hadoop-common (an identical snippet also appears in com.github.jiayuhan-it/hadoop-common)

/**
 * Close the current data block if necessary.
 *
 * @param bForceFinish
 *          Force the closure regardless of the block size.
 * @throws IOException
 */
void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
    return;
  }
  // exceeded the size limit, do the compression and finish the block
  if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
    // keeps track of the last key of each data block; no padding for now
    TFileIndexEntry keyLast =
        new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
            lastKeyBufferOS.size(), blkRecordCount);
    tfileIndex.addEntry(keyLast);
    // close the appender
    blkAppender.close();
    blkAppender = null;
    blkRecordCount = 0;
  }
}

Code example source: origin: org.apache.hadoop/hadoop-common (an identical snippet also appears in io.hops/hadoop-common and ch.cern.hadoop/hadoop-common)

public KeyRegister(int len) {
 super(currentKeyBufferOS);
 if (len >= 0) {
  currentKeyBufferOS.reset(len);
 } else {
  currentKeyBufferOS.reset();
 }
 expectedLength = len;
}
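When the caller already knows the key length (len >= 0), reset(len) caps the buffer at exactly that many bytes so an over-long key fails fast; otherwise reset() falls back to the full MAX_KEY_SIZE bound set when the buffer was created.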

Code example source: origin: org.apache.hadoop/hadoop-common-test (an identical test fragment also appears in ch.cern.hadoop/hadoop-common and com.github.jiayuhan-it/hadoop-common)

BoundedByteArrayOutputStream stream =
  new BoundedByteArrayOutputStream(SIZE);

// Write to the stream, then check the contents of the backing buffer
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
  Arrays.equals(INPUT, stream.getBuffer()));

// Writing beyond the limit should throw
boolean caughtException = false;
try {
  stream.write(INPUT[0]);
} catch (Exception e) {
  caughtException = true;
}
assertTrue("Writing beyond limit did not throw", caughtException);

// reset() restores the original limit; a full-size write succeeds again
stream.reset();
assertTrue("Limit did not get reset correctly",
  (stream.getLimit() == SIZE));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
  Arrays.equals(INPUT, stream.getBuffer()));

// ... and one byte more than the limit still fails
caughtException = false;
try {
  stream.write(INPUT[0]);
} catch (Exception e) {
  caughtException = true;
}
assertTrue("Writing beyond limit did not throw", caughtException);

// reset(int) installs a smaller limit, so a full-size write now fails
stream.reset(SIZE - 1);
assertTrue("Limit did not get reset correctly",
  (stream.getLimit() == SIZE - 1));
caughtException = false;
try {
  stream.write(INPUT, 0, SIZE);
} catch (Exception e) {
  caughtException = true;
}
assertTrue("Writing beyond limit did not throw", caughtException);
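Taken together, the test pins down the contract: writes up to the current limit succeed and land in the array returned by getBuffer(), any write past the limit throws, reset() restores the original limit, and reset(int) installs a new, smaller one for the next round.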

Code example source: origin: org.apache.hadoop/hadoop-mapred

MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size, 
     boolean primaryMapOutput) {
 this.id = ID.incrementAndGet();
 this.mapId = mapId;
 this.merger = merger;
 type = Type.MEMORY;
 byteStream = new BoundedByteArrayOutputStream(size);
 memory = byteStream.getBuffer();
 this.size = size;
 
 localFS = null;
 disk = null;
 outputPath = null;
 tmpOutputPath = null;
 
 this.primaryMapOutput = primaryMapOutput;
}
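Note that memory is just an alias for byteStream.getBuffer(): the shuffle code that later fills this MapOutput writes directly into the bounded stream's backing array, with size acting as a hard cap on how much map output may be held in memory.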

Code example source: origin: org.apache.hadoop/hadoop-common (an identical snippet also appears in com.github.jiayuhan-it/hadoop-common and ch.cern.hadoop/hadoop-common)

// Wrap an existing byte array; offset and limit define the writable window.
protected BoundedByteArrayOutputStream(byte[] buf, int offset, int limit) {
  resetBuffer(buf, offset, limit);
}

Code example source: origin: org.apache.hadoop/hadoop-mapreduce-client-core

private void fillOutput(InMemoryMapOutput<Text, Text> output) throws IOException {
 BoundedByteArrayOutputStream stream = output.getArrayStream();
 int count = stream.getLimit();
 for (int i=0; i < count; ++i) {
  stream.write(i);
 }
}
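Here getLimit() doubles as the usable capacity of the in-memory output, so the loop fills the buffer exactly up to its bound, one byte at a time.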

Code example source: origin: io.hops/hadoop-mapreduce-client-core

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
             MergeManagerImpl<K, V> merger,
             int size, CompressionCodec codec,
             boolean primaryMapOutput) {
 super(conf, merger, mapId, (long)size, primaryMapOutput);
 this.codec = codec;
 byteStream = new BoundedByteArrayOutputStream(size);
 memory = byteStream.getBuffer();
 if (codec != null) {
  decompressor = CodecPool.getDecompressor(codec);
 } else {
  decompressor = null;
 }
}
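When the fetched map output is compressed, a Decompressor is borrowed from the CodecPool and the data is decompressed as it is read into the bounded in-memory buffer during the shuffle; the BoundedByteArrayOutputStream ensures the result cannot outgrow the space that was reserved for it.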

Code example source: origin: org.apache.hadoop/hadoop-common

// From TFile's key bookkeeping: when the file is sorted, the key being
// added must not compare below the previous key.
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
if (tfileMeta.isSorted() && tfileMeta.getRecordCount() > 0) {
  byte[] lastKey = lastKeyBufferOS.getBuffer();
  int lastLen = lastKeyBufferOS.size();
  if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
      lastLen) < 0) {
    throw new IOException("Keys are not added in sorted order");
  }
}

Code example source: origin: ch.cern.hadoop/hadoop-mapreduce-client-core (an identical snippet also appears in io.prestosql.hadoop/hadoop-apache)

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
             MergeManagerImpl<K, V> merger,
             int size, CompressionCodec codec,
             boolean primaryMapOutput) {
 super(mapId, (long)size, primaryMapOutput);
 this.conf = conf;
 this.merger = merger;
 this.codec = codec;
 byteStream = new BoundedByteArrayOutputStream(size);
 memory = byteStream.getBuffer();
 if (codec != null) {
  decompressor = CodecPool.getDecompressor(codec);
 } else {
  decompressor = null;
 }
}
