Usage of org.apache.hadoop.io.BoundedByteArrayOutputStream.getBuffer() with code examples


This article collects Java code examples for the org.apache.hadoop.io.BoundedByteArrayOutputStream.getBuffer() method and shows how it is used in practice. The examples are extracted from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method:

Package: org.apache.hadoop.io
Class: BoundedByteArrayOutputStream
Method: getBuffer

About BoundedByteArrayOutputStream.getBuffer

Returns the underlying buffer. Data is only valid up to #size().
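
The caveat in that Javadoc matters in practice: getBuffer() exposes the stream's internal, pre-allocated backing array, whose length equals the stream's capacity rather than the number of bytes written, so callers should only read the first size() bytes. Below is a minimal, self-contained sketch of that contract (the class name GetBufferDemo and its variable names are illustrative, not taken from the projects quoted later):

import java.util.Arrays;
import org.apache.hadoop.io.BoundedByteArrayOutputStream;

public class GetBufferDemo {
  public static void main(String[] args) throws Exception {
    // A stream bounded to 64 bytes; the backing array is allocated up front.
    BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(64);
    out.write("hello".getBytes("UTF-8"));

    byte[] buf = out.getBuffer(); // the underlying 64-byte array
    int valid = out.size();       // only the first 5 bytes are meaningful

    // Copy out the valid region instead of consuming buf directly.
    System.out.println(new String(Arrays.copyOf(buf, valid), "UTF-8"));
  }
}

The examples below rely on exactly this pre-allocation behavior: the shuffle code calls getBuffer() once, immediately after constructing the stream, to obtain a fixed-size in-memory region for fetched map output.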

Code examples

Code example source: org.apache.hadoop/hadoop-common (the same snippet also appears verbatim in com.github.jiayuhan-it/hadoop-common, org.apache.apex/malhar-library, io.prestosql.hadoop/hadoop-apache, io.hops/hadoop-common, and ch.cern.hadoop/hadoop-common)

byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
byte[] lastKey = lastKeyBufferOS.getBuffer();
int lastLen = lastKeyBufferOS.size();
// reject keys that are appended out of sorted order
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
    lastLen) < 0) {
  throw new IOException("Keys are not added in sorted order");
}

Code example source: org.apache.hadoop/hadoop-common (the same method also appears verbatim in com.github.jiayuhan-it/hadoop-common, org.apache.apex/malhar-library, io.prestosql.hadoop/hadoop-apache, io.hops/hadoop-common, and ch.cern.hadoop/hadoop-common)

/**
 * Close the current data block if necessary.
 *
 * @param bForceFinish
 *          Force the closure regardless of the block size.
 * @throws IOException
 */
void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
    return;
  }
  // exceeded the size limit, do the compression and finish the block
  if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
    // keep track of the last key of each data block, no padding for now
    TFileIndexEntry keyLast =
        new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
            lastKeyBufferOS.size(), blkRecordCount);
    tfileIndex.addEntry(keyLast);
    // close the appender
    blkAppender.close();
    blkAppender = null;
    blkRecordCount = 0;
  }
}

Code example source: org.apache.hadoop/hadoop-mapred

MapOutput(TaskAttemptID mapId, MergeManager<K,V> merger, int size,
          boolean primaryMapOutput) {
  this.id = ID.incrementAndGet();
  this.mapId = mapId;
  this.merger = merger;
  type = Type.MEMORY;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  this.size = size;

  localFS = null;
  disk = null;
  outputPath = null;
  tmpOutputPath = null;

  this.primaryMapOutput = primaryMapOutput;
}

Code example source: io.hops/hadoop-mapreduce-client-core

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(conf, merger, mapId, (long) size, primaryMapOutput);
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}

Code example source: ch.cern.hadoop/hadoop-mapreduce-client-core (identical copies appear in io.prestosql.hadoop/hadoop-apache and com.github.jiayuhan-it/hadoop-mapreduce-client-core)

public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId,
                         MergeManagerImpl<K, V> merger,
                         int size, CompressionCodec codec,
                         boolean primaryMapOutput) {
  super(mapId, (long) size, primaryMapOutput);
  this.conf = conf;
  this.merger = merger;
  this.codec = codec;
  byteStream = new BoundedByteArrayOutputStream(size);
  memory = byteStream.getBuffer();
  if (codec != null) {
    decompressor = CodecPool.getDecompressor(codec);
  } else {
    decompressor = null;
  }
}

Code example source: org.apache.hadoop/hadoop-common-test (identical copies appear in ch.cern.hadoop/hadoop-common and com.github.jiayuhan-it/hadoop-common)

assertTrue("Array Contents Mismatch",
    Arrays.equals(INPUT, stream.getBuffer()));
stream.write(INPUT, 0, SIZE);
assertTrue("Array Contents Mismatch",
    Arrays.equals(INPUT, stream.getBuffer()));
