org.apache.hadoop.io.BoundedByteArrayOutputStream.size()方法的使用及代码示例

x33g5p2x  于2022-01-17 转载在 其他  
字(5.7k)|赞(0)|评价(0)|浏览(93)

本文整理了Java中org.apache.hadoop.io.BoundedByteArrayOutputStream.size()方法的一些代码示例,展示了BoundedByteArrayOutputStream.size()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。BoundedByteArrayOutputStream.size()方法的具体详情如下:
包路径:org.apache.hadoop.io.BoundedByteArrayOutputStream
类名称:BoundedByteArrayOutputStream
方法名:size

BoundedByteArrayOutputStream.size介绍

[英]Returns the length of the valid data currently in the buffer.
[中]返回缓冲区中当前有效数据的长度。

代码示例

代码示例来源:origin: org.apache.hadoop/hadoop-common

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: org.apache.hadoop/hadoop-common

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

代码示例来源:origin: io.hops/hadoop-common

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: org.apache.apex/malhar-library

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: ch.cern.hadoop/hadoop-common

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
 int lastLen = lastKeyBufferOS.size();
 if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
   lastLen) < 0) {

代码示例来源:origin: org.apache.apex/malhar-library

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

代码示例来源:origin: com.github.jiayuhan-it/hadoop-common

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

代码示例来源:origin: io.prestosql.hadoop/hadoop-apache

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

代码示例来源:origin: io.hops/hadoop-common

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

代码示例来源:origin: ch.cern.hadoop/hadoop-common

/**
 * Finish the data block currently being written, either unconditionally or
 * once its compressed size has reached the minimum block size.
 *
 * @param bForceFinish
 *          when true, close the block regardless of its current size.
 * @throws IOException
 */
 void finishDataBlock(boolean bForceFinish) throws IOException {
  if (blkAppender == null) {
   return; // no block in progress, nothing to finish
  }
  if (!bForceFinish && blkAppender.getCompressedSize() < sizeMinBlock) {
   return; // block still below the minimum size and closure not forced
  }
  // Record the last key of this data block in the index (no padding for now).
  TFileIndexEntry lastKeyEntry =
    new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0,
      lastKeyBufferOS.size(), blkRecordCount);
  tfileIndex.addEntry(lastKeyEntry);
  // Release the appender and reset the per-block record counter.
  blkAppender.close();
  blkAppender = null;
  blkRecordCount = 0;
 }
}

相关文章