本文整理了Java中org.apache.hadoop.io.BoundedByteArrayOutputStream.size()
方法的一些代码示例,展示了BoundedByteArrayOutputStream.size()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。BoundedByteArrayOutputStream.size()
方法的具体详情如下:
包路径:org.apache.hadoop.io.BoundedByteArrayOutputStream
类名称:BoundedByteArrayOutputStream
方法名:size
[英]Returns the length of the valid data currently in the buffer.
[中]返回缓冲区中当前有效数据的长度。
代码示例来源:origin: org.apache.hadoop/hadoop-common
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: org.apache.hadoop/hadoop-common
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
代码示例来源:origin: io.hops/hadoop-common
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: com.github.jiayuhan-it/hadoop-common
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: org.apache.apex/malhar-library
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: ch.cern.hadoop/hadoop-common
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
++errorCount;
byte[] key = currentKeyBufferOS.getBuffer();
int len = currentKeyBufferOS.size();
int lastLen = lastKeyBufferOS.size();
if (tfileMeta.getComparator().compare(key, 0, len, lastKey, 0,
lastLen) < 0) {
代码示例来源:origin: org.apache.apex/malhar-library
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
代码示例来源:origin: com.github.jiayuhan-it/hadoop-common
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
代码示例来源:origin: io.prestosql.hadoop/hadoop-apache
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
代码示例来源:origin: io.hops/hadoop-common
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
代码示例来源:origin: ch.cern.hadoop/hadoop-common
/**
* Close the current data block if necessary.
*
* @param bForceFinish
* Force the closure regardless of the block size.
* @throws IOException
*/
void finishDataBlock(boolean bForceFinish) throws IOException {
if (blkAppender == null) {
return;
}
// exceeded the size limit, do the compression and finish the block
if (bForceFinish || blkAppender.getCompressedSize() >= sizeMinBlock) {
// keep tracks of the last key of each data block, no padding
// for now
TFileIndexEntry keyLast =
new TFileIndexEntry(lastKeyBufferOS.getBuffer(), 0, lastKeyBufferOS
.size(), blkRecordCount);
tfileIndex.addEntry(keyLast);
// close the appender
blkAppender.close();
blkAppender = null;
blkRecordCount = 0;
}
}
}
内容来源于网络,如有侵权,请联系作者删除!