This article collects code examples of the Java method org.apache.hadoop.hbase.util.Bytes.writeByteArray() and shows how Bytes.writeByteArray() is used in practice. The examples are drawn mainly from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they have good reference value and should be of some help. Details of the Bytes.writeByteArray() method:
Package path: org.apache.hadoop.hbase.util.Bytes
Class name: Bytes
Method name: writeByteArray
Description: Write byte-array with a WritableUtils.vint prefix.
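Before the project examples, here is a minimal round-trip sketch of the method, assuming the companion Bytes.readByteArray(DataInput) is used to read the value back; the class name WriteByteArrayDemo is only for illustration and is not part of any project quoted below.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;

public class WriteByteArrayDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buffer);

    // Writes a vint length prefix followed by the array contents.
    Bytes.writeByteArray(out, Bytes.toBytes("hello"));
    // A null array is written as a single vint 0.
    Bytes.writeByteArray(out, null);
    out.flush();

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()));
    byte[] first = Bytes.readByteArray(in);   // "hello"
    byte[] second = Bytes.readByteArray(in);  // empty array, not null
    System.out.println(Bytes.toString(first) + " / " + second.length);
  }
}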
Code example source: origin: apache/hbase
public void write(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.m_tableName.getName());
  Bytes.writeByteArray(out, this.m_startRow);
  Bytes.writeByteArray(out, this.m_endRow);
  Bytes.writeByteArray(out, Bytes.toBytes(this.m_regionLocation));
}
Code example source: origin: apache/hbase
/**
 * Writes the field values to the output.
 *
 * @param out The output to write to.
 * @throws IOException When writing the values to the output fails.
 */
@Override
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, VERSION.code);
  Bytes.writeByteArray(out, tableName.getName());
  Bytes.writeByteArray(out, startRow);
  Bytes.writeByteArray(out, endRow);
  Bytes.writeByteArray(out, Bytes.toBytes(regionLocation));
  Bytes.writeByteArray(out, Bytes.toBytes(scan));
  WritableUtils.writeVLong(out, length);
  Bytes.writeByteArray(out, Bytes.toBytes(encodedRegionName));
}
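For context, a hedged sketch of the matching read side follows. It is a fragment that mirrors the field names of the write example above; the real TableSplit.readFields in HBase may differ in details such as version handling.

@Override
public void readFields(DataInput in) throws IOException {
  // The write side starts with a vint version, so read and check it first.
  int version = WritableUtils.readVInt(in);
  if (version != VERSION.code) {
    throw new IOException("Unexpected TableSplit serialization version: " + version);
  }
  tableName = TableName.valueOf(Bytes.readByteArray(in));
  startRow = Bytes.readByteArray(in);
  endRow = Bytes.readByteArray(in);
  regionLocation = Bytes.toString(Bytes.readByteArray(in));
  scan = Bytes.toString(Bytes.readByteArray(in));
  length = WritableUtils.readVLong(in);
  encodedRegionName = Bytes.toString(Bytes.readByteArray(in));
}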
Code example source: origin: apache/hbase
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  // Explicitly writing null to maintain se/deserialize backward compatibility.
  Bytes.writeByteArray(out, table == null ? null : table.getName());
  out.writeBoolean(family != null);
  if (family != null) {
    Bytes.writeByteArray(out, family);
  }
  out.writeBoolean(qualifier != null);
  if (qualifier != null) {
    Bytes.writeByteArray(out, qualifier);
  }
}
}
Code example source: origin: apache/hbase
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  Bytes.writeByteArray(out, Bytes.toBytes(namespace));
}
}
Code example source: origin: apache/hbase
/**
 * Writes this chunk into the given output stream in the root block index
 * format. This format is similar to the {@link HFile} version 1 block
 * index format, except that we store on-disk size of the block instead of
 * its uncompressed size.
 *
 * @param out the data output stream to write the block index to. Typically
 *          a stream writing into an {@link HFile} block.
 * @throws IOException
 */
void writeRoot(DataOutput out) throws IOException {
  for (int i = 0; i < blockKeys.size(); ++i) {
    out.writeLong(blockOffsets.get(i));
    out.writeInt(onDiskDataSizes.get(i));
    Bytes.writeByteArray(out, blockKeys.get(i));
  }
}
Code example source: origin: apache/hbase
/**
 * Write byte-array with a WritableUtils.vint prefix.
 * @param out output stream to be written to
 * @param b array to write
 * @throws IOException e
 */
public static void writeByteArray(final DataOutput out, final byte [] b)
    throws IOException {
  if (b == null) {
    WritableUtils.writeVInt(out, 0);
  } else {
    writeByteArray(out, b, 0, b.length);
  }
}
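A small usage sketch of the offset/length overload that the two-argument version above delegates to; SliceWriter and writeRowPrefix are illustrative names, not part of HBase.

import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;

class SliceWriter {
  // Writes only the first prefixLength bytes of row, still with a vint length prefix.
  static void writeRowPrefix(DataOutput out, byte[] row, int prefixLength) throws IOException {
    Bytes.writeByteArray(out, row, 0, prefixLength);
  }
}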
Code example source: origin: apache/hbase
/**
 * Write the number of regions moved in the first line followed by regions moved in subsequent
 * lines
 */
private void writeFile(String filename, List<RegionInfo> movedRegions) throws IOException {
  try (DataOutputStream dos = new DataOutputStream(
      new BufferedOutputStream(new FileOutputStream(filename)))) {
    dos.writeInt(movedRegions.size());
    for (RegionInfo region : movedRegions) {
      Bytes.writeByteArray(dos, RegionInfo.toByteArray(region));
    }
  } catch (IOException e) {
    LOG.error(
      "ERROR: Was Not able to write regions moved to output file but moved " + movedRegions
        .size() + " regions", e);
    throw e;
  }
}
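A hedged read-side counterpart to writeFile above: it reads the int count, then one vint-prefixed byte array per region. MovedRegionsReader and readFile are illustrative names, and RegionInfo.parseFromOrNull is assumed here to be the inverse of RegionInfo.toByteArray.

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

class MovedRegionsReader {
  // Reads the region count written with writeInt, then one vint-prefixed
  // byte array per region, and rebuilds each RegionInfo from its bytes.
  static List<RegionInfo> readFile(String filename) throws IOException {
    List<RegionInfo> movedRegions = new ArrayList<>();
    try (DataInputStream dis = new DataInputStream(
        new BufferedInputStream(new FileInputStream(filename)))) {
      int count = dis.readInt();
      for (int i = 0; i < count; i++) {
        // parseFromOrNull is assumed as the inverse of RegionInfo.toByteArray.
        movedRegions.add(RegionInfo.parseFromOrNull(Bytes.readByteArray(dis)));
      }
    }
    return movedRegions;
  }
}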
Code example source: origin: apache/hbase
/**
 * This is modeled after {@link CompoundBloomFilterWriter.MetaWriter} for simplicity,
 * although the two metadata formats do not have to be consistent. This
 * does have to be consistent with how {@link
 * CompoundBloomFilter#CompoundBloomFilter(DataInput,
 * org.apache.hadoop.hbase.io.hfile.HFile.Reader)} reads fields.
 */
@Override
public void write(DataOutput out) throws IOException {
  out.writeInt(VERSION);
  out.writeLong(getByteSize());
  out.writeInt(prevChunk.getHashCount());
  out.writeInt(prevChunk.getHashType());
  out.writeLong(getKeyCount());
  out.writeLong(getMaxKeys());
  // Fields that don't have equivalents in ByteBloomFilter.
  out.writeInt(numChunks);
  if (comparator != null) {
    Bytes.writeByteArray(out, Bytes.toBytes(comparator.getClass().getName()));
  } else {
    // Internally writes a 0 vint if the byte[] is null
    Bytes.writeByteArray(out, null);
  }
  // Write a single-level index without compression or block header.
  bloomBlockIndexWriter.writeSingleLevelIndex(out, "Bloom filter");
}
}
Code example source: origin: apache/hbase
@Override
public void write(DataOutput out) throws IOException {
  TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder()
      .setTable(ProtobufUtil.toTableSchema(htd))
      .setRegion(HRegionInfo.convert(regionInfo));
  for (String location : locations) {
    builder.addLocations(location);
  }
  TableSnapshotRegionSplit split = builder.build();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  split.writeTo(baos);
  baos.close();
  byte[] buf = baos.toByteArray();
  out.writeInt(buf.length);
  out.write(buf);
  Bytes.writeByteArray(out, Bytes.toBytes(scan));
  Bytes.writeByteArray(out, Bytes.toBytes(restoreDir));
}
Code example source: origin: apache/phoenix
@Override
public void write(DataOutput output) throws IOException {
  super.write(output);
  Bytes.writeByteArray(output, cf);
  Bytes.writeByteArray(output, cq);
}
Code example source: origin: forcedotcom/phoenix
@Override
public void write(DataOutput output) throws IOException {
  super.write(output);
  Bytes.writeByteArray(output, cf);
  Bytes.writeByteArray(output, cq);
}
Code example source: origin: org.apache.hbase/hbase-client
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  Bytes.writeByteArray(out, user);
}
}
Code example source: origin: org.apache.hbase/hbase-client
@Override
public void write(DataOutput out) throws IOException {
  super.write(out);
  // Explicitly writing null to maintain se/deserialize backward compatibility.
  Bytes.writeByteArray(out, (table == null) ? null : table.getName());
  out.writeBoolean(family != null);
  if (family != null) {
    Bytes.writeByteArray(out, family);
  }
  out.writeBoolean(qualifier != null);
  if (qualifier != null) {
    Bytes.writeByteArray(out, qualifier);
  }
  out.writeBoolean(namespace != null);
  if (namespace != null) {
    Bytes.writeByteArray(out, Bytes.toBytes(namespace));
  }
}
}
Code example source: origin: forcedotcom/phoenix
@Override
public void write(DataOutput output) throws IOException {
  super.write(output);
  schema.write(output);
  output.writeInt(position);
  Bytes.writeByteArray(output, name);
}
Code example source: origin: apache/phoenix
@Override
public void write(DataOutput output) throws IOException {
  super.write(output);
  try {
    output.writeBoolean(allCFs);
    if (!allCFs) {
      Bytes.writeByteArray(output, essentialCF);
    }
  } catch (Throwable t) { // Catches incompatibilities during reading/writing and doesn't retry
    ServerUtil.throwIOException("MultiKeyValueComparisonFilter failed during writing", t);
  }
}
Code example source: origin: forcedotcom/phoenix
/**
 * Internal write the underlying data for the entry - this does not do any special prefixing. Writing should be done
 * via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure consistent reading/writing of
 * {@link IndexedKeyValue}s.
 *
 * @param out
 *          to write data to. Does not close or flush the passed object.
 * @throws IOException
 *           if there is a problem writing the underlying data
 */
void writeData(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.indexTableName.get());
  out.writeUTF(this.mutation.getClass().getName());
  this.mutation.write(out);
}
Code example source: origin: apache/phoenix
/**
 * Internal write the underlying data for the entry - this does not do any special prefixing. Writing should be done
 * via {@link KeyValueCodec#write(DataOutput, KeyValue)} to ensure consistent reading/writing of
 * {@link IndexedKeyValue}s.
 *
 * @param out
 *          to write data to. Does not close or flush the passed object.
 * @throws IOException
 *           if there is a problem writing the underlying data
 */
void writeData(DataOutput out) throws IOException {
  Bytes.writeByteArray(out, this.indexTableName.get());
  MutationProto m = toMutationProto(this.mutation);
  Bytes.writeByteArray(out, m.toByteArray());
}
Code example source: origin: apache/phoenix
private void serializeViewConstantsIntoScan(byte[][] viewConstants, Scan scan) {
  ByteArrayOutputStream stream = new ByteArrayOutputStream();
  try {
    DataOutputStream output = new DataOutputStream(stream);
    WritableUtils.writeVInt(output, viewConstants.length);
    for (byte[] viewConstant : viewConstants) {
      Bytes.writeByteArray(output, viewConstant);
    }
    scan.setAttribute(BaseScannerRegionObserver.VIEW_CONSTANTS, stream.toByteArray());
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    try {
      stream.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}
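A sketch of the corresponding deserialization, which reads the attribute back as a vint count followed by vint-prefixed arrays. ViewConstantsReader and deserializeViewConstants are illustrative names; the attribute name passed in is expected to be the same BaseScannerRegionObserver.VIEW_CONSTANTS key used by the writer.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;

class ViewConstantsReader {
  // Reads back what serializeViewConstantsIntoScan wrote: a vint count,
  // then one vint-prefixed byte array per view constant.
  static byte[][] deserializeViewConstants(Scan scan, String attributeName) throws IOException {
    byte[] serialized = scan.getAttribute(attributeName);
    if (serialized == null) {
      return null;
    }
    try (DataInputStream input = new DataInputStream(new ByteArrayInputStream(serialized))) {
      int count = WritableUtils.readVInt(input);
      byte[][] viewConstants = new byte[count][];
      for (int i = 0; i < count; i++) {
        viewConstants[i] = Bytes.readByteArray(input);
      }
      return viewConstants;
    }
  }
}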
Code example source: origin: forcedotcom/phoenix
@Override
public void write(DataOutput output) throws IOException {
  if (regionGuidePosts == null) {
    WritableUtils.writeVInt(output, 0);
    return;
  }
  WritableUtils.writeVInt(output, regionGuidePosts.size());
  for (Entry<String, byte[][]> entry : regionGuidePosts.entrySet()) {
    WritableUtils.writeString(output, entry.getKey());
    byte[][] value = entry.getValue();
    WritableUtils.writeVInt(output, value.length);
    for (int i = 0; i < value.length; i++) {
      Bytes.writeByteArray(output, value[i]);
    }
  }
}
}
Code example source: origin: forcedotcom/phoenix
@Override
public void write(DataOutput output) throws IOException {
  Bytes.writeByteArray(output, name.getBytes());
  Bytes.writeByteArray(output, familyName == null ? ByteUtil.EMPTY_BYTE_ARRAY : familyName.getBytes());
  WritableUtils.writeVInt(output, dataType.ordinal());
  WritableUtils.writeVInt(output, maxLength == null ? NO_MAXLENGTH : maxLength);
  WritableUtils.writeVInt(output, scale == null ? NO_SCALE : scale);
  output.writeBoolean(nullable);
  WritableUtils.writeVInt(output, position);
  WritableUtils.writeVInt(output, ColumnModifier.toSystemValue(columnModifier));
}