Usage and code examples of the org.apache.hadoop.hbase.util.Bytes.readByteArray() method


This article collects a number of Java code examples for the org.apache.hadoop.hbase.util.Bytes.readByteArray() method and shows how Bytes.readByteArray() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they serve as useful references. Details of Bytes.readByteArray() are as follows:
Package path: org.apache.hadoop.hbase.util.Bytes
Class name: Bytes
Method name: readByteArray

About Bytes.readByteArray

Read a byte array written with a WritableUtils.vint length prefix.
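
As a minimal round-trip sketch (assuming the hbase-common dependency is on the classpath; the class name ReadByteArrayDemo and the sample value are made up for illustration), readByteArray() is normally paired with Bytes.writeByteArray(), which writes the vint length prefix that readByteArray() later decodes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.hadoop.hbase.util.Bytes;

public class ReadByteArrayDemo {
  public static void main(String[] args) throws IOException {
    // Write a byte array with a vint length prefix (the counterpart of readByteArray()).
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (DataOutputStream out = new DataOutputStream(bos)) {
      Bytes.writeByteArray(out, Bytes.toBytes("row-0001"));
    }
    // Read it back: readByteArray() first decodes the vint length, then the payload.
    try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      byte[] restored = Bytes.readByteArray(in);
      System.out.println(Bytes.toString(restored)); // prints "row-0001"
    }
  }
}

The same pairing shows up in every readFields() implementation below: whatever was serialized with writeByteArray() on the write path is read back field by field with readByteArray().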

Code examples

Code example source: apache/hbase

public void readFields(DataInput in) throws IOException {
 this.m_tableName = TableName.valueOf(Bytes.readByteArray(in));
 this.m_startRow = Bytes.readByteArray(in);
 this.m_endRow = Bytes.readByteArray(in);
 this.m_regionLocation = Bytes.toString(Bytes.readByteArray(in));
}

Code example source: apache/hbase

/**
 * Read byte-array written with a WritableUtils.vint prefix.
 * IOException is converted to a RuntimeException.
 * @param in Input to read from.
 * @return byte array read off <code>in</code>
 */
public static byte [] readByteArrayThrowsRuntime(final DataInput in) {
 try {
  return readByteArray(in);
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
}

Code example source: apache/hbase

/**
 * @deprecated Writables are going away. Use the pb serialization methods instead.
 * Remove in a release after 0.96 goes out.  This is here only to migrate
 * old Reference files written with Writables before 0.96.
 */
@Deprecated
public void readFields(DataInput in) throws IOException {
 boolean tmp = in.readBoolean();
 // If true, set region to top.
 this.region = tmp? Range.top: Range.bottom;
 this.splitkey = Bytes.readByteArray(in);
}

Code example source: apache/hbase

@Override
public void readFields(DataInput in) throws IOException {
 super.readFields(in);
 byte[] tableBytes = Bytes.readByteArray(in);
 if(tableBytes.length > 0) {
  table = TableName.valueOf(tableBytes);
 }
 if (in.readBoolean()) {
  family = Bytes.readByteArray(in);
 }
 if (in.readBoolean()) {
  qualifier = Bytes.readByteArray(in);
 }
}

Code example source: apache/hbase

/** Now parse the old Writable format.  It was a list of Map entries.  Each map entry was a key and a value of
 * a byte [].  The old map format had a byte before each entry that held a code which was short for the key or
 * value type.  We know it was a byte [] so in below we just read and dump it.
 * @throws IOException
 */
void parseWritable(final DataInputStream in) throws IOException {
 // First clear the map.  Otherwise we will just accumulate entries every time this method is called.
 this.map.clear();
 // Read the number of entries in the map
 int entries = in.readInt();
 // Then read each key/value pair
 for (int i = 0; i < entries; i++) {
  byte [] key = Bytes.readByteArray(in);
  // We used to read a byte that encoded the class type.  Read and ignore it because it is always byte [] in hfile
  in.readByte();
  byte [] value = Bytes.readByteArray(in);
  this.map.put(key, value);
 }
}

Code example source: apache/hbase

@Override
public void readFields(DataInput in) throws IOException {
 super.readFields(in);
 namespace = Bytes.toString(Bytes.readByteArray(in));
}

Code example source: apache/hbase

private List<RegionInfo> readRegionsFromFile(String filename) throws IOException {
 List<RegionInfo> regions = new ArrayList<>();
 File f = new File(filename);
 if (!f.exists()) {
  return regions;
 }
 try (DataInputStream dis = new DataInputStream(
   new BufferedInputStream(new FileInputStream(f)))) {
  int numRegions = dis.readInt();
  int index = 0;
  while (index < numRegions) {
   regions.add(RegionInfo.parseFromOrNull(Bytes.readByteArray(dis)));
   index++;
  }
 } catch (IOException e) {
  LOG.error("Error while reading regions from file:" + filename, e);
  throw e;
 }
 return regions;
}

Code example source: apache/hbase

@Override
public void readFields(DataInput in) throws IOException {
  int len = in.readInt();
  byte[] buf = new byte[len];
  in.readFully(buf);
  TableSnapshotRegionSplit split = TableSnapshotRegionSplit.PARSER.parseFrom(buf);
  this.htd = ProtobufUtil.toTableDescriptor(split.getTable());
  this.regionInfo = HRegionInfo.convert(split.getRegion());
  List<String> locationsList = split.getLocationsList();
  this.locations = locationsList.toArray(new String[locationsList.size()]);
  this.scan = Bytes.toString(Bytes.readByteArray(in));
  this.restoreDir = Bytes.toString(Bytes.readByteArray(in));
}

Code example source: apache/hbase

in.readFully(tableNameBytes);
tableName = TableName.valueOf(tableNameBytes);
startRow = Bytes.readByteArray(in);
endRow = Bytes.readByteArray(in);
regionLocation = Bytes.toString(Bytes.readByteArray(in));
if (version.atLeast(Version.INITIAL)) {
 scan = Bytes.toString(Bytes.readByteArray(in));
 encodedRegionName = Bytes.toString(Bytes.readByteArray(in));

Code example source: apache/hbase

/**
 * Read in the root-level index from the given input stream. Must match
 * what was written into the root level by
 * {@link BlockIndexWriter#writeIndexBlocks(FSDataOutputStream)} at the
 * offset that function returned.
 *
 * @param in the buffered input stream or wrapped byte input stream
 * @param numEntries the number of root-level index entries
 * @throws IOException
 */
public void readRootIndex(DataInput in, final int numEntries) throws IOException {
 blockOffsets = new long[numEntries];
 initialize(numEntries);
 blockDataSizes = new int[numEntries];
 // If index size is zero, no index was written.
 if (numEntries > 0) {
  for (int i = 0; i < numEntries; ++i) {
   long offset = in.readLong();
   int dataSize = in.readInt();
   byte[] key = Bytes.readByteArray(in);
   add(key, offset, dataSize);
  }
 }
}

Code example source: apache/hbase

totalMaxKeys = meta.readLong();
numChunks = meta.readInt();
byte[] comparatorClassName = Bytes.readByteArray(meta);

Code example source: forcedotcom/phoenix

@Override
public void readFields(DataInput input) throws IOException {
  super.readFields(input);
  cf = Bytes.readByteArray(input);
  cq = Bytes.readByteArray(input);
}

Code example source: apache/phoenix

@Override
public void readFields(DataInput input) throws IOException {
  super.readFields(input);
  cf = Bytes.readByteArray(input);
  cq = Bytes.readByteArray(input);
}

Code example source: org.apache.hbase/hbase-client

@Override
public void readFields(DataInput in) throws IOException {
 super.readFields(in);
 byte[] tableBytes = Bytes.readByteArray(in);
 if(tableBytes.length > 0) {
  table = TableName.valueOf(tableBytes);
 }
 if (in.readBoolean()) {
  family = Bytes.readByteArray(in);
 }
 if (in.readBoolean()) {
  qualifier = Bytes.readByteArray(in);
 }
 if(in.readBoolean()) {
  namespace = Bytes.toString(Bytes.readByteArray(in));
 }
}

Code example source: org.apache.hbase/hbase-client

@Override
public void readFields(DataInput in) throws IOException {
 super.readFields(in);
 user = Bytes.readByteArray(in);
}

Code example source: apache/phoenix

@Override
public void readFields(DataInput input) throws IOException {
  super.readFields(input);
  try {
    allCFs = input.readBoolean();
    if (!allCFs) {
      essentialCF = Bytes.readByteArray(input);
    }
  } catch (EOFException e) { // Ignore as this will occur when a 4.10 client is used
  }
  init();
}

Code example source: apache/phoenix

public static ColumnReference[] deserializeDataTableColumnsToJoin(Scan scan) {
  byte[] columnsBytes = scan.getAttribute(BaseScannerRegionObserver.DATA_TABLE_COLUMNS_TO_JOIN);
  if (columnsBytes == null) return null;
  ByteArrayInputStream stream = new ByteArrayInputStream(columnsBytes); // TODO: size?
  try {
    DataInputStream input = new DataInputStream(stream);
    int numColumns = WritableUtils.readVInt(input);
    ColumnReference[] dataColumns = new ColumnReference[numColumns];
    for (int i = 0; i < numColumns; i++) {
      dataColumns[i] = new ColumnReference(Bytes.readByteArray(input), Bytes.readByteArray(input));
    }
    return dataColumns;
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    try {
      stream.close();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }
}

Code example source: apache/phoenix

/**
 * This method shouldn't be used - you should use {@link KeyValueCodec#readKeyValue(DataInput)} instead. It's the
 * complement to {@link #writeData(DataOutput)}.
 */
@SuppressWarnings("javadoc")
public void readFields(DataInput in) throws IOException {
  this.indexTableName = new ImmutableBytesPtr(Bytes.readByteArray(in));
  byte[] mutationData = Bytes.readByteArray(in);
  MutationProto mProto = MutationProto.parseFrom(mutationData);
  this.mutation = org.apache.hadoop.hbase.protobuf.ProtobufUtil.toMutation(mProto);
  this.hashCode = calcHashCode(indexTableName, mutation);
}

Code example source: forcedotcom/phoenix

@Override
public void readFields(DataInput input) throws IOException {
  super.readFields(input);
  schema = new KeyValueSchema();
  schema.readFields(input);
  bitSet = ValueBitSet.newInstance(schema);
  position = input.readInt();
  name = Bytes.readByteArray(input);
}

Code example source: forcedotcom/phoenix

@Override
public void readFields(DataInput input) throws IOException {
  byte[] columnNameBytes = Bytes.readByteArray(input);
  PName columnName = PNameFactory.newName(columnNameBytes);
  byte[] familyNameBytes = Bytes.readByteArray(input);
  PName familyName = familyNameBytes.length == 0 ? null : PNameFactory.newName(familyNameBytes);
  // TODO: optimize the reading/writing of this b/c it could likely all fit in a single byte or two
  PDataType dataType = PDataType.values()[WritableUtils.readVInt(input)];
  int maxLength = WritableUtils.readVInt(input);
  int scale = WritableUtils.readVInt(input);
  boolean nullable = input.readBoolean();
  int position = WritableUtils.readVInt(input);
  ColumnModifier columnModifier = ColumnModifier.fromSystemValue(WritableUtils.readVInt(input));
  init(columnName, familyName, dataType, maxLength == NO_MAXLENGTH ? null : maxLength,
      scale == NO_SCALE ? null : scale, nullable, position, columnModifier);
}
