org.apache.hadoop.hbase.util.Bytes.copy()方法的使用及代码示例

x33g5p2x  于2022-01-16 转载在 其他  
字(9.1k)|赞(0)|评价(0)|浏览(99)

本文整理了Java中org.apache.hadoop.hbase.util.Bytes.copy()方法的一些代码示例,展示了Bytes.copy()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Bytes.copy()方法的具体详情如下:
包路径:org.apache.hadoop.hbase.util.Bytes
类名称:Bytes
方法名:copy

Bytes.copy介绍

[英]Copy the byte array given in parameter and return an instance of a new byte array with the same length and the same content.
[中]复制参数中给定的字节数组,并返回具有相同长度和相同内容的新字节数组的实例。

代码示例

代码示例来源:origin: apache/hbase

/** @return a new copy of this instance's name bytes, so callers cannot mutate internal state. */
@Override
public byte[] getName() {
 return Bytes.copy(name);
}

代码示例来源:origin: apache/hbase

/**
 * Extracts the suffix portion of a composite key: every byte after the separator.
 * @param keyBytes the composite key; validated (and its separator located) by validateCompositeKey
 * @return a newly allocated byte[] holding the bytes following the separator index
 */
protected static byte[] getSuffix(byte[] keyBytes) {
  int separatorIdx = validateCompositeKey(keyBytes); // index of the separator byte within keyBytes
  return Bytes.copy(keyBytes, separatorIdx+1, keyBytes.length - separatorIdx - 1);
 }
}

代码示例来源:origin: apache/hbase

/**
 * Create a Get operation for the specified row.
 * @param row buffer containing the row key; only the given slice is read, and it is copied
 * @param rowOffset offset of the row key within {@code row}
 * @param rowLength number of row-key bytes to read from {@code row}
 */
public Get(byte[] row, int rowOffset, int rowLength) {
 Mutation.checkRow(row, rowOffset, rowLength);
 this.row = Bytes.copy(row, rowOffset, rowLength);
}

代码示例来源:origin: apache/hbase

/** Create a Append operation for the specified row.
 * <p>
 * At least one column must be appended to.
 * @param rowArray Makes a copy out of this buffer.
 * @param rowOffset offset of the row key within {@code rowArray}
 * @param rowLength number of row-key bytes to read from {@code rowArray}
 */
public Append(final byte [] rowArray, final int rowOffset, final int rowLength) {
 checkRow(rowArray, rowOffset, rowLength);
 this.row = Bytes.copy(rowArray, rowOffset, rowLength);
}

代码示例来源:origin: apache/hbase

/**
 * Create a Increment operation for the specified row.
 * <p>
 * At least one column must be incremented.
 * @param row row key (we will make a copy of this).
 * @param offset offset of the row key within {@code row}
 * @param length number of row-key bytes to read from {@code row}
 */
public Increment(final byte [] row, final int offset, final int length) {
 checkRow(row, offset, length);
 this.row = Bytes.copy(row, offset, length);
}
/**

代码示例来源:origin: apache/hbase

/**
 * Treat the byte[] as an unsigned series of bytes, most significant bits first.  Start by adding
 * 1 to the rightmost bit/byte and carry over all overflows to the more significant bits/bytes.
 * The argument itself is never modified; the result is always a fresh array.
 *
 * @param input The byte[] to increment; must not be null. An empty array yields {@code [1]}.
 * @return The incremented copy of {@code input}. Same length, or 1 byte longer when every
 *         input byte was 0xFF.
 * @throws IllegalArgumentException if {@code input} is null
 */
public static byte[] unsignedCopyAndIncrement(final byte[] input) {
 // Validate up front instead of relying on a copy helper to propagate null.
 if (input == null) {
  throw new IllegalArgumentException("cannot increment null array");
 }
 byte[] copy = input.clone();
 for (int i = copy.length - 1; i >= 0; --i) {
  if (copy[i] == -1) {// -1 is all 1-bits, which is the unsigned maximum
   copy[i] = 0; // overflow: zero this byte and carry into the next more-significant one
  } else {
   ++copy[i];
   return copy;
  }
 }
 // we maxed out the array: grow by one byte and set the new most-significant byte to 1
 byte[] out = new byte[copy.length + 1];
 out[0] = 1;
 System.arraycopy(copy, 0, out, 1, copy.length);
 return out;
}

代码示例来源:origin: apache/hbase

/**
 * Extracts the table-name portion of a composite key: every byte before the separator.
 * @param keyBytes the composite key; validated (and its separator located) by validateCompositeKey
 * @return a newly allocated byte[] holding the bytes up to, excluding, the separator index
 */
protected static byte[] getTableName(byte[] keyBytes) {
 int separatorIdx = validateCompositeKey(keyBytes); // index of the separator byte within keyBytes
 return Bytes.copy(keyBytes, 0, separatorIdx);
}

代码示例来源:origin: apache/hbase

/**
 * Create an atomic mutation for the specified row.
 * @param row row key; checked and then copied so the caller's array is never aliased
 * @param initialCapacity the initial capacity of the RowMutations
 */
public RowMutations(byte [] row, int initialCapacity) {
 this.row = Bytes.copy(Mutation.checkRow(row));
 // A non-positive capacity falls back to the list's default sizing.
 this.mutations =
   initialCapacity <= 0 ? new ArrayList<>() : new ArrayList<>(initialCapacity);
}

代码示例来源:origin: apache/hbase

/**
 * Create a Put operation for the specified row slice, using a given timestamp.
 * We make a copy of the passed in row key to keep local.
 * @param rowArray buffer containing the row key; the slice is copied, not aliased
 * @param rowOffset offset of the row key within {@code rowArray}
 * @param rowLength number of row-key bytes to read from {@code rowArray}
 * @param ts timestamp; must be non-negative
 * @throws IllegalArgumentException if the row slice is invalid or the timestamp is negative
 */
public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) {
 checkRow(rowArray, rowOffset, rowLength);
 // Validate the timestamp BEFORE assigning any state, matching Put(byte[], long, boolean).
 if (ts < 0) {
  throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
 }
 this.row = Bytes.copy(rowArray, rowOffset, rowLength);
 this.ts = ts;
}

代码示例来源:origin: apache/hbase

/**
 * Create a Put operation for an immutable row key, using a given timestamp.
 *
 * @param row row key
 * @param ts timestamp
 * @param rowIsImmutable whether the input row is immutable.
 *                       Set to true if the caller can guarantee that
 *                       the row will not be changed for the Put duration.
 */
public Put(byte[] row, long ts, boolean rowIsImmutable) {
 // Reject negative timestamps before touching any state.
 if (ts < 0) {
  throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + ts);
 }
 this.ts = ts;
 checkRow(row);
 // An immutable row may be aliased directly; otherwise take a defensive copy.
 this.row = rowIsImmutable ? row : Bytes.copy(row, 0, row.length);
}

代码示例来源:origin: apache/hbase

/**
 * Create a Delete operation for the specified row and timestamp.<p>
 *
 * If no further operations are done, this will delete all columns in all
 * families of the specified row with a timestamp less than or equal to the
 * specified timestamp.<p>
 *
 * This timestamp is ONLY used for a delete row operation.  If specifying
 * families or columns, you must specify each timestamp individually.
 * @param row We make a local copy of this passed in row.
 * @param rowOffset offset of the row key within {@code row}
 * @param rowLength number of row-key bytes to read from {@code row}
 * @param timestamp maximum version timestamp (only for delete row)
 */
public Delete(final byte[] row, final int rowOffset, final int rowLength, long timestamp) {
 checkRow(row, rowOffset, rowLength);
 this.row = Bytes.copy(row, rowOffset, rowLength);
 setTimestamp(timestamp);
}

代码示例来源:origin: apache/hbase

@Override
 public ExtendedCell deepClone() {
  // Copy the backing bytes so the clone is fully independent of this cell's buffer.
  byte[] copy = Bytes.copy(this.bytes, this.offset, this.length);
  KeyValue kv = new NoTagsKeyValue(copy, 0, copy.length);
  // Carry the sequence id over to the cloned cell.
  kv.setSequenceId(this.getSequenceId());
  return kv;
 }
}

代码示例来源:origin: apache/hbase

@Override
 public ExtendedCell deepClone() {
  // Copy the backing bytes so the clone is fully independent of this cell's buffer.
  byte[] copy = Bytes.copy(this.bytes, this.offset, this.length);
  KeyValue kv = new KeyValue(copy, 0, copy.length);
  // Carry the sequence id over to the cloned cell.
  kv.setSequenceId(this.getSequenceId());
  return kv;
 }
}

代码示例来源:origin: apache/hbase

/**
 * Builds an on-heap Cell from the decoder's current state.
 * The key is copied out of {@code keyBuffer}; the value stays backed by
 * {@code valAndTagsBuffer}'s array.
 * @param valAndTagsBuffer array-backed buffer holding the value followed by any tags
 * @param vOffset offset of the value within {@code valAndTagsBuffer}
 * @param tagsLenSerializationSize number of bytes used to serialize the tags length
 */
private Cell toOnheapCell(ByteBuffer valAndTagsBuffer, int vOffset,
  int tagsLenSerializationSize) {
 byte[] tagsArray = HConstants.EMPTY_BYTE_ARRAY;
 int tOffset = 0;
 if (this.includeTags) {
  if (this.tagCompressionContext == null) {
   // No tag compression: tags sit directly after the value in the same backing array.
   tagsArray = valAndTagsBuffer.array();
   tOffset = valAndTagsBuffer.arrayOffset() + vOffset + this.valueLength
     + tagsLenSerializationSize;
  } else {
   // Tags were staged in tagsBuffer (presumably decompressed upstream — see
   // tagCompressionContext handling); copy them so the cell owns its tag bytes.
   tagsArray = Bytes.copy(tagsBuffer, 0, this.tagsLength);
   tOffset = 0;
  }
 }
 // Copy the key so the returned cell does not alias the decoder's reusable keyBuffer.
 return new OnheapDecodedCell(Bytes.copy(keyBuffer, 0, this.keyLength),
   currentKey.getRowLength(), currentKey.getFamilyOffset(), currentKey.getFamilyLength(),
   currentKey.getQualifierOffset(), currentKey.getQualifierLength(),
   currentKey.getTimestamp(), currentKey.getTypeByte(), valAndTagsBuffer.array(),
   valAndTagsBuffer.arrayOffset() + vOffset, this.valueLength, memstoreTS, tagsArray,
   tOffset, this.tagsLength);
}

代码示例来源:origin: apache/hbase

/**
 * Builds an off-heap-backed Cell from the decoder's current state.
 * The key is copied out of {@code keyBuffer} and wrapped; the value stays backed by
 * {@code valAndTagsBuffer}.
 * @param valAndTagsBuffer buffer holding the value followed by any tags
 * @param vOffset offset of the value within {@code valAndTagsBuffer}
 * @param tagsLenSerializationSize number of bytes used to serialize the tags length
 */
private Cell toOffheapCell(ByteBuffer valAndTagsBuffer, int vOffset,
   int tagsLenSerializationSize) {
  ByteBuffer tagsBuf = HConstants.EMPTY_BYTE_BUFFER;
  int tOffset = 0;
  if (this.includeTags) {
   if (this.tagCompressionContext == null) {
    // No tag compression: tags sit directly after the value in the same buffer.
    tagsBuf = valAndTagsBuffer;
    tOffset = vOffset + this.valueLength + tagsLenSerializationSize;
   } else {
    // Tags were staged in tagsBuffer (presumably decompressed upstream — see
    // tagCompressionContext handling); copy and wrap so the cell owns its tag bytes.
    tagsBuf = ByteBuffer.wrap(Bytes.copy(tagsBuffer, 0, this.tagsLength));
    tOffset = 0;
   }
  }
  // Copy the key so the returned cell does not alias the decoder's reusable keyBuffer.
  return new OffheapDecodedExtendedCell(
    ByteBuffer.wrap(Bytes.copy(keyBuffer, 0, this.keyLength)), currentKey.getRowLength(),
    currentKey.getFamilyOffset(), currentKey.getFamilyLength(),
    currentKey.getQualifierOffset(), currentKey.getQualifierLength(),
    currentKey.getTimestamp(), currentKey.getTypeByte(), valAndTagsBuffer, vOffset,
    this.valueLength, memstoreTS, tagsBuf, tOffset, this.tagsLength);
 }
}

代码示例来源:origin: apache/hbase

/**
 * Populates the given CINode from a scan Result: copies the row key and, when the
 * corresponding column is present, the prev pointer, count and client values, falling
 * back to NO_KEY / -1 / "" respectively when a column is absent.
 */
private static CINode getCINode(Result result, CINode node) {
 node.key = Bytes.copy(result.getRow());
 node.prev = result.containsColumn(FAMILY_NAME, COLUMN_PREV)
   ? Bytes.copy(result.getValue(FAMILY_NAME, COLUMN_PREV))
   : NO_KEY;
 node.count = result.containsColumn(FAMILY_NAME, COLUMN_COUNT)
   ? Bytes.toLong(result.getValue(FAMILY_NAME, COLUMN_COUNT))
   : -1;
 node.client = result.containsColumn(FAMILY_NAME, COLUMN_CLIENT)
   ? Bytes.toString(result.getValue(FAMILY_NAME, COLUMN_CLIENT))
   : "";
 return node;
}

代码示例来源:origin: apache/hbase

/** Verifies that Bytes.copy returns a distinct array instance holding identical content. */
public void testCopy() throws Exception {
 byte [] bytes = Bytes.toBytes("ABCDEFGHIJKLMNOPQRSTUVWXYZ");
 byte [] copy =  Bytes.copy(bytes);
 assertFalse(bytes == copy); // must be a new array, not an alias of the input
 assertTrue(Bytes.equals(bytes, copy)); // ...but with equal contents
}

代码示例来源:origin: apache/hbase

/**
 * Asserts that the given cell's row, family and qualifier each equal the expected
 * byte arrays, copying each component out of the cell's backing arrays first.
 */
private void assertCell(Cell cell, byte[] row, byte[] cf, byte[] cq) {
 byte[] actualRow =
   Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());
 byte[] actualFamily =
   Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength());
 byte[] actualQualifier =
   Bytes.copy(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
 assertArrayEquals(row, actualRow);
 assertArrayEquals(cf, actualFamily);
 assertArrayEquals(cq, actualQualifier);
}

代码示例来源:origin: apache/hbase

@Test
 public void testFirstSeveralCellsFilterAndBatch() throws IOException {
  // Scan with a filter that keeps the first several cells per row, batched to NUM_COLS.
  Scan scan = new Scan();
  scan.setFilter(new FirstSeveralCellsFilter());
  scan.setBatch(NUM_COLS);
  InternalScanner scanner = REGION.getScanner(scan);
  List<Cell> results = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS; i++) {
   results.clear();
   scanner.next(results);
   // Each row should yield exactly one batch of NUM_COLS cells.
   assertEquals(NUM_COLS, results.size());
   // Spot-check the first cell's coordinates: expected row, first family, first qualifier.
   Cell cell = results.get(0);
   assertArrayEquals(ROWS[i],
     Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
   assertArrayEquals(FAMILIES[0],
     Bytes.copy(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength()));
   assertArrayEquals(QUALIFIERS[0], Bytes.copy(cell.getQualifierArray(),
     cell.getQualifierOffset(), cell.getQualifierLength()));
  }
  // After NUM_ROWS rows the scanner must be exhausted.
  assertFalse(scanner.next(results));
  scanner.close();
 }
}

代码示例来源:origin: apache/hbase

@Test
public void testFirstKeyOnlyFilterAndBatch() throws IOException {
 // Scan with FirstKeyOnlyFilter and batch size 1: expect exactly one cell per row.
 Scan scan = new Scan();
 scan.setFilter(new FirstKeyOnlyFilter());
 scan.setBatch(1);
 InternalScanner scanner = REGION.getScanner(scan);
 List<Cell> results = new ArrayList<>();
 for (int i = 0; i < NUM_ROWS; i++) {
  results.clear();
  scanner.next(results);
  assertEquals(1, results.size());
  // The single returned cell must belong to the expected row.
  Cell cell = results.get(0);
  assertArrayEquals(ROWS[i],
    Bytes.copy(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()));
 }
 // After NUM_ROWS rows the scanner must be exhausted.
 assertFalse(scanner.next(results));
 scanner.close();
}

相关文章

微信公众号

最新文章

更多