Usage and code examples of the org.apache.hadoop.hbase.TableName.toBytes() method

This article collects Java code examples of the org.apache.hadoop.hbase.TableName.toBytes() method and shows how it is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should be a useful reference. Details of TableName.toBytes():
Package: org.apache.hadoop.hbase
Class: TableName
Method: toBytes

About TableName.toBytes

toBytes() returns the table name as a byte array (for a table outside the default namespace this is the fully-qualified namespace:qualifier form), which is useful for APIs that still expect a raw byte[] table name rather than a TableName object.
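
A minimal usage sketch (the table name "demo_table" is made up for illustration, and the example assumes only the standard hbase-client dependency on the classpath):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TableNameToBytesExample {
 public static void main(String[] args) {
  // "demo_table" is a hypothetical table in the default namespace.
  TableName tableName = TableName.valueOf("demo_table");
  // toBytes() exposes the name as a byte[] for APIs that do not accept a TableName.
  byte[] nameBytes = tableName.toBytes();
  // Convert back to a String for display; expected to print "demo_table".
  System.out.println(Bytes.toString(nameBytes));
 }
}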

Code examples

Code example source: apache/hbase

public ThriftTable(TableName tableName, THBaseService.Client client, TTransport tTransport,
  Configuration conf) {
 this.tableName = tableName;
 this.tableNameInBytes = ByteBuffer.wrap(tableName.toBytes());
 this.conf = conf;
 this.tTransport = tTransport;
 this.client = client;
 this.scannerCaching = conf.getInt(HBASE_THRIFT_CLIENT_SCANNER_CACHING,
   HBASE_THRIFT_CLIENT_SCANNER_CACHING_DEFAULT);
 this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
   HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);
}

Code example source: apache/hbase

@Override
 public long estimatedSerializedSizeOf() {
  long size = encodedRegionName != null ? encodedRegionName.length : 0;
  size += tablename != null ? tablename.toBytes().length : 0;
  if (clusterIds != null) {
   size += 16 * clusterIds.size();
  }
  if (nonceGroup != HConstants.NO_NONCE) {
   size += Bytes.SIZEOF_LONG; // nonce group
  }
  if (nonce != HConstants.NO_NONCE) {
   size += Bytes.SIZEOF_LONG; // nonce
  }
  if (replicationScope != null) {
   for (Map.Entry<byte[], Integer> scope: replicationScope.entrySet()) {
    size += scope.getKey().length;
    size += Bytes.SIZEOF_INT;
   }
  }
  size += Bytes.SIZEOF_LONG; // sequence number
  size += Bytes.SIZEOF_LONG; // write time
  if (origLogSeqNum > 0) {
   size += Bytes.SIZEOF_LONG; // original sequence number
  }
  return size;
 }
}

Code example source: apache/hbase

@Override
public void write(DataOutput out) throws IOException {
 byte[] name = this.tableName.toBytes();
 out.writeInt(name.length);
 out.write(name);
 out.writeInt(startRow);
 out.writeInt(rows);
 out.writeInt(totalRows);
 out.writeInt(clients);
 out.writeBoolean(flushCommits);
 out.writeBoolean(writeToWAL);
 out.writeBoolean(useTags);
 out.writeInt(noOfTags);
}

Code example source: apache/hbase

@Test
public void testFamilyWithAndWithoutColon() throws Exception {
 byte[] cf = Bytes.toBytes(COLUMN_FAMILY);
 this.region = initHRegion(tableName, method, CONF, cf);
 Put p = new Put(tableName.toBytes());
 byte[] cfwithcolon = Bytes.toBytes(COLUMN_FAMILY + ":");
 p.addColumn(cfwithcolon, cfwithcolon, cfwithcolon);
 boolean exception = false;
 try {
  this.region.put(p);
 } catch (NoSuchColumnFamilyException e) {
  exception = true;
 }
 assertTrue(exception);
}

Code example source: apache/hbase

@Test
public void verifyBulkLoadEvent() throws IOException {
 TableName tableName = TableName.valueOf("test", "test");
 List<Pair<byte[], String>> familyPaths = withFamilyPathsFor(family1);
 byte[] familyName = familyPaths.get(0).getFirst();
 String storeFileName = familyPaths.get(0).getSecond();
 storeFileName = (new Path(storeFileName)).getName();
 List<String> storeFileNames = new ArrayList<>();
 storeFileNames.add(storeFileName);
 when(log.append(any(), any(),
     argThat(bulkLogWalEdit(WALEdit.BULK_LOAD, tableName.toBytes(),
         familyName, storeFileNames)),
     anyBoolean())).thenAnswer(new Answer() {
      @Override
      public Object answer(InvocationOnMock invocation) {
       WALKeyImpl walKey = invocation.getArgument(1);
       MultiVersionConcurrencyControl mvcc = walKey.getMvcc();
       if (mvcc != null) {
        MultiVersionConcurrencyControl.WriteEntry we = mvcc.begin();
        walKey.setWriteEntry(we);
       }
       return 01L;
      }
 });
 testRegionWithFamiliesAndSpecifiedTableName(tableName, family1)
   .bulkLoadHFiles(familyPaths, false, null);
 verify(log).sync(anyLong());
}

Code example source: apache/hbase

public static CompactionDescriptor toCompactionDescriptor(
  org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName,
  byte[] family, List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
 // compaction descriptor contains relative paths.
 // input / output paths are relative to the store dir
 // store dir is relative to region dir
 CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
   .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes()))
   .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(
    regionName == null ? info.getEncodedNameAsBytes() : regionName))
   .setFamilyName(UnsafeByteOperations.unsafeWrap(family))
   .setStoreHomeDir(storeDir.getName()); //make relative
 for (Path inputPath : inputPaths) {
  builder.addCompactionInput(inputPath.getName()); //relative path
 }
 for (Path outputPath : outputPaths) {
  builder.addCompactionOutput(outputPath.getName());
 }
 builder.setRegionName(UnsafeByteOperations.unsafeWrap(info.getRegionName()));
 return builder.build();
}

Code example source: apache/hbase

// Truncated snippet: the enclosing method skips WAL entries that belong to another table.
...(RegionInfo info, WALKey logKey, WALEdit logEdit) throws IOException {
 if (!Bytes.equals(info.getTable().toBytes(), this.tableName)) {
  return;
 }

Code example source: apache/hbase

private static void appendRegionEvent(Writer w, String region) throws IOException {
 WALProtos.RegionEventDescriptor regionOpenDesc = ProtobufUtil.toRegionEventDescriptor(
   WALProtos.RegionEventDescriptor.EventType.REGION_OPEN,
   TABLE_NAME.toBytes(),
   Bytes.toBytes(region),
   Bytes.toBytes(String.valueOf(region.hashCode())),
   1,
   ServerName.parseServerName("ServerName:9099"), ImmutableMap.<byte[], List<Path>>of());
 final long time = EnvironmentEdgeManager.currentTime();
 KeyValue kv = new KeyValue(Bytes.toBytes(region), WALEdit.METAFAMILY, WALEdit.REGION_EVENT,
   time, regionOpenDesc.toByteArray());
 final WALKeyImpl walKey = new WALKeyImpl(Bytes.toBytes(region), TABLE_NAME, 1, time,
   HConstants.DEFAULT_CLUSTER_ID);
 w.append(
   new Entry(walKey, new WALEdit().add(kv)));
 w.sync(false);
}

Code example source: apache/hbase

@Test
public void testSuperSimple() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 Table ht = TEST_UTIL.createTable(tableName, FAMILY);
 Put put = new Put(ROW);
 put.addColumn(FAMILY, QUALIFIER, VALUE);
 ht.put(put);
 Scan scan = new Scan();
 scan.addColumn(FAMILY, tableName.toBytes());
 ResultScanner scanner = ht.getScanner(scan);
 Result result = scanner.next();
 assertTrue("Expected null result", result == null);
 scanner.close();
}

Code example source: apache/hbase

/**
 * Test that we get the expected flush results back
 */
@Test
public void testFlushResult() throws IOException {
 byte[] family = Bytes.toBytes("family");
 this.region = initHRegion(tableName, method, family);
 // empty memstore, flush doesn't run
 HRegion.FlushResult fr = region.flush(true);
 assertFalse(fr.isFlushSucceeded());
 assertFalse(fr.isCompactionNeeded());
 // Flush enough files to get up to the threshold, doesn't need compactions
 for (int i = 0; i < 2; i++) {
  Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
  region.put(put);
  fr = region.flush(true);
  assertTrue(fr.isFlushSucceeded());
  assertFalse(fr.isCompactionNeeded());
 }
 // Two flushes after the threshold, compactions are needed
 for (int i = 0; i < 2; i++) {
  Put put = new Put(tableName.toBytes()).addColumn(family, family, tableName.toBytes());
  region.put(put);
  fr = region.flush(true);
  assertTrue(fr.isFlushSucceeded());
  assertTrue(fr.isCompactionNeeded());
 }
}

Code example source: apache/hbase

@Before
public void before() throws Exception  {
 Admin admin = TEST_UTIL.getAdmin();
 if (admin.tableExists(TABLE)) {
  if (admin.isTableEnabled(TABLE)) admin.disableTable(TABLE);
  admin.deleteTable(TABLE);
 }
 HTableDescriptor htd = new HTableDescriptor(TABLE);
 htd.addFamily(new HColumnDescriptor(COLUMN_1).setMaxVersions(3));
 htd.addFamily(new HColumnDescriptor(COLUMN_2).setMaxVersions(3));
 htd.addFamily(new HColumnDescriptor(COLUMN_3).setMaxVersions(3));
 admin.createTable(htd);
 try (Table table = TEST_UTIL.getConnection().getTable(TABLE)) {
  Put put = new Put(ROW_1);
  put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_1);
  table.put(put);
  put = new Put(ROW_2);
  put.addColumn(COLUMN_1, QUALIFIER_1, TS_1, VALUE_1);
  put.addColumn(COLUMN_1, QUALIFIER_1, TS_2, VALUE_2);
  put.addColumn(COLUMN_2, QUALIFIER_2, TS_2, VALUE_2);
  table.put(put);
 }
 remoteTable = new RemoteHTable(
  new Client(new Cluster().add("localhost",
    REST_TEST_UTIL.getServletPort())),
   TEST_UTIL.getConfiguration(), TABLE.toBytes());
}

Code example source: apache/hbase

assertTrue(Bytes.equals(desc.getTableName().toByteArray(), htd.getTableName().toBytes()));
assertTrue(Bytes.equals(desc.getEncodedRegionName().toByteArray(),
 hri.getEncodedNameAsBytes()));

Code example source: apache/hbase

private static void appendCompactionEvent(Writer w, RegionInfo hri, String[] inputs,
  String output) throws IOException {
 WALProtos.CompactionDescriptor.Builder desc = WALProtos.CompactionDescriptor.newBuilder();
 desc.setTableName(ByteString.copyFrom(hri.getTable().toBytes()))
   .setEncodedRegionName(ByteString.copyFrom(hri.getEncodedNameAsBytes()))
   .setRegionName(ByteString.copyFrom(hri.getRegionName()))
   .setFamilyName(ByteString.copyFrom(FAMILY))
   .setStoreHomeDir(hri.getEncodedName() + "/" + Bytes.toString(FAMILY))
   .addAllCompactionInput(Arrays.asList(inputs))
   .addCompactionOutput(output);
 WALEdit edit = WALEdit.createCompaction(hri, desc.build());
 WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), TABLE_NAME, 1,
   EnvironmentEdgeManager.currentTime(), HConstants.DEFAULT_CLUSTER_ID);
 w.append(new Entry(key, edit));
 w.sync(false);
}

Code example source: org.apache.hbase/hbase-client

public static CompactionDescriptor toCompactionDescriptor(
  org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName,
  byte[] family, List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
 // compaction descriptor contains relative paths.
 // input / output paths are relative to the store dir
 // store dir is relative to region dir
 CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
   .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes()))
   .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(
    regionName == null ? info.getEncodedNameAsBytes() : regionName))
   .setFamilyName(UnsafeByteOperations.unsafeWrap(family))
   .setStoreHomeDir(storeDir.getName()); //make relative
 for (Path inputPath : inputPaths) {
  builder.addCompactionInput(inputPath.getName()); //relative path
 }
 for (Path outputPath : outputPaths) {
  builder.addCompactionOutput(outputPath.getName());
 }
 builder.setRegionName(UnsafeByteOperations.unsafeWrap(info.getRegionName()));
 return builder.build();
}

Code example source: apache/phoenix

assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER "
    + Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(
      SchemaUtil.getPhysicalTableName(Bytes.toBytes(tableName), isNamespaceMapped).toBytes()))
    + " [-9223372036854775808,'" + tenantId + "','f']\n" + "    SERVER FILTER BY FIRST KEY ONLY",
    QueryUtil.getExplainPlan(rs));

Code example source: harbby/presto-connectors

/**
 * Get current table name of the region
 * @return byte array of table name
 * @deprecated As of release 0.96
 *             (<a href="https://issues.apache.org/jira/browse/HBASE-9508">HBASE-9508</a>).
 *             This will be removed in HBase 2.0.0. Use {@link #getTable()}.
 */
@Deprecated
public byte [] getTableName() {
 return getTable().toBytes();
}
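
Per the deprecation note above, callers migrate by going through getTable() and calling toBytes() only where a raw byte[] is still required. A sketch (hri stands for any HRegionInfo/RegionInfo instance; it is not taken from the projects above):

// Before (deprecated since 0.96, removed in HBase 2.0.0):
byte[] legacyName = hri.getTableName();
// After:
byte[] tableNameBytes = hri.getTable().toBytes();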

Code example source: com.aliyun.hbase/alihbase-rest

@Override
public void write(DataOutput out) throws IOException {
 byte[] name = this.tableName.toBytes();
 out.writeInt(name.length);
 out.write(name);
 out.writeInt(startRow);
 out.writeInt(rows);
 out.writeInt(totalRows);
 out.writeInt(clients);
 out.writeBoolean(flushCommits);
 out.writeBoolean(writeToWAL);
 out.writeBoolean(useTags);
 out.writeInt(noOfTags);
}

Code example source: org.apache.hbase/hbase-server

@Test
public void testSuperSimple() throws Exception {
 final TableName tableName = TableName.valueOf(name.getMethodName());
 Table ht = TEST_UTIL.createTable(tableName, FAMILY);
 Put put = new Put(ROW);
 put.addColumn(FAMILY, QUALIFIER, VALUE);
 ht.put(put);
 Scan scan = new Scan();
 scan.addColumn(FAMILY, tableName.toBytes());
 ResultScanner scanner = ht.getScanner(scan);
 Result result = scanner.next();
 assertTrue("Expected null result", result == null);
 scanner.close();
}

Code example source: org.apache.hbase/hbase-server

private static void appendCompactionEvent(Writer w, RegionInfo hri, String[] inputs,
  String output) throws IOException {
 WALProtos.CompactionDescriptor.Builder desc = WALProtos.CompactionDescriptor.newBuilder();
 desc.setTableName(ByteString.copyFrom(hri.getTable().toBytes()))
   .setEncodedRegionName(ByteString.copyFrom(hri.getEncodedNameAsBytes()))
   .setRegionName(ByteString.copyFrom(hri.getRegionName()))
   .setFamilyName(ByteString.copyFrom(FAMILY))
   .setStoreHomeDir(hri.getEncodedName() + "/" + Bytes.toString(FAMILY))
   .addAllCompactionInput(Arrays.asList(inputs))
   .addCompactionOutput(output);
 WALEdit edit = WALEdit.createCompaction(hri, desc.build());
 WALKeyImpl key = new WALKeyImpl(hri.getEncodedNameAsBytes(), TABLE_NAME, 1,
   EnvironmentEdgeManager.currentTime(), HConstants.DEFAULT_CLUSTER_ID);
 w.append(new Entry(key, edit));
 w.sync(false);
}
