Usage of org.apache.hadoop.hbase.client.Table.batch() with code examples


This article collects Java code examples for the org.apache.hadoop.hbase.client.Table.batch() method and shows how it is used in practice. The snippets are taken from selected open-source projects on platforms such as GitHub, Stack Overflow and Maven, so they should serve as useful references. Details of Table.batch() are as follows:
Package: org.apache.hadoop.hbase.client
Class: Table
Method: batch

Introduction to Table.batch

Javadoc: "Same as #batch(List, Object[]), but returns an array of results instead of using a results parameter reference." That sentence describes the overload that returns the results array; the examples below use the two-argument form, void batch(List<? extends Row> actions, Object[] results), which executes a mixed list of Gets, Puts, Deletes, Increments, Appends and RowMutations and writes a Result or a Throwable into the caller-supplied results array, one entry per action.
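
For orientation, here is a minimal sketch of the two-argument call. The table name "demo", column family "cf", qualifier "q" and class name TableBatchSketch are illustrative placeholders, not identifiers from the projects quoted below.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableBatchSketch {
 public static void main(String[] args) throws Exception {
  try (Connection conn = ConnectionFactory.createConnection();
     Table table = conn.getTable(TableName.valueOf("demo"))) { // "demo" is a placeholder
   List<Row> actions = new ArrayList<>();
   actions.add(new Put(Bytes.toBytes("row1"))
     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v1")));
   actions.add(new Get(Bytes.toBytes("row1"))
     .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")));

   // The results array must be pre-sized to actions.size(); batch() fills it in place.
   Object[] results = new Object[actions.size()];
   table.batch(actions, results);

   // Each slot holds a Result for a successful action or a Throwable for a failed one.
   for (int i = 0; i < results.length; i++) {
    if (results[i] instanceof Result) {
     System.out.println(i + " -> " + results[i]);
    } else {
     System.out.println(i + " failed: " + results[i]);
    }
   }
  }
 }
}

The per-slot Result/Throwable behaviour is exactly what the testBadFam example further down relies on.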

Code examples

Code example source: thinkaurelius/titan

@Override
public void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException
{
  table.batch(writes, results);
  /* table.flushCommits(); not needed anymore */
}

Code example source: apache/hbase

/**
 * Execute the passed <code>mutations</code> against <code>hbase:meta</code> table.
 * @param connection connection we're using
 * @param mutations Puts and Deletes to execute on hbase:meta
 * @throws IOException
 */
public static void mutateMetaTable(final Connection connection,
  final List<Mutation> mutations) throws IOException {
 Table t = getMetaHTable(connection);
 try {
  debugLogMutations(mutations);
  t.batch(mutations, null);
 } catch (InterruptedException e) {
  InterruptedIOException ie = new InterruptedIOException(e.getMessage());
  ie.initCause(e);
  throw ie;
 } finally {
  t.close();
 }
}

Code example source: apache/hbase

/**
 * Do the changes and handle the pool
 * @param tableName table to insert into
 * @param allRows list of actions
 * @throws IOException
 */
private void batch(TableName tableName, Collection<List<Row>> allRows) throws IOException {
 if (allRows.isEmpty()) {
  return;
 }
 Connection connection = getConnection();
 try (Table table = connection.getTable(tableName)) {
  for (List<Row> rows : allRows) {
   table.batch(rows, null);
  }
 } catch (RetriesExhaustedWithDetailsException rewde) {
  for (Throwable ex : rewde.getCauses()) {
   if (ex instanceof TableNotFoundException) {
    throw new TableNotFoundException("'" + tableName + "'");
   }
  }
  throw rewde;
 } catch (InterruptedException ix) {
  throw (InterruptedIOException) new InterruptedIOException().initCause(ix);
 }
}

Code example source: apache/hbase

private void testAppend(Increment inc) throws Exception {
 checkResult(table.increment(inc));
 List<Row> actions = Arrays.asList(inc, inc);
 Object[] results = new Object[actions.size()];
 table.batch(actions, results);
 checkResult(results);
}

Code example source: apache/hbase

private void testAppend(Append append) throws Exception {
 checkResult(table.append(append));
 List<Row> actions = Arrays.asList(append, append);
 Object[] results = new Object[actions.size()];
 table.batch(actions, results);
 checkResult(results);
}

Code example source: apache/hbase

@Override
 public void prePut(final ObserverContext<RegionCoprocessorEnvironment> e, final Put put,
   final WALEdit edit, final Durability durability) throws IOException {
  try (Table table = e.getEnvironment().getConnection().getTable(otherTable, getPool())) {
   Put p = new Put(new byte[]{'a'});
   p.addColumn(family, null, new byte[]{'a'});
   try {
    table.batch(Collections.singletonList(put), null);
   } catch (InterruptedException e1) {
    throw new IOException(e1);
   }
   completedWithPool[0] = true;
  }
 }
}

Code example source: apache/hbase

table.batch(puts, results);
table.batch(gets, multiRes);

Code example source: apache/hbase

@Test
public void testHTableBatchWithEmptyPut() throws Exception {
 Table table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
   new byte[][] { FAMILY });
 try {
  List actions = (List) new ArrayList();
  Object[] results = new Object[2];
  // create an empty Put
  Put put1 = new Put(ROW);
  actions.add(put1);
  Put put2 = new Put(ANOTHERROW);
  put2.addColumn(FAMILY, QUALIFIER, VALUE);
  actions.add(put2);
  table.batch(actions, results);
  fail("Empty Put should have failed the batch call");
 } catch (IllegalArgumentException iae) {
 } finally {
  table.close();
 }
}

Code example source: apache/hbase

@Test
public void testHTableWithLargeBatch() throws Exception {
 Table table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()),
   new byte[][] { FAMILY });
 int sixtyFourK = 64 * 1024;
 try {
  List actions = new ArrayList();
  Object[] results = new Object[(sixtyFourK + 1) * 2];
  for (int i = 0; i < sixtyFourK + 1; i ++) {
   Put put1 = new Put(ROW);
   put1.addColumn(FAMILY, QUALIFIER, VALUE);
   actions.add(put1);
   Put put2 = new Put(ANOTHERROW);
   put2.addColumn(FAMILY, QUALIFIER, VALUE);
   actions.add(put2);
  }
  table.batch(actions, results);
 } finally {
  table.close();
 }
}

Code example source: apache/hbase

@Test
public void testBadFam() throws Exception {
 LOG.info("test=testBadFam");
 Table table = UTIL.getConnection().getTable(TEST_TABLE);
 List<Row> actions = new ArrayList<>();
 Put p = new Put(Bytes.toBytes("row1"));
 p.addColumn(Bytes.toBytes("bad_family"), Bytes.toBytes("qual"), Bytes.toBytes("value"));
 actions.add(p);
 p = new Put(Bytes.toBytes("row2"));
 p.addColumn(BYTES_FAMILY, Bytes.toBytes("qual"), Bytes.toBytes("value"));
 actions.add(p);
 // row1 and row2 should be in the same region.
 Object [] r = new Object[actions.size()];
 try {
  table.batch(actions, r);
  fail();
 } catch (RetriesExhaustedWithDetailsException ex) {
  LOG.debug(ex.toString(), ex);
  // good!
  assertFalse(ex.mayHaveClusterIssues());
 }
 assertEquals(2, r.length);
 assertTrue(r[0] instanceof Throwable);
 assertTrue(r[1] instanceof Result);
 table.close();
}

Code example source: apache/hbase

// Excerpt from a RowMutations test; t, ROW, FAMILY, QUALIFIERS and VALUE are
// defined earlier in the enclosing test method.
RowMutations arm = RowMutations.of(Collections.singletonList(
 new Put(ROW).addColumn(FAMILY, QUALIFIERS[0], VALUE)));
Object[] batchResult = new Object[1];
t.batch(Arrays.asList(arm), batchResult);

arm = RowMutations.of(Arrays.asList(
 new Put(ROW).addColumn(FAMILY, QUALIFIERS[1], VALUE),
 new Delete(ROW).addColumns(FAMILY, QUALIFIERS[0])));
t.batch(Arrays.asList(arm), batchResult);
Result r = t.get(new Get(ROW));
assertEquals(0, Bytes.compareTo(VALUE, r.getValue(FAMILY, QUALIFIERS[1])));

// A RowMutations touching an unknown column family should fail the batch call.
try {
 arm = RowMutations.of(Collections.singletonList(
  new Put(ROW).addColumn(new byte[]{'b', 'o', 'g', 'u', 's'}, QUALIFIERS[0], VALUE)));
 t.batch(Arrays.asList(arm), batchResult);
 fail("Expected RetriesExhaustedWithDetailsException with NoSuchColumnFamilyException");
} catch (RetriesExhaustedWithDetailsException e) {
 // expected
}

Code example source: apache/hbase

@Test
public void testBatchWithDelete() throws Exception {
 LOG.info("test=testBatchWithDelete");
 Table table = UTIL.getConnection().getTable(TEST_TABLE);
 // Load some data
 List<Put> puts = constructPutRequests();
 Object[] results = new Object[puts.size()];
 table.batch(puts, results);
 validateSizeAndEmpty(results, KEYS.length);
 // Deletes
 List<Row> deletes = new ArrayList<>();
 for (int i = 0; i < KEYS.length; i++) {
  Delete delete = new Delete(KEYS[i]);
  delete.addFamily(BYTES_FAMILY);
  deletes.add(delete);
 }
results = new Object[deletes.size()];
 table.batch(deletes, results);
 validateSizeAndEmpty(results, KEYS.length);
 // Get to make sure ...
 for (byte[] k : KEYS) {
  Get get = new Get(k);
  get.addColumn(BYTES_FAMILY, QUALIFIER);
  Assert.assertFalse(table.exists(get));
 }
 table.close();
}

Code example source: apache/hbase

@Test
public void testBatchAppendWithReturnResultFalse() throws Exception {
 LOG.info("Starting testBatchAppendWithReturnResultFalse");
 final TableName tableName = TableName.valueOf(name.getMethodName());
 Table table = TEST_UTIL.createTable(tableName, FAMILY);
 Append append1 = new Append(Bytes.toBytes("row1"));
 append1.setReturnResults(false);
 append1.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value1"));
 Append append2 = new Append(Bytes.toBytes("row1"));
 append2.setReturnResults(false);
 append2.addColumn(FAMILY, Bytes.toBytes("f1"), Bytes.toBytes("value2"));
 List<Append> appends = new ArrayList<>();
 appends.add(append1);
 appends.add(append2);
 Object[] results = new Object[2];
 table.batch(appends, results);
 assertTrue(results.length == 2);
 for(Object r : results) {
  Result result = (Result)r;
  assertTrue(result.isEmpty());
 }
 table.close();
}

Code example source: apache/hbase

table.batch(actions, multiRes);
validateResult(multiRes[1], QUAL1, Bytes.toBytes("abcdef"));
validateResult(multiRes[1], QUAL4, Bytes.toBytes("xyz"));

Code example source: apache/hbase

@Test
public void testBatchIncrementsWithReturnResultFalse() throws Exception {
 LOG.info("Starting testBatchIncrementsWithReturnResultFalse");
 final TableName tableName = TableName.valueOf(name.getMethodName());
 Table table = TEST_UTIL.createTable(tableName, FAMILY);
 Increment inc1 = new Increment(Bytes.toBytes("row2"));
 inc1.setReturnResults(false);
 inc1.addColumn(FAMILY, Bytes.toBytes("f1"), 1);
 Increment inc2 = new Increment(Bytes.toBytes("row2"));
 inc2.setReturnResults(false);
 inc2.addColumn(FAMILY, Bytes.toBytes("f1"), 1);
 List<Increment> incs = new ArrayList<>();
 incs.add(inc1);
 incs.add(inc2);
 Object[] results = new Object[2];
 table.batch(incs, results);
 assertTrue(results.length == 2);
 for(Object r : results) {
  Result result = (Result)r;
  assertTrue(result.isEmpty());
 }
 table.close();
}

Code example source: apache/hbase

@Test
public void testHTableDeleteWithList() throws Exception {
 LOG.info("test=testHTableDeleteWithList");
 Table table = UTIL.getConnection().getTable(TEST_TABLE);
 // Load some data
 List<Put> puts = constructPutRequests();
 Object[] results = new Object[puts.size()];
 table.batch(puts, results);
 validateSizeAndEmpty(results, KEYS.length);
 // Deletes
 ArrayList<Delete> deletes = new ArrayList<>();
 for (int i = 0; i < KEYS.length; i++) {
  Delete delete = new Delete(KEYS[i]);
  delete.addFamily(BYTES_FAMILY);
  deletes.add(delete);
 }
 table.delete(deletes);
 Assert.assertTrue(deletes.isEmpty());
 // Get to make sure ...
 for (byte[] k : KEYS) {
  Get get = new Get(k);
  get.addColumn(BYTES_FAMILY, QUALIFIER);
  Assert.assertFalse(table.exists(get));
 }
 table.close();
}

Code example source: apache/hbase

@Test
public void testBatchWithPut() throws Exception {
 LOG.info("test=testBatchWithPut");
 Table table = CONNECTION.getTable(TEST_TABLE);
 // put multiple rows using a batch
 List<Put> puts = constructPutRequests();
 Object[] results = new Object[puts.size()];
 table.batch(puts, results);
 validateSizeAndEmpty(results, KEYS.length);
 if (true) {
  int liveRScount = UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().size();
  assert liveRScount > 0;
  JVMClusterUtil.RegionServerThread liveRS =
   UTIL.getMiniHBaseCluster().getLiveRegionServerThreads().get(0);
  liveRS.getRegionServer().abort("Aborting for tests", new Exception("testBatchWithPut"));
  puts = constructPutRequests();
  try {
   results = new Object[puts.size()];
   table.batch(puts, results);
  } catch (RetriesExhaustedWithDetailsException ree) {
   LOG.info(ree.getExhaustiveDescription());
   table.close();
   throw ree;
  }
  validateSizeAndEmpty(results, KEYS.length);
 }
 validateLoadedData(table);
 table.close();
}

Code example source: apache/hbase

Object[] objs = new Object[batches.size()];
try (Table table = TEST_UTIL.getConnection().getTable(TABLE_NAME)) {
 table.batch(batches, objs);
 fail("Where is the exception? We put the malformed cells!!!");
} catch (RetriesExhaustedWithDetailsException e) {

Code example source: apache/hbase

/**
 * This is for testing the active number of threads that were used while
 * doing a batch operation. It inserts one row per region via the batch
 * operation, and then checks the number of active threads.
 * <p/>
 * For HBASE-3553
 */
@Test
public void testActiveThreadsCount() throws Exception {
 UTIL.getConfiguration().setLong("hbase.htable.threads.coresize", slaves + 1);
 try (Connection connection = ConnectionFactory.createConnection(UTIL.getConfiguration())) {
  ThreadPoolExecutor executor = HTable.getDefaultExecutor(UTIL.getConfiguration());
  try {
   try (Table t = connection.getTable(TEST_TABLE, executor)) {
    List<Put> puts = constructPutRequests(); // creates a Put for every region
    t.batch(puts, null);
    HashSet<ServerName> regionservers = new HashSet<>();
    try (RegionLocator locator = connection.getRegionLocator(TEST_TABLE)) {
     for (Row r : puts) {
      HRegionLocation location = locator.getRegionLocation(r.getRow());
      regionservers.add(location.getServerName());
     }
    }
    assertEquals(regionservers.size(), executor.getLargestPoolSize());
   }
  } finally {
   executor.shutdownNow();
  }
 }
}

Code example source: apache/hbase

Object[] results = new Object[actions.size()];
hTableInterface.batch(actions, results);
assertEquals(MyObserver.tr2.getMin(), range2.getMin());
assertEquals(MyObserver.tr2.getMax(), range2.getMax());
