Usage of the org.apache.hadoop.hive.ql.metadata.Table.setDbName() method, with code examples

x33g5p2x · reposted on 2022-01-29

This article collects code examples for the Java method org.apache.hadoop.hive.ql.metadata.Table.setDbName() and shows how Table.setDbName() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of Table.setDbName() are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: setDbName

About Table.setDbName

No description is provided by the source. In short, setDbName(String) records which database the table belongs to by setting the dbName field on the underlying metastore Table object.
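
As a quick orientation before the collected examples, here is a minimal, self-contained sketch of a typical call site. It is only an illustrative sketch: the class name SetDbNameExample, the names example_db and example_table, and the use of a default HiveConf to obtain a Hive client are assumptions and do not come from the examples below.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.mapred.TextInputFormat;

public class SetDbNameExample {
 public static void main(String[] args) throws HiveException {
  // Hypothetical names, used only for illustration.
  String dbName = "example_db";
  String tableName = "example_table";

  // Build a Table backed by a fresh metastore Table object.
  Table table = new Table(dbName, tableName);
  // setDbName() records which database the table belongs to.
  table.setDbName(dbName);
  table.setInputFormatClass(TextInputFormat.class);
  table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);

  // Register the table through the Hive client (connection settings assumed).
  Hive hive = Hive.get(new HiveConf());
  hive.createTable(table);
 }
}

In the Gobblin examples below, the same call is used to repoint a copied table at a different target database before it is registered.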

Code examples

Code example source: apache/incubator-gobblin

private Table getTargetTable(Table originTable, Path targetLocation) throws IOException {
 try {
  Table targetTable = originTable.copy();
  targetTable.setDbName(this.targetDatabase);
  targetTable.setDataLocation(targetLocation);
  /*
   * Need to set the table owner as the flow executor
   */
  targetTable.setOwner(UserGroupInformation.getCurrentUser().getShortUserName());
  targetTable.getTTable().putToParameters(HiveDataset.REGISTERER, GOBBLIN_DISTCP);
  targetTable.getTTable().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
    Long.toString(this.startTime));
  targetTable.getTTable().unsetCreateTime();
  HiveAvroCopyEntityHelper.updateTableAttributesIfAvro(targetTable, this);
  return targetTable;
 } catch (HiveException he) {
  throw new IOException(he);
 }
}

Code example source: apache/incubator-gobblin

private CopyableDatasetRequestor getRequestor(String dbName, String tableName) {
  CopyableDatasetRequestor requestor = Mockito.mock(CopyableDatasetRequestor.class);
  HiveDataset dataset = Mockito.mock(HiveDataset.class);

  Table table = new Table(new org.apache.hadoop.hive.metastore.api.Table());
  table.setDbName(dbName);
  table.setTableName(tableName);

  Mockito.when(dataset.getTable()).thenReturn(table);
  Mockito.when(requestor.getDataset()).thenReturn(dataset);

  return requestor;
 }
}

Code example source: apache/incubator-gobblin

private HiveTargetPathHelper createTestTargetPathHelper(Properties properties) {
 HiveDataset dataset = Mockito.mock(HiveDataset.class);
 Table table = new Table(new org.apache.hadoop.hive.metastore.api.Table());
 table.setDbName("dbName");
 table.setTableName("tableName");
 Mockito.when(dataset.getTable()).thenReturn(table);
 Mockito.when(dataset.getTableRootPath()).thenReturn(Optional.of(TABLE_ROOT));
 Mockito.when(dataset.getProperties()).thenReturn(properties);
 HiveTargetPathHelper helper = new HiveTargetPathHelper(dataset);
 return helper;
}

Code example source: apache/hive

private static class ThreadLocalHive extends ThreadLocal<Hive> {
 @Override
 protected Hive initialValue() {
  return null;
 }
 @Override
 public synchronized void set(Hive hiveObj) {
  Hive currentHive = this.get();
  if (currentHive != hiveObj) {
   // Remove/close current thread-local Hive object before overwriting with new Hive object.
   remove();
   super.set(hiveObj);
  }
 }
 @Override
 public synchronized void remove() {
  Hive currentHive = this.get();
  if (currentHive != null) {
   // Close the metastore connections before removing it from thread local hiveDB.
   currentHive.close(false);
   super.remove();
  }
 }
}

Code example source: apache/incubator-gobblin

private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
 try {
  Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(), originPartition.getTPartition().deepCopy());
  targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
  targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
  targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
  targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
    Long.toString(this.hiveCopyEntityHelper.getStartTime()));
  targetPartition.setLocation(targetLocation.toString());
  targetPartition.getTPartition().unsetCreateTime();
  return targetPartition;
 } catch (HiveException he) {
  throw new IOException(he);
 }
}

Code example source: apache/hive

table.setDbName(dbName);
table.setInputFormatClass(TextInputFormat.class);
table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);

Code example source: apache/hive

@Test
public void testDataDeletion() throws HiveException,
 IOException, TException {
 Database db = new Database();
 db.setName(dbName);
 hive.createDatabase(db);
 Table table = new Table(dbName, tableName);
 table.setDbName(dbName);
 table.setInputFormatClass(TextInputFormat.class);
 table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
 table.setPartCols(partCols);
 hive.createTable(table);
 table = hive.getTable(dbName, tableName);
 Path fakeTable = table.getPath().getParent().suffix(
   Path.SEPARATOR + "faketable");
 fs = fakeTable.getFileSystem(hive.getConf());
 fs.mkdirs(fakeTable);
 fs.deleteOnExit(fakeTable);
 Path fakePart = new Path(table.getDataLocation().toString(),
   "fakepartition=fakevalue");
 fs.mkdirs(fakePart);
 fs.deleteOnExit(fakePart);
 hive.dropTable(dbName, tableName, true, true);
 assertFalse(fs.exists(fakePart));
 hive.dropDatabase(dbName);
 assertFalse(fs.exists(fakeTable));
}

Code example source: apache/drill

tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
 tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {

Code example source: apache/hive

tbl.setDbName(Utilities.getDatabaseName(alterTbl.getNewName()));
 tbl.setTableName(Utilities.getTableName(alterTbl.getNewName()));
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {

Code example source: apache/hive

String[] names = Utilities.getDbTableName(targetTableName);
tbl.setDbName(names[0]);
tbl.setTableName(names[1]);

Code example source: apache/drill

String[] names = Utilities.getDbTableName(targetTableName);
tbl.setDbName(names[0]);
tbl.setTableName(names[1]);

Code example source: apache/drill

try {
 if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
  tbl.setDbName(SessionState.get().getCurrentDatabase());

Code example source: apache/hive

private Table createTestTable() throws HiveException, AlreadyExistsException {
 Database db = new Database();
 db.setName(dbName);
 hive.createDatabase(db, true);
 Table table = new Table(dbName, tableName);
 table.setDbName(dbName);
 table.setInputFormatClass(TextInputFormat.class);
 table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
 table.setPartCols(partCols);
 hive.createTable(table);
 table = hive.getTable(dbName, tableName);
 Assert.assertTrue(table.getTTable().isSetId());
 table.getTTable().unsetId();
 for (Map<String, String> partSpec : parts) {
  hive.createPartition(table, partSpec);
 }
 return table;
}

Code example source: apache/lens

/**
 * Creates the table.
 *
 * @param eventName the event name
 * @param className the class name
 * @return the table
 * @throws Exception the exception
 */
private Table createTable(String eventName, String className) throws Exception {
 Table tmp;
 try {
  Database db = new Database();
  db.setName(database);
  client.createDatabase(db, true);
  Class<LoggableLensStatistics> statisticsClass = (Class<LoggableLensStatistics>) Class.forName(className);
  LoggableLensStatistics stat = statisticsClass.newInstance();
  tmp = stat.getHiveTable(conf);
  tmp.setDbName(database);
  if (LOG.isDebugEnabled()) {
   LOG.debug("Creating table  " + tmp.getTableName());
  }
  client.createTable(tmp);
  tmp = client.getTable(database, eventName);
 } catch (Exception e1) {
  LOG.warn("Unable to create hive table, exiting", e1);
  throw e1;
 }
 return tmp;
}

The getTargetTable and getTargetPartition examples above also appear verbatim under the Maven artifacts com.linkedin.gobblin/gobblin-data-management and org.apache.gobblin/gobblin-data-management; those duplicate listings are omitted here.

Code example source: org.apache.hadoop.hive/hive-exec

try {
 if (tbl.getDbName() == null || "".equals(tbl.getDbName().trim())) {
  tbl.setDbName(getCurrentDatabase());
