本文整理了Java中org.apache.hadoop.hive.metastore.api.Partition.setDbName()
方法的一些代码示例,展示了Partition.setDbName()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.setDbName()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Partition
类名称:Partition
方法名:setDbName
方法描述:暂无
代码示例来源:origin: apache/hive
/**
 * Sets the database name on the wrapped partition spec and propagates it to
 * every partition contained in the spec's partition list.
 */
@Override
public void setDbName(String dbName) {
  partitionSpec.setDbName(dbName);
  for (Partition part : partitionSpec.getPartitionList().getPartitions()) {
    part.setDbName(dbName);
  }
}
代码示例来源:origin: apache/hive
// Adding a batch where one partition has a null db name must fail with MetaException.
@Test(expected = MetaException.class)
public void testAddPartitionsNullDb() throws Exception {
  createTable();
  List<Partition> partitions = new ArrayList<>();
  partitions.add(buildPartition(DB_NAME, TABLE_NAME, "2016"));
  Partition badPartition = buildPartition(DB_NAME, TABLE_NAME, "2017");
  badPartition.setDbName(null);
  partitions.add(badPartition);
  client.add_partitions(partitions);
}
代码示例来源:origin: apache/hive
// alter_partitions must reject a partition whose db name differs from the target db.
@Test(expected = MetaException.class)
public void testAlterPartitionsChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  Partition changed = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(3);
  changed.setDbName(DB_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(changed));
}
代码示例来源:origin: apache/hive
// Renaming to a partition whose db name was nulled out must fail with MetaException.
@Test(expected = MetaException.class)
public void testRenamePartitionNullDbInPartition() throws Exception {
  // The returned old partition values are not needed here (unlike the sibling
  // change-db-name test); the call is made only for its table/partition setup side effect.
  createTable4PartColsParts(client);
  List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
  Partition partToRename = oldParts.get(3);
  partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
  partToRename.setDbName(null);
  client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
      partToRename);
}
代码示例来源:origin: apache/hive
// renamePartition must reject a target partition carrying a different db name.
@Test(expected = MetaException.class)
public void testRenamePartitionChangeDbName() throws Exception {
  List<List<String>> oldValues = createTable4PartColsParts(client);
  Partition renamed = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(3);
  renamed.setValues(Lists.newArrayList("2018", "01", "16"));
  renamed.setDbName(DB_NAME + "_2");
  client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), renamed);
}
代码示例来源:origin: apache/hive
// alter_partition (single) must reject a partition whose db name was changed.
@Test(expected = MetaException.class)
public void testAlterPartitionChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  Partition altered = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(3);
  altered.setDbName(DB_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, altered);
}
代码示例来源:origin: apache/hive
// Same db-name-change rejection, exercised through the EnvironmentContext overload.
@Test(expected = MetaException.class)
public void testAlterPartitionsWithEnvironmentCtxChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  Partition changed = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(3);
  changed.setDbName(DB_NAME + "_changed");
  client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(changed), new EnvironmentContext());
}
代码示例来源:origin: apache/hive
// Single-partition variant of the db-name-change rejection with an EnvironmentContext.
@Test(expected = MetaException.class)
public void testAlterPartitionWithEnvironmentCtxChangeDbName() throws Exception {
  createTable4PartColsParts(client);
  Partition altered = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(3);
  altered.setDbName(DB_NAME + "_changed");
  client.alter_partition(DB_NAME, TABLE_NAME, altered, new EnvironmentContext());
}
代码示例来源:origin: apache/hive
/**
 * Converts this HCat partition into a metastore API {@link Partition}.
 * If no storage location is set, a default one is derived from the table's
 * location plus the canonical partition-name path.
 *
 * @return the populated metastore Partition
 * @throws HCatException if a default partition path cannot be constructed
 */
Partition toHivePartition() throws HCatException {
  Partition hivePtn = new Partition();
  hivePtn.setDbName(dbName);
  hivePtn.setTableName(tableName);
  hivePtn.setValues(values);
  hivePtn.setParameters(parameters);
  if (sd.getLocation() == null) {
    LOG.warn("Partition location is not set! Attempting to construct default partition location.");
    try {
      String partName = Warehouse.makePartName(HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values);
      sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString());
    }
    catch(MetaException exception) {
      // Propagate the underlying MetaException as the cause instead of swallowing it.
      throw new HCatException("Could not construct default partition-path for "
          + hcatTable.getDbName() + "." + hcatTable.getTableName() + "[" + values + "]", exception);
    }
  }
  hivePtn.setSd(sd);
  // Creation time is recorded in epoch seconds, as the metastore expects.
  hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000));
  hivePtn.setLastAccessTimeIsSet(false);
  return hivePtn;
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Builds the target-side copy of a partition: a deep copy of the origin's metadata,
 * re-pointed at the target database and location and stamped with distcp
 * registration parameters. Create time is cleared so registration sets a fresh one.
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    String targetDb = this.hiveCopyEntityHelper.getTargetDatabase();
    Partition copy =
        new Partition(this.hiveCopyEntityHelper.getTargetTable(), originPartition.getTPartition().deepCopy());
    copy.getTable().setDbName(targetDb);
    copy.getTPartition().setDbName(targetDb);
    copy.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    copy.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    copy.setLocation(targetLocation.toString());
    copy.getTPartition().unsetCreateTime();
    return copy;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}
代码示例来源:origin: apache/hive
// A partition-spec entry lacking both db and table name must be rejected.
@Test(expected = MetaException.class)
public void testAddPartitionSpecNoDBAndTableInPartition() throws Exception {
  createTable();
  Partition anonymous = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  anonymous.setDbName(null);
  anonymous.setTableName(null);
  client.add_partitions_pspec(
      buildPartitionSpec(DB_NAME, TABLE_NAME, null, Lists.newArrayList(anonymous)));
}
代码示例来源:origin: prestodb/presto
/**
 * Translates a Presto {@code Partition} into its Hive metastore Thrift equivalent,
 * copying names, values, parameters, and a storage descriptor derived from the
 * partition's columns and storage settings.
 */
public static org.apache.hadoop.hive.metastore.api.Partition toMetastoreApiPartition(Partition partition)
{
    org.apache.hadoop.hive.metastore.api.Partition apiPartition = new org.apache.hadoop.hive.metastore.api.Partition();
    apiPartition.setDbName(partition.getDatabaseName());
    apiPartition.setTableName(partition.getTableName());
    apiPartition.setValues(partition.getValues());
    apiPartition.setSd(makeStorageDescriptor(partition.getTableName(), partition.getColumns(), partition.getStorage()));
    apiPartition.setParameters(partition.getParameters());
    return apiPartition;
}
代码示例来源:origin: apache/hive
/** Creates a bare in-memory Partition for the given table with the supplied values. */
private static Partition createPtn(Table t, List<String> pvals) {
  Partition partition = new Partition();
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setValues(pvals);
  return partition;
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Converts a {@link HivePartition} into a metastore {@link Partition}, copying
 * names, values, parameters, and optional create/last-access timestamps from
 * the partition's properties.
 */
public static Partition getPartition(HivePartition hivePartition) {
  State props = hivePartition.getProps();
  Partition result = new Partition();
  result.setDbName(hivePartition.getDbName());
  result.setTableName(hivePartition.getTableName());
  result.setValues(hivePartition.getValues());
  result.setParameters(getParameters(props));
  // The explicitly set create time wins; otherwise fall back to the property, if any.
  if (hivePartition.getCreateTime().isPresent()) {
    result.setCreateTime(Ints.checkedCast(hivePartition.getCreateTime().get()));
  } else if (props.contains(HiveConstants.CREATE_TIME)) {
    result.setCreateTime(props.getPropAsInt(HiveConstants.CREATE_TIME));
  }
  if (props.contains(HiveConstants.LAST_ACCESS_TIME)) {
    result.setLastAccessTime(props.getPropAsInt(HiveConstants.LAST_ACCESS_TIME));
  }
  result.setSd(getStorageDescriptor(hivePartition));
  return result;
}
代码示例来源:origin: apache/hive
// When the partition itself has no db/table name, the values set on the spec
// proxy must be applied and the partition added successfully.
@Test
public void testAddPartitionSpecDBAndTableSetFromSpecProxy() throws Exception {
  createTable();
  Partition nameless = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  nameless.setDbName(null);
  nameless.setTableName(null);
  PartitionSpecProxy partitionSpecProxy =
      buildPartitionSpec(null, null, null, Lists.newArrayList(nameless));
  partitionSpecProxy.setDbName(DB_NAME);
  partitionSpecProxy.setTableName(TABLE_NAME);
  client.add_partitions_pspec(partitionSpecProxy);
  Partition resultPart =
      client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList(DEFAULT_YEAR_VALUE));
  Assert.assertNotNull(resultPart);
}
代码示例来源:origin: apache/hive
/** Builds a single-value partition for the table, registers it in the metastore, and returns it. */
protected Partition newPartition(Table t, String value, List<Order> sortCols) throws Exception {
  Partition partition = new Partition();
  partition.addToValues(value);
  partition.setDbName(t.getDbName());
  partition.setTableName(t.getTableName());
  partition.setSd(newStorageDescriptor(getLocation(t.getTableName(), value), sortCols));
  partition.setParameters(new HashMap<String, String>());
  ms.add_partition(partition);
  return partition;
}
代码示例来源:origin: apache/storm
/**
 * Registers a new partition for the table: copies the table's storage descriptor,
 * appends the partition path to its location, and adds it via the metastore client.
 */
private static void addPartition(IMetaStoreClient client, Table tbl
    , List<String> partValues)
    throws IOException, TException {
  Partition partition = new Partition();
  partition.setDbName(tbl.getDbName());
  partition.setTableName(tbl.getTableName());
  StorageDescriptor descriptor = new StorageDescriptor(tbl.getSd());
  descriptor.setLocation(descriptor.getLocation() + Path.SEPARATOR + makePartPath(tbl.getPartitionKeys(), partValues));
  partition.setSd(descriptor);
  partition.setValues(partValues);
  client.add_partition(partition);
}
代码示例来源:origin: apache/hive
/**
 * Adds a partition for the table whose storage descriptor is a deep copy of the
 * table's, relocated to the table location plus the given suffix.
 */
private void add_partition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<>());
  partition.setSd(table.getSd().deepCopy());
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
代码示例来源:origin: apache/hive
/**
 * Builds a Partition object for the given db/table with a deep-copied storage
 * descriptor relocated by the supplied suffix, then refreshes its stats fast-path.
 */
private static Partition makePartitionObject(String dbName, String tblName,
    List<String> ptnVals, Table tbl, String ptnLocationSuffix) throws MetaException {
  Partition partition = new Partition();
  partition.setDbName(dbName);
  partition.setTableName(tblName);
  partition.setValues(ptnVals);
  partition.setParameters(new HashMap<>());
  partition.setSd(tbl.getSd().deepCopy());
  partition.getSd().setSerdeInfo(tbl.getSd().getSerdeInfo().deepCopy());
  partition.getSd().setLocation(tbl.getSd().getLocation() + ptnLocationSuffix);
  MetaStoreServerUtils.updatePartitionStatsFast(partition, tbl, warehouse, false, false, null, true);
  return partition;
}
代码示例来源:origin: apache/hive
/**
 * Registers a partition whose storage descriptor is a deep copy of the table's,
 * with its location extended by the given suffix.
 */
private void addPartition(HiveMetaStoreClient client, Table table,
    List<String> vals, String location) throws TException {
  Partition partition = new Partition();
  partition.setDbName(table.getDbName());
  partition.setTableName(table.getTableName());
  partition.setValues(vals);
  partition.setParameters(new HashMap<String, String>());
  partition.setSd(table.getSd().deepCopy());
  partition.getSd().setSerdeInfo(table.getSd().getSerdeInfo());
  partition.getSd().setLocation(table.getSd().getLocation() + location);
  client.add_partition(partition);
}
}
内容来源于网络,如有侵权,请联系作者删除!