本文整理了 Java 中 org.apache.hadoop.hive.metastore.api.Partition.getSd() 方法的一些代码示例,展示了 Partition.getSd() 的具体用法。这些代码示例主要来源于 Github/Stackoverflow/Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.getSd() 方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Partition
类名称:Partition
方法名:getSd
方法描述:暂无
代码示例来源:origin: apache/hive
/**
 * Returns the data location of this partition, or {@code null} when the
 * underlying Thrift partition carries no storage descriptor.
 */
public String getLocation() {
  StorageDescriptor sd = tPartition.getSd();
  return (sd == null) ? null : sd.getLocation();
}
代码示例来源:origin: apache/hive
/**
 * Get the storage descriptor for a compaction: the partition's descriptor
 * when a partition was resolved, otherwise the table's.
 *
 * @param t table from {@link #resolveTable(org.apache.hadoop.hive.metastore.txn.CompactionInfo)}
 * @param p partition from {@link #resolvePartition(org.apache.hadoop.hive.metastore.txn.CompactionInfo)},
 *          may be {@code null} for a non-partitioned compaction
 * @return metastore storage descriptor.
 */
protected StorageDescriptor resolveStorageDescriptor(Table t, Partition p) {
  return (p == null) ? t.getSd() : p.getSd();
}
代码示例来源:origin: apache/hive
/**
 * Records the input format for this partition, both on this wrapper and on
 * the underlying Thrift storage descriptor.
 *
 * @param inputFormatClass the {@link InputFormat} implementation to use
 */
public void setInputFormatClass(Class<? extends InputFormat> inputFormatClass) {
  String formatName = inputFormatClass.getName();
  this.inputFormatClass = inputFormatClass;
  tPartition.getSd().setInputFormat(formatName);
}
代码示例来源:origin: apache/hive
/**
 * Captures a lightweight identity for a partition: its value list (when set)
 * and, when a storage descriptor is present, its location.
 */
PartValEqWrapperLite(Partition partition) {
  if (partition.getSd() != null) {
    this.location = partition.getSd().getLocation();
  }
  this.values = partition.isSetValues() ? partition.getValues() : null;
}
代码示例来源:origin: apache/hive
/**
 * Associates a skewed-column value list with the directory that stores its
 * rows, lazily creating the location map on the storage descriptor's
 * skewed info if it does not exist yet.
 *
 * @param valList skewed column values identifying the bucket
 * @param dirName directory holding the rows for those values
 * @throws HiveException declared for API compatibility
 */
public void setSkewedValueLocationMap(List<String> valList, String dirName)
    throws HiveException {
  Map<List<String>, String> mappings =
      tPartition.getSd().getSkewedInfo().getSkewedColValueLocationMaps();
  if (mappings == null) {
    mappings = new HashMap<>();
    tPartition.getSd().getSkewedInfo().setSkewedColValueLocationMaps(mappings);
  }
  // Add or update the mapping for this value list.
  mappings.put(valList, dirName);
}
代码示例来源:origin: apache/hive
/**
 * Builds the human-readable description of a partition: detailed partition
 * metadata plus, for tables that are not virtual views, storage information.
 *
 * @param part partition to describe
 * @return formatted multi-line description text
 */
public static String getPartitionInformation(Partition part) {
  StringBuilder info = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);

  // Partition-level metadata.
  info.append(LINE_DELIM).append("# Detailed Partition Information").append(LINE_DELIM);
  getPartitionMetaDataInformation(info, part);

  // Virtual views have no physical storage, so skip storage details for them.
  if (part.getTable().getTableType() != TableType.VIRTUAL_VIEW) {
    info.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
    getStorageDescriptorInfo(info, part.getTPartition().getSd());
  }

  return info.toString();
}
代码示例来源:origin: apache/hive
/**
 * Derives the schema {@link Properties} for the given metastore partition.
 *
 * NOTE(review): {@code partition.getSd()} is passed for BOTH the first and
 * second arguments of {@code MetaStoreUtils.getSchema}. If the second
 * argument is meant to be the table-level storage descriptor,
 * {@code table.getSd()} may be intended instead — confirm against the
 * {@code getSchema} contract before changing.
 */
public static Properties getPartitionMetadata(
org.apache.hadoop.hive.metastore.api.Partition partition,
org.apache.hadoop.hive.metastore.api.Table table) {
return MetaStoreUtils
.getSchema(partition.getSd(), partition.getSd(), partition
.getParameters(), table.getDbName(), table.getTableName(),
table.getPartitionKeys());
}
代码示例来源:origin: prestodb/presto
/**
 * Converts a Thrift metastore partition into Presto's partition model.
 * The Thrift partition must carry a storage descriptor; its columns and
 * storage settings are copied into the resulting builder.
 *
 * @throws PrestoException when the partition has no storage descriptor
 */
public static Partition fromMetastoreApiPartition(org.apache.hadoop.hive.metastore.api.Partition partition)
{
    StorageDescriptor sd = partition.getSd();
    if (sd == null) {
        throw new PrestoException(HIVE_INVALID_METADATA, "Partition does not contain a storage descriptor: " + partition);
    }

    Partition.Builder builder = Partition.builder()
            .setDatabaseName(partition.getDbName())
            .setTableName(partition.getTableName())
            .setValues(partition.getValues())
            .setParameters(partition.getParameters())
            .setColumns(sd.getCols().stream()
                    .map(ThriftMetastoreUtil::fromMetastoreApiFieldSchema)
                    .collect(toList()));

    String context = format("%s.%s", partition.getTableName(), partition.getValues());
    fromMetastoreApiStorageDescriptor(sd, builder.getStorageBuilder(), context);
    return builder.build();
}
代码示例来源:origin: prestodb/presto
/**
 * Collects every data location for a table: the table's own location (when
 * set) plus the location of each partition that lives outside the table
 * directory.
 *
 * @return storage paths whose directory contents the caller should assert on
 */
private static List<String> listAllDataPaths(HiveMetastore metastore, String schemaName, String tableName)
{
    ImmutableList.Builder<String> locations = ImmutableList.builder();
    Table table = metastore.getTable(schemaName, tableName).get();
    // Hoisted: the table location is read several times below.
    String tableLocation = table.getSd().getLocation();
    if (tableLocation != null) {
        // For unpartitioned table, there should be nothing directly under this directory.
        // But including this location in the set makes the directory content assert more
        // extensive, which is desirable.
        locations.add(tableLocation);
    }

    Optional<List<String>> partitionNames = metastore.getPartitionNames(schemaName, tableName);
    if (partitionNames.isPresent()) {
        metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).stream()
                .map(partition -> partition.getSd().getLocation())
                // Guard against null locations: the previous code would throw NPE on
                // startsWith for a partition (or table) with no location set.
                .filter(location -> location != null
                        && (tableLocation == null || !location.startsWith(tableLocation)))
                .forEach(locations::add);
    }
    return locations.build();
}
代码示例来源:origin: apache/hive
/**
 * Creating a partition on a partitioned table must register it with the
 * metastore client using the expected location and partition values.
 */
@Test
public void createOnPartitionTable() throws Exception {
  helper.createPartitionIfNotExists(PARTITIONED_VALUES);
  verify(mockClient).add_partition(partitionCaptor.capture());

  Partition added = partitionCaptor.getValue();
  assertThat(added.getSd().getLocation(), is(PARTITION_LOCATION));
  assertThat(added.getValues(), is(PARTITIONED_VALUES));
}
代码示例来源:origin: apache/hive
/**
 * Dropping all partitions that share a parent directory, with data deletion
 * enabled, must remove the now-empty parent directory as well.
 */
@Test
public void testDropPartitionDeleteParentDir() throws Exception {
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), true);
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[1].getValues(), true);

  List<Partition> dropped = Lists.newArrayList(PARTITIONS[0], PARTITIONS[1]);
  List<Partition> remaining = Lists.newArrayList(PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, dropped, remaining, true, false);

  Path parent = new Path(PARTITIONS[0].getSd().getLocation()).getParent();
  Assert.assertFalse("The parent path '" + parent.toString() + "' should not exist.",
      metaStore.isPathExists(parent));
}
代码示例来源:origin: apache/hive
/**
 * add_partitions must reject a partition whose storage-descriptor column
 * has a null name.
 */
@Test(expected = MetaException.class)
public void testAddPartitionsNullColNameInSd() throws Exception {
  createTable();
  Partition part = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  part.getSd().getCols().get(0).setName(null);
  client.add_partitions(Lists.newArrayList(part));
}
代码示例来源:origin: apache/hive
/**
 * A view partition added through a PartitionSpec with a null location keeps
 * the null location — no default location is derived for views.
 */
@Test
public void testAddPartitionSpecForViewNullPartLocation() throws Exception {
  String tableName = "test_add_partition_view";
  createView(tableName);

  Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE);
  partition.getSd().setLocation(null);
  PartitionSpecProxy spec =
      buildPartitionSpec(DB_NAME, tableName, null, Lists.newArrayList(partition));
  client.add_partitions_pspec(spec);

  Partition stored = client.getPartition(DB_NAME, tableName, "year=2017");
  Assert.assertNull(stored.getSd().getLocation());
}
代码示例来源:origin: apache/hive
/**
 * add_partition must reject a partition whose storage-descriptor column
 * has a null type.
 */
@Test(expected = MetaException.class)
public void testAddPartitionNullColTypeInSd() throws Exception {
  createTable();
  Partition part = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  part.getSd().getCols().get(0).setType(null);
  client.add_partition(part);
}
代码示例来源:origin: apache/hive
/**
 * add_partition must reject a partition whose storage-descriptor column
 * has a null name.
 */
@Test(expected = MetaException.class)
public void testAddPartitionNullColNameInSd() throws Exception {
  createTable();
  Partition part = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  part.getSd().getCols().get(0).setName(null);
  client.add_partition(part);
}
代码示例来源:origin: apache/hive
/**
 * When a partition is added with a null location, the metastore derives one
 * under the table directory and creates that path on the filesystem.
 */
@Test
public void testAddPartitionNullLocation() throws Exception {
  // Deduplicated: the table location was spelled out twice before.
  String tableLocation = metaStore.getWarehouseRoot() + "/addparttest2";
  createTable(DB_NAME, TABLE_NAME, tableLocation);
  client.add_partition(buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE, null));

  Partition part = client.getPartition(DB_NAME, TABLE_NAME, "year=2017");
  Assert.assertEquals(tableLocation + "/year=2017", part.getSd().getLocation());
  Assert.assertTrue(metaStore.isPathExists(new Path(part.getSd().getLocation())));
}
代码示例来源:origin: apache/hive
/**
 * add_partition must reject a partition whose storage descriptor carries no
 * serde info (set to null here).
 */
@Test(expected = MetaException.class)
public void testAddPartitionEmptySerdeInfo() throws Exception {
  createTable();
  Partition part = buildPartition(DB_NAME, TABLE_NAME, DEFAULT_YEAR_VALUE);
  part.getSd().setSerdeInfo(null);
  client.add_partition(part);
}
代码示例来源:origin: apache/hive
/**
 * Builds a PartitionDesc for the given partition against a pre-built table
 * descriptor.
 *
 * For self-describing input formats the per-partition column list is not
 * needed (the reader supplies it), so only the column-free schema is set;
 * otherwise the full partition schema is used.
 *
 * @param part partition to describe
 * @param tableDesc descriptor of the owning table
 * @throws HiveException propagated from schema construction
 */
public PartitionDesc(final Partition part, final TableDesc tableDesc) throws HiveException {
PartitionDescConstructorHelper(part, tableDesc, true);
if (Utilities.isInputFileFormatSelfDescribing(this)) {
// if IF is self describing no need to send column info per partition, since its not used anyway.
Table tbl = part.getTable();
setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(),
part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
} else {
setProperties(part.getMetadataFromPartitionSchema());
}
}
代码示例来源:origin: apache/hive
/**
 * A view partition added with no storage descriptor at all is stored and
 * later returned with a null storage descriptor.
 */
@Test
public void testAddPartitionsForViewNullPartSd() throws Exception {
  String tableName = "test_add_partition_view";
  createView(tableName);

  Partition partition = buildPartition(DB_NAME, tableName, DEFAULT_YEAR_VALUE);
  partition.setSd(null);
  client.add_partitions_pspec(
      buildPartitionSpec(DB_NAME, tableName, null, Lists.newArrayList(partition)));

  Partition stored = client.getPartition(DB_NAME, tableName, "year=2017");
  Assert.assertNull(stored.getSd());
}
代码示例来源:origin: apache/hive
/**
 * A partition's explicitly-set location wins over the PartitionSpec root
 * path: the stored location is the partition's own path (minus its trailing
 * slash), not one derived from the spec's root.
 */
@Test
public void testAddPartitionSpecSetRootPath() throws Exception {
  Table table = createTable();
  String tableLocation = table.getSd().getLocation();
  String explicitRoot = tableLocation + "/addPartSpecRootPath/";
  String specRoot = tableLocation + "/someotherpath/";

  Partition partition = buildPartition(DB_NAME, TABLE_NAME, "2007", explicitRoot + "part2007/");
  client.add_partitions_pspec(
      buildPartitionSpec(DB_NAME, TABLE_NAME, specRoot, Lists.newArrayList(partition)));

  Partition resultPart = client.getPartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2007"));
  Assert.assertEquals(explicitRoot + "part2007", resultPart.getSd().getLocation());
}
内容来源于网络,如有侵权,请联系作者删除!