本文整理了Java中org.apache.hadoop.hive.ql.metadata.Partition.getTPartition()
方法的一些代码示例,展示了Partition.getTPartition()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助你理解用法。Partition.getTPartition()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Partition
类名称:Partition
方法名:getTPartition
[英]Should be only used by serialization.
[中]只能由序列化使用。
代码示例来源:origin: apache/hive
/**
 * Checks whether the given partition is stored in archived form.
 *
 * @param p the partition to inspect
 * @return true if the partition has been archived
 */
public static boolean isArchived(Partition p) {
  // Delegate to the metastore utility, which reads the thrift-level partition.
  org.apache.hadoop.hive.metastore.api.Partition tPart = p.getTPartition();
  return MetaStoreUtils.isArchived(tPart);
}
代码示例来源:origin: apache/hive
/** Returns the parameter map of the underlying thrift partition object. */
@Override
public Map<String, String> getPartParameters() {
  final org.apache.hadoop.hive.metastore.api.Partition tPart = partition.getTPartition();
  return tPart.getParameters();
}
代码示例来源:origin: apache/hive
/** Returns the storage descriptor of the underlying thrift partition object. */
@Override
public StorageDescriptor getPartSd() {
  final org.apache.hadoop.hive.metastore.api.Partition tPart = partition.getTPartition();
  return tPart.getSd();
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Returns the partition's creation time in milliseconds, used as a proxy
 * for its update time.
 */
@Override
public long getUpdateTime(Partition partition) throws UpdateNotFoundException {
  // TODO: if a table/partition is registered by gobblin, an update time will be made
  // available in table properties; use that instead of the create time.
  final long createTimeSeconds = partition.getTPartition().getCreateTime();
  return TimeUnit.SECONDS.toMillis(createTimeSeconds);
}
代码示例来源:origin: apache/hive
/** Builds a fresh metadata-level Partition wrapper around the thrift partition. */
@Override
public Object getOutput() throws HiveException {
  Partition result = new Partition(table, partition.getTPartition());
  return result;
}
代码示例来源:origin: apache/drill
/**
 * Tells whether the supplied partition has been archived.
 *
 * @param p partition to check
 * @return true when the partition is archived
 */
public static boolean isArchived(Partition p) {
  return MetaStoreUtils.isArchived(
      p.getTPartition());
}
代码示例来源:origin: apache/hive
/**
 * Renders a human-readable description of a partition: its metadata,
 * followed by storage details for non-view tables.
 */
public static String getPartitionInformation(Partition part) {
  final StringBuilder info = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE);

  // Partition-level metadata section.
  info.append(LINE_DELIM).append("# Detailed Partition Information").append(LINE_DELIM);
  getPartitionMetaDataInformation(info, part);

  // Virtual views have no physical storage, so only emit storage info otherwise.
  final boolean isVirtualView = part.getTable().getTableType() == TableType.VIRTUAL_VIEW;
  if (!isVirtualView) {
    info.append(LINE_DELIM).append("# Storage Information").append(LINE_DELIM);
    getStorageDescriptorInfo(info, part.getTPartition().getSd());
  }
  return info.toString();
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Resolves the url of the avro {@link Schema} file for the given partition.
 *
 * @param partition partition whose avro schema is to be located
 * @return a {@link Path} pointing at the partition's avro schema file
 */
public Path getSchemaUrl(Partition partition) throws IOException {
  final StorageDescriptor sd = partition.getTPartition().getSd();
  return getSchemaUrl(sd);
}
代码示例来源:origin: apache/hive
/**
 * Returns the archiving level, i.e. how many fields of the partial
 * specification ARCHIVE was run for.
 *
 * @throws HiveException wrapping any metastore error
 */
public static int getArchivingLevel(Partition p) throws HiveException {
  final org.apache.hadoop.hive.metastore.api.Partition tPart = p.getTPartition();
  try {
    return MetaStoreUtils.getArchivingLevel(tPart);
  } catch (MetaException ex) {
    // Re-wrap, preserving both message and cause.
    throw new HiveException(ex.getMessage(), ex);
  }
}
代码示例来源:origin: apache/hive
/** Appends the partition's core metadata (values, names, times, location, params). */
private static void getPartitionMetaDataInformation(StringBuilder tableInfo, Partition part) {
  final org.apache.hadoop.hive.metastore.api.Partition tPart = part.getTPartition();

  formatOutput("Partition Value:", part.getValues().toString(), tableInfo);
  formatOutput("Database:", tPart.getDbName(), tableInfo);
  formatOutput("Table:", part.getTable().getTableName(), tableInfo);
  formatOutput("CreateTime:", formatDate(tPart.getCreateTime()), tableInfo);
  formatOutput("LastAccessTime:", formatDate(tPart.getLastAccessTime()), tableInfo);
  formatOutput("Location:", part.getLocation(), tableInfo);

  // Only print the parameters section when there is something to show.
  if (!tPart.getParameters().isEmpty()) {
    tableInfo.append("Partition Parameters:").append(LINE_DELIM);
    displayAllParameters(tPart.getParameters(), tableInfo);
  }
}
代码示例来源:origin: apache/incubator-gobblin
/**
 * Matches the partition's serde library against the built-in Hive serdes and,
 * if found, returns the corresponding serde name.
 */
public Optional<String> getFileFormat() {
  final String serializationLib =
      this.hivePartition.getTPartition().getSd().getSerdeInfo().getSerializationLib();
  for (HiveSerDeWrapper.BuiltInHiveSerDe candidate : HiveSerDeWrapper.BuiltInHiveSerDe.values()) {
    if (candidate.toString().equalsIgnoreCase(serializationLib)) {
      return Optional.fromNullable(candidate.name());
    }
  }
  // No built-in serde matched the partition's serialization library.
  return Optional.<String>absent();
}
代码示例来源:origin: apache/hive
/** Picks the storage descriptor from the partition, falling back to the table when absent. */
private static StorageDescriptor retrieveStorageDescriptor(Table tbl, Partition part) {
  if (part == null) {
    return tbl.getTTable().getSd();
  }
  return part.getTPartition().getSd();
}
代码示例来源:origin: apache/drill
/**
 * Computes the archiving level — the number of fields set in the partial
 * specification that ARCHIVE was run for.
 *
 * @throws HiveException wrapping any {@link MetaException} from the metastore
 */
public static int getArchivingLevel(Partition p) throws HiveException {
  try {
    return MetaStoreUtils.getArchivingLevel(
        p.getTPartition());
  } catch (MetaException ex) {
    throw new HiveException(ex.getMessage(), ex);
  }
}
代码示例来源:origin: apache/incubator-gobblin
/** Derives the partition's update time by parsing a date out of its storage location. */
@Override
public long getUpdateTime(Partition partition) throws UpdateNotFoundException {
  final String location = partition.getTPartition().getSd().getLocation();
  return parseDateForLocation(location);
}
代码示例来源:origin: apache/hive
/** Builds a Partish backed by mocked partition objects whose parameters are {@code params}. */
public Partish buildPartition() {
  // Mock the thrift-level partition first so its parameters can be stubbed.
  org.apache.hadoop.hive.metastore.api.Partition tPartition =
      Mockito.mock(org.apache.hadoop.hive.metastore.api.Partition.class);
  doReturn(params).when(tPartition).getParameters();

  // Wire the metadata-level partition to hand out the thrift mock.
  Partition partition = Mockito.mock(Partition.class);
  doReturn(tPartition).when(partition).getTPartition();

  return Partish.buildFor(null, partition);
}
}
代码示例来源:origin: apache/incubator-gobblin
/** Creates a fully mocked Partition whose storage descriptor reports the given location. */
public static Partition createMockPartitionWithLocation(String location) {
  // Build the chain bottom-up: sd -> thrift partition -> metadata partition.
  StorageDescriptor sd = Mockito.mock(StorageDescriptor.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(sd.getLocation()).thenReturn(location);

  org.apache.hadoop.hive.metastore.api.Partition tPartition =
      Mockito.mock(org.apache.hadoop.hive.metastore.api.Partition.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(tPartition.getSd()).thenReturn(sd);

  Partition partition = Mockito.mock(Partition.class, Mockito.RETURNS_SMART_NULLS);
  Mockito.when(partition.getTPartition()).thenReturn(tPartition);
  return partition;
}
}
代码示例来源:origin: apache/hive
/** Serializes all partitions (if any) as a JSON array into the given writer. */
private void writePartitions(JsonWriter writer, ReplicationSpec additionalPropertiesProvider)
    throws SemanticException, IOException {
  writer.jsonGenerator.writeStartArray();
  if (partitions != null) {
    for (org.apache.hadoop.hive.ql.metadata.Partition p : partitions) {
      PartitionSerializer serializer = new PartitionSerializer(p.getTPartition());
      serializer.writeTo(writer, additionalPropertiesProvider);
    }
  }
  writer.jsonGenerator.writeEndArray();
}
}
代码示例来源:origin: apache/hive
/**
 * Alters an existing partition in the metastore, optionally instructing it
 * not to recompute statistics.
 *
 * @param resetStatistics when false, the DO_NOT_UPDATE_STATS hint is set so
 *                        the metastore keeps the existing statistics
 * @param newTPart        the partition carrying the updated thrift metadata
 * @throws TException on any metastore communication failure
 */
private void setStatsPropAndAlterPartition(boolean resetStatistics, Table tbl,
    Partition newTPart, TableSnapshot tableSnapshot) throws TException {
  EnvironmentContext ec = new EnvironmentContext();
  if (!resetStatistics) {
    ec.putToProperties(StatsSetupConst.DO_NOT_UPDATE_STATS, StatsSetupConst.TRUE);
  }
  LOG.debug("Altering existing partition " + newTPart.getSpec());
  // Fix: pass the populated context `ec` rather than a fresh EnvironmentContext,
  // which silently discarded the DO_NOT_UPDATE_STATS hint set above.
  getSynchronizedMSC().alter_partition(tbl.getCatName(),
      tbl.getDbName(), tbl.getTableName(), newTPart.getTPartition(), ec,
      tableSnapshot == null ? null : tableSnapshot.getValidWriteIdList());
}
代码示例来源:origin: apache/incubator-gobblin
/** Builds a test partition with a single key and an avro-serde storage descriptor. */
private Partition getTestPartition(Table table) throws HiveException {
  Partition partition = new Partition(table, ImmutableMap.of("partition_key", "1"), null);

  // Minimal storage descriptor: avro serde plus one int column.
  SerDeInfo serdeInfo = new SerDeInfo("avro", AvroSerDe.class.getName(), null);
  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(serdeInfo);
  sd.setCols(Lists.newArrayList(new FieldSchema("foo", "int", null)));

  partition.getTPartition().setSd(sd);
  return partition;
}
}
代码示例来源:origin: apache/hive
/**
 * Constructs a PartitionDesc, deriving its properties either from the
 * partition schema or — for self-describing input formats — from a
 * column-free schema, since column info would be unused.
 */
public PartitionDesc(final Partition part, final TableDesc tableDesc) throws HiveException {
  PartitionDescConstructorHelper(part, tableDesc, true);
  if (!Utilities.isInputFileFormatSelfDescribing(this)) {
    setProperties(part.getMetadataFromPartitionSchema());
  } else {
    // Self-describing formats carry their own schema; skip per-partition column info.
    Table tbl = part.getTable();
    setProperties(MetaStoreUtils.getSchemaWithoutCols(part.getTPartition().getSd(),
        part.getParameters(), tbl.getDbName(), tbl.getTableName(), tbl.getPartitionKeys()));
  }
}
内容来源于网络,如有侵权,请联系作者删除!