Usage and code examples of the org.apache.hadoop.hive.ql.metadata.Partition.getName() method

Reposted by x33g5p2x on 2022-01-26 under Other

This article collects a number of Java code examples of the org.apache.hadoop.hive.ql.metadata.Partition.getName() method and shows how Partition.getName() is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow and Maven, and should serve as a useful reference. Details of the Partition.getName() method follow:
Package: org.apache.hadoop.hive.ql.metadata
Class: Partition
Method: getName

Introduction to Partition.getName

No description available.
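Judging from the examples collected below, Partition.getName() returns the partition name in Hive's key=value form (for example datepartition=2016-01-01-20, with multiple partition keys joined by /). The following minimal sketch shows one way the method might be called; it is not taken from any of the projects below, and the database name, table name and partition spec are placeholder assumptions for an existing partitioned table:

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionGetNameExample {
 public static void main(String[] args) throws Exception {
  // Assumes hive-site.xml (metastore connection settings) is available on the classpath.
  HiveConf conf = new HiveConf();
  Hive hive = Hive.get(conf);

  // "mydb", "mytable" and the partition spec below are placeholders for an existing partitioned table.
  Table table = hive.getTable("mydb", "mytable");
  Map<String, String> partSpec = new LinkedHashMap<String, String>();
  partSpec.put("datepartition", "2016-01-01-20");

  // forceCreate = false: return null instead of creating the partition if it does not exist.
  Partition partition = hive.getPartition(table, partSpec, false);
  if (partition != null) {
   // Prints the partition name, e.g. "datepartition=2016-01-01-20"
   System.out.println(partition.getName());
  }
 }
}

As the apache/incubator-gobblin tests below show, partition values are escaped when they appear in the name, so a value such as 2016/01/01/20 may need URLDecoder.decode(partition.getName(), "UTF-8") before being compared against the raw value.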

Code examples

Code example source: apache/incubator-gobblin

public String getName() {
 return this.hivePartition.getName();
}

Code example source: apache/hive

private Map<String, List<String>> convert(Map<Partition, List<String>> mapping) {
 Map<String, List<String>> converted = new HashMap<String, List<String>>();
 for (Map.Entry<Partition, List<String>> entry : mapping.entrySet()) {
  converted.put(entry.getKey().getName(), entry.getValue());
 }
 return converted;
}

Code example source: apache/hive

private BufferedWriter writer(Context withinContext, Partition qlPtn)
  throws IOException {
 Path ptnDataPath = new Path(withinContext.eventRoot, qlPtn.getName());
 FileSystem fs = ptnDataPath.getFileSystem(withinContext.hiveConf);
 Path filesPath = new Path(ptnDataPath, EximUtil.FILES_NAME);
 return new BufferedWriter(new OutputStreamWriter(fs.create(filesPath)));
}

Code example source: apache/drill

private Map<String, List<String>> convert(Map<Partition, List<String>> mapping) {
 Map<String, List<String>> converted = new HashMap<String, List<String>>();
 for (Map.Entry<Partition, List<String>> entry : mapping.entrySet()) {
  converted.put(entry.getKey().getName(), entry.getValue());
 }
 return converted;
}

Code example source: apache/incubator-gobblin

@Override
 public String apply(Partition input) {
  return getQlPartition(purgedTbl, input).getName();
 }
}).toList(), containsInAnyOrder(expectedRetainedPartitions));

Code example source: apache/incubator-gobblin

private void addPartitionsToVersions(List<HivePartitionVersion> versions, String name,
  List<Partition> partitions)
  throws IOException {
 for (Partition partition : partitions) {
  if (partition.getName().equalsIgnoreCase(name)) {
   versions.add(new HivePartitionRetentionVersion(partition));
  }
 }
}

Code example source: apache/hive

/**
 * @return include the db name
 */
public String getCompleteName() {
 return getTable().getCompleteName() + "@" + getName();
}

Code example source: apache/drill

/**
 * @return include the db name
 */
public String getCompleteName() {
 return getTable().getCompleteName() + "@" + getName();
}

Code example source: apache/hive

private void checkArchiveProperty(int partSpecLevel,
  boolean recovery, Partition p) throws HiveException {
 if (!ArchiveUtils.isArchived(p) && !recovery) {
  throw new HiveException("Partition " + p.getName()
    + " is not archived.");
 }
 int archiveLevel = ArchiveUtils.getArchivingLevel(p);
 if (partSpecLevel > archiveLevel) {
  throw new HiveException("Partition " + p.getName()
    + " is archived at level " + archiveLevel
    + ", and given partspec only has " + partSpecLevel
    + " specs.");
 }
}

Code example source: apache/drill

private void checkArchiveProperty(int partSpecLevel,
  boolean recovery, Partition p) throws HiveException {
 if (!ArchiveUtils.isArchived(p) && !recovery) {
  throw new HiveException("Partition " + p.getName()
    + " is not archived.");
 }
 int archiveLevel = ArchiveUtils.getArchivingLevel(p);
 if (partSpecLevel > archiveLevel) {
  throw new HiveException("Partition " + p.getName()
    + " is archived at level " + archiveLevel
    + ", and given partspec only has " + partSpecLevel
    + " specs.");
 }
}

Code example source: apache/hive

private Collection<List<ColumnStatisticsObj>> verifyAndGetPartColumnStats(
  Hive hive, Table tbl, String colName, Set<Partition> parts) throws TException, LockException {
 List<String> partNames = new ArrayList<String>(parts.size());
 for (Partition part : parts) {
  if (!StatsUtils.areColumnStatsUptoDateForQueryAnswering(part.getTable(), part.getParameters(), colName)) {
   Logger.debug("Stats for part : " + part.getSpec() + " column " + colName
     + " are not up to date.");
   return null;
  }
  partNames.add(part.getName());
 }
 AcidUtils.TableSnapshot tableSnapshot =
   AcidUtils.getTableSnapshot(hive.getConf(), tbl);
 Map<String, List<ColumnStatisticsObj>> result = hive.getMSC().getPartitionColumnStatistics(
   tbl.getDbName(), tbl.getTableName(), partNames, Lists.newArrayList(colName),
   tableSnapshot != null ? tableSnapshot.getValidWriteIdList() : null);
 if (result.size() != parts.size()) {
  Logger.debug("Received " + result.size() + " stats for " + parts.size() + " partitions");
  return null;
 }
 return result.values();
}

Code example source: apache/incubator-gobblin

public HivePartitionFileSet(HiveCopyEntityHelper hiveCopyEntityHelper, Partition partition, Properties properties) {
 super(partition.getCompleteName(), hiveCopyEntityHelper.getDataset());
 this.hiveCopyEntityHelper = hiveCopyEntityHelper;
 this.partition = partition;
 this.properties = properties;
 this.existingTargetPartition =
   Optional.fromNullable(this.hiveCopyEntityHelper.getTargetPartitions().get(this.partition.getValues()));
 this.eventSubmitter =
   new EventSubmitter.Builder(this.hiveCopyEntityHelper.getDataset().getMetricContext(), "hive.dataset.copy")
     .addMetadata("Partition", this.partition.getName()).build();
}

Code example source: apache/hive

public HiveLockObject(Partition par, HiveLockObjectData lockData) {
 this(new String[] {par.getTable().getDbName(),
           org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
}

Code example source: apache/drill

public HiveLockObject(Partition par, HiveLockObjectData lockData) {
 this(new String[] {par.getTable().getDbName(),
   MetaStoreUtils.encodeTableName(par.getTable().getTableName()), par.getName()}, lockData);
}

Code example source: apache/incubator-gobblin

public HiveWorkUnit(HiveDataset hiveDataset, Partition partition) {
 this(hiveDataset);
 setPartitionName(partition.getName());
 setPartitionLocation(partition.getLocation());
 setPartitionKeys(partition.getTable().getPartitionKeys());
}

Code example source: apache/incubator-gobblin

@Test
public void testDefaults() throws Exception {
 DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
 String tableName = "VfTb1";
 Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("datepartition"));
 org.apache.hadoop.hive.metastore.api.Partition tp =
   this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016-01-01-20"), (int) System.currentTimeMillis());
 Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
 assertThat(partition.getName(), is("datepartition=2016-01-01-20"));
 TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
 Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}

Code example source: apache/incubator-gobblin

/**
 * Set SLA event metadata in the workunit. The publisher will use this metadata to publish SLA events.
 */
public static void setPartitionSlaEventMetadata(WorkUnit state, Table table, Partition partition, long updateTime,
  long lowWatermark, long beginGetWorkunitsTime) {
 state.setProp(SlaEventKeys.DATASET_URN_KEY, state.getProp(ConfigurationKeys.DATASET_URN_KEY));
 state.setProp(SlaEventKeys.PARTITION_KEY, partition.getName());
 state.setProp(SlaEventKeys.UPSTREAM_TS_IN_MILLI_SECS_KEY, String.valueOf(updateTime));
 // Time when the workunit was created
 state.setProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY, System.currentTimeMillis());
 state.setProp(EventConstants.WORK_UNIT_CREATE_TIME, state.getProp(SlaEventKeys.ORIGIN_TS_IN_MILLI_SECS_KEY));
 state.setProp(SlaEventKeys.PREVIOUS_PUBLISH_TS_IN_MILLI_SECS_KEY, lowWatermark);
 state.setProp(EventConstants.BEGIN_GET_WORKUNITS_TIME, beginGetWorkunitsTime);
 state.setProp(EventConstants.SOURCE_DATA_LOCATION, partition.getDataLocation());
}

Code example source: apache/incubator-gobblin

@Test
public void testUserDefinedDatePattern() throws Exception {
 String tableName = "VfTb2";
 Config conf =
   ConfigFactory.parseMap(ImmutableMap.<String, String> of(DatePartitionHiveVersionFinder.PARTITION_KEY_NAME_KEY, "field1",
     DatePartitionHiveVersionFinder.PARTITION_VALUE_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/HH"));
 DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, conf);
 Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("field1"));
 org.apache.hadoop.hive.metastore.api.Partition tp =
   this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016/01/01/20"), (int) System.currentTimeMillis());
 Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
 Assert.assertEquals(URLDecoder.decode(partition.getName(), "UTF-8"), "field1=2016/01/01/20");
 TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
 Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}

Code example source: apache/incubator-gobblin

@Test
 public void testMultiplePartitionFields() throws Exception {
  DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
  String tableName = "VfTb3";

  Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("datepartition", "field1"));
  org.apache.hadoop.hive.metastore.api.Partition tp =
    this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016-01-01-20", "f1"), (int) System.currentTimeMillis());
  Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);

  assertThat(partition.getName(), anyOf(is("field1=f1/datepartition=2016-01-01-20"), is("datepartition=2016-01-01-20/field1=f1")));
  TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
  Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));

 }

Code example source: apache/hive

private void analyzeCacheMetadata(ASTNode ast) throws SemanticException {
 Table tbl = AnalyzeCommandUtils.getTable(ast, this);
 Map<String,String> partSpec = null;
 CacheMetadataDesc desc;
 // In 2 cases out of 3, we could pass the path and type directly to metastore...
 if (AnalyzeCommandUtils.isPartitionLevelStats(ast)) {
  partSpec = AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf);
  Partition part = getPartition(tbl, partSpec, true);
  desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), part.getName());
  inputs.add(new ReadEntity(part));
 } else {
  // Should we get all partitions for a partitioned table?
  desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), tbl.isPartitioned());
  inputs.add(new ReadEntity(tbl));
 }
 rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
