Usage and Code Examples of the org.apache.hadoop.hive.metastore.api.Table.getPartitionKeysSize() Method


This article collects Java code examples for the org.apache.hadoop.hive.metastore.api.Table.getPartitionKeysSize() method and shows how it is used in practice. The examples come from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method:

Package: org.apache.hadoop.hive.metastore.api
Class: Table
Method: getPartitionKeysSize

About Table.getPartitionKeysSize

Table is a Thrift-generated metastore class, and getPartitionKeysSize() returns the number of partition key columns (FieldSchema entries) declared on the table. Like Thrift-generated list accessors in general, it returns 0 when the underlying list is unset, so a table is partitioned exactly when the value is greater than 0; most of the examples below use it that way.
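
A minimal sketch of that pattern, assuming a hypothetical table with two partition columns (Table and FieldSchema are the real metastore classes; the table setup and class name here are illustrative):

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeysSizeDemo {
 public static void main(String[] args) {
  Table table = new Table();
  // Thrift treats an unset list as empty, so this prints 0 rather than throwing.
  System.out.println(table.getPartitionKeysSize());

  table.setPartitionKeys(Arrays.asList(
    new FieldSchema("ds", "string", "date partition"),
    new FieldSchema("hr", "int", "hour partition")));
  System.out.println(table.getPartitionKeysSize()); // 2

  if (table.getPartitionKeysSize() > 0) {
   // partition-specific logic, as in the examples below
  }
 }
}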

Code Examples

Example source: apache/hive

TempTable(org.apache.hadoop.hive.metastore.api.Table t) {
 assert t != null;
 this.tTable = t;
 // Build an in-memory partition tree only when the table is partitioned.
 pTree = t.getPartitionKeysSize() > 0 ? new PartitionTree(tTable) : null;
}

Example source: apache/hive

private String getPartitionStr(Table tbl, Map<String,String> partName) throws InvalidPartitionException{
 if(tbl.getPartitionKeysSize() != partName.size()){
  throw new InvalidPartitionException("Number of partition columns in table: "+ tbl.getPartitionKeysSize() +
    " doesn't match with number of supplied partition values: "+partName.size());
 }
 final List<String> storedVals = new ArrayList<>(tbl.getPartitionKeysSize());
 for(FieldSchema partKey : tbl.getPartitionKeys()){
  String partVal = partName.get(partKey.getName());
  if(null == partVal) {
   throw new InvalidPartitionException("No value found for partition column: "+partKey.getName());
  }
  storedVals.add(partVal);
 }
 return join(storedVals,',');
}

Example source: apache/hive

public static Map<String, String> getPartitionKeyValues(Table table, Partition partition) {
 Map<String, String> partitionKeys = new LinkedHashMap<>();
 for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
  partitionKeys.put(table.getPartitionKeys().get(i).getName(),
    partition.getValues().get(i));
 }
 return partitionKeys;
}
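
A quick usage sketch under assumed inputs: a table partitioned by ds and hr, and a Partition whose values list stores the values in the same key order. The pairing loop mirrors the method above:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeyValuesDemo {
 public static void main(String[] args) {
  Table table = new Table();
  table.setPartitionKeys(Arrays.asList(
    new FieldSchema("ds", "string", null),
    new FieldSchema("hr", "int", null)));

  Partition partition = new Partition();
  partition.setValues(Arrays.asList("2022-01-29", "08"));

  // Same positional pairing as getPartitionKeyValues above.
  Map<String, String> kv = new LinkedHashMap<>();
  for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
   kv.put(table.getPartitionKeys().get(i).getName(), partition.getValues().get(i));
  }
  System.out.println(kv); // {ds=2022-01-29, hr=08}
 }
}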

Example source: apache/hive

private String buildPartColStr(Table table) {
 String partColStr = "";
 for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
  if (i != 0) {
   partColStr += ",";
  }
  partColStr += table.getPartitionKeys().get(i).getName();
 }
 return partColStr;
}
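
A side note on the loop above: string concatenation with += allocates a new String on every iteration. An equivalent sketch using StringBuilder produces the same output while avoiding that:

// Behaviorally identical alternative using StringBuilder.
private String buildPartColStr(Table table) {
 StringBuilder partColStr = new StringBuilder();
 for (int i = 0; i < table.getPartitionKeysSize(); ++i) {
  if (i != 0) {
   partColStr.append(',');
  }
  partColStr.append(table.getPartitionKeys().get(i).getName());
 }
 return partColStr.toString();
}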

Example source: apache/hive

private static String getCompactionCommand(Table t, Partition p) {
 StringBuilder sb = new StringBuilder("ALTER TABLE ").append(Warehouse.getQualifiedName(t));
 if(t.getPartitionKeysSize() > 0) {
  assert p != null : "must supply partition for partitioned table " +
    Warehouse.getQualifiedName(t);
  sb.append(" PARTITION(");
  for (int i = 0; i < t.getPartitionKeysSize(); i++) {
   sb.append(t.getPartitionKeys().get(i).getName()).append('=').append(
     genPartValueString(t.getPartitionKeys().get(i).getType(), p.getValues().get(i))).
     append(",");
  }
  sb.setCharAt(sb.length() - 1, ')');//replace trailing ','
 }
 return sb.append(" COMPACT 'major'").toString();
}
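
For a hypothetical table db.tbl partitioned by a single string column ds with value 2022-01-29, the builder above should yield a statement along the lines of ALTER TABLE db.tbl PARTITION(ds='2022-01-29') COMPACT 'major' (assuming genPartValueString single-quotes string values). The sb.setCharAt(sb.length() - 1, ')') call turns the loop's trailing comma into the closing parenthesis.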

Example source: apache/hive

private void assertPartitioned() throws MetaException {
 if(tTable.getPartitionKeysSize() <= 0) {
  throw new MetaException(Warehouse.getQualifiedName(tTable) + " is not partitioned");
 }
}

Example source: apache/drill

private int getSerDeOverheadFactor() {
 final int projectedColumnCount;
 if (Utilities.isStarQuery(columns)) {
  Table hiveTable = hiveReadEntry.getTable();
  projectedColumnCount = hiveTable.getSd().getColsSize() + hiveTable.getPartitionKeysSize();
 } else {
  // In cost estimation, # of project columns should be >= 1, even for skipAll query.
  projectedColumnCount = Math.max(columns.size(), 1);
 }

 return projectedColumnCount * HIVE_SERDE_SCAN_OVERHEAD_FACTOR_PER_COLUMN;
}

Example source: apache/hive

private static void createTempTable(org.apache.hadoop.hive.metastore.api.Table t) {
 if (t.getPartitionKeysSize() <= 0) {
  // do nothing as it's not a partitioned table
  return;
 }
 String qualifiedTableName = Warehouse.
   getQualifiedName(t.getDbName().toLowerCase(), t.getTableName().toLowerCase());
 SessionState ss = SessionState.get();
 if (ss == null) {
  LOG.warn("No current SessionState, skipping temp partitions for " + qualifiedTableName);
  return;
 }
 TempTable tt = new TempTable(t);
 if (ss.getTempPartitions().putIfAbsent(qualifiedTableName, tt) != null) {
  throw new IllegalStateException("TempTable for " + qualifiedTableName + " already exists");
 }
}

Example source: apache/hive

private List<Path> getLocationsForTruncate(final RawStore ms,
                      final String catName,
                      final String dbName,
                      final String tableName,
                      final Table table,
                      final List<String> partNames) throws Exception {
 List<Path> locations = new ArrayList<>();
 if (partNames == null) {
  if (0 != table.getPartitionKeysSize()) {
   for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
    locations.add(new Path(partition.getSd().getLocation()));
   }
  } else {
   locations.add(new Path(table.getSd().getLocation()));
  }
 } else {
  for (Partition partition : ms.getPartitionsByNames(catName, dbName, tableName, partNames)) {
   locations.add(new Path(partition.getSd().getLocation()));
  }
 }
 return locations;
}

Example source: apache/hive

if (!updateStats || newDir || tbl.getPartitionKeysSize() != 0) {
 return;
}

Example source: apache/hive

if (table.getPartitionKeysSize() == 0) {
 Map<String, String> params = table.getParameters();
 List<String> colsToUpdate = null;
 // ...
}

Example source: apache/hive

if (!customDynamicLocationUsed) {
 src = new Path(getPartitionRootLocation(jobInfo.getLocation(), jobInfo.getTableInfo().getTable()
   .getPartitionKeysSize()));
} else {
 src = new Path(getCustomPartitionRootLocation(jobInfo, jobContext.getConfiguration()));
}

Example source: apache/hive

private static Map<String, String> getPtnDesc(Table t, Partition p) {
 assertEquals(t.getPartitionKeysSize(),p.getValuesSize());
 Map<String,String> retval = new HashMap<String,String>();
 Iterator<String> pval = p.getValuesIterator();
 for (FieldSchema fs : t.getPartitionKeys()){
  retval.put(fs.getName(),pval.next());
 }
 return retval;
}

Example source: apache/hive

if (t.getPartitionKeysSize() <= 0) {
 return Collections.emptyList();
}

Example source: apache/hive

@Before public void before() throws Throwable {
 tableWorkingPath = temporaryFolder.newFolder().getAbsolutePath();
 segmentsTable = derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
 Map<String, String> params = new HashMap<>();
 params.put("external.table.purge", "TRUE");
 Mockito.when(tableMock.getParameters()).thenReturn(params);
 Mockito.when(tableMock.getPartitionKeysSize()).thenReturn(0);
 StorageDescriptor storageDes = Mockito.mock(StorageDescriptor.class);
 Mockito.when(storageDes.getBucketColsSize()).thenReturn(0);
 Mockito.when(tableMock.getSd()).thenReturn(storageDes);
 Mockito.when(tableMock.getDbName()).thenReturn(DB_NAME);
 Mockito.when(tableMock.getTableName()).thenReturn(TABLE_NAME);
 config = new Configuration();
 config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), "hive-" + UUID.randomUUID().toString());
 config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath);
 config.set(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY),
   new Path(tableWorkingPath, "finalSegmentDir").toString());
 config.set("hive.druid.maxTries", "0");
 druidStorageHandler =
   new DruidStorageHandler(derbyConnectorRule.getConnector(),
     derbyConnectorRule.metadataTablesConfigSupplier().get());
 druidStorageHandler.setConf(config);
}

Example source: apache/hive

// An earlier check in the same excerpt:
// throw new MetaException("LOCATION may not be specified for Druid");
if (table.getPartitionKeysSize() != 0) {
 throw new MetaException("PARTITIONED BY may not be specified for Druid");
}

Example source: apache/incubator-gobblin

Partition nativePartition = HiveMetaStoreUtils.getPartition(partition);
Preconditions.checkArgument(table.getPartitionKeysSize() == nativePartition.getValues().size(),
  String.format("Partition key size is %s but partition value size is %s", table.getPartitionKeys().size(),
    nativePartition.getValues().size()));

Example source: apache/hive

String validWriteIds, long writeId) throws Exception {
if (partNames == null) {
 if (0 != table.getPartitionKeysSize()) {
  for (Partition partition : ms.getPartitions(catName, dbName, tableName, Integer.MAX_VALUE)) {
   alterPartitionForTruncate(ms, catName, dbName, tableName, table, partition,

Example source: apache/hive

StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
 LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
} else if (isTxn && tbl.getPartitionKeysSize() == 0) {
 if (isCurrentStatsValidForTheQuery(mtable, writeIdList, false)) {
  tbl.setIsStatsCompliant(true);

Example source: apache/hive

if (tbl.getPartitionKeysSize() == 0) {
 throw new HCatException("The table " + partInfo.getTableName()
  + " is not partitioned.");
}
