本文整理了Java中org.apache.hadoop.hive.ql.metadata.Table.isPartitioned()方法的一些代码示例,展示了Table.isPartitioned()的具体用法。这些代码示例主要来源于Github、Stackoverflow、Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.isPartitioned()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Table
类名称:Table
方法名:isPartitioned
暂无
代码示例来源:origin: apache/hive
/**
 * Fetches every partition of the table that matches the supplied partial
 * specification. A partition column whose value may be anything should be
 * mapped to an empty string in the specification.
 *
 * @param tbl
 *          table whose partitions are requested. Must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (some sub-partitions can be empty).
 * @return list of matching partition objects
 * @throws HiveException if the table is not partitioned
 */
public List<Partition> getPartitionsByNames(Table tbl,
    Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // Resolve the matching partition names first (-1 = no limit), then
  // materialize the partition objects in bulk.
  List<String> matchingNames = getPartitionNames(tbl.getDbName(), tbl.getTableName(),
      partialPartSpec, (short) -1);
  return getPartitionsByNames(tbl, matchingNames);
}
代码示例来源:origin: apache/hive
/**
 * Get all the partitions; unlike {@link #getPartitions(Table)}, does not include auth.
 * @param tbl table for which partitions are needed
 * @return set of partition objects; for an unpartitioned table, a single
 *         pseudo-partition wrapping the table itself
 * @throws HiveException if the metastore listing fails
 */
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // An unpartitioned table is represented by exactly one pseudo-partition.
    return Sets.newHashSet(new Partition(tbl));
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> metastoreParts;
  try {
    // (short) -1 requests all partitions (no limit).
    metastoreParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short) -1);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  // LinkedHashSet preserves the order in which the metastore returned them.
  Set<Partition> result = new LinkedHashSet<Partition>(metastoreParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition mp : metastoreParts) {
    result.add(new Partition(tbl, mp));
  }
  return result;
}
代码示例来源:origin: apache/hive
/**
 * Get a list of Partitions by filter.
 * @param tbl the table containing the partitions
 * @param filter a string representing partition predicates
 * @return a list of partitions satisfying the partition predicates
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // Let the metastore evaluate the predicate, then convert its thrift
  // partitions into ql-layer Partition objects.
  List<org.apache.hadoop.hive.metastore.api.Partition> metastoreParts =
      getMSC().listPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, metastoreParts);
}
代码示例来源:origin: apache/hive
/**
 * Get a number of Partitions by filter.
 * @param tbl The table containing the partitions.
 * @param filter A string represent partition predicates.
 * @return the number of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public int getNumPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    // Consistency fix: use the standard TABLE_NOT_PARTITIONED error like the
    // sibling getPartitionsByFilter(); the previous free-form message
    // ("Partition spec should only be supplied for a partitioned table")
    // described the wrong precondition for a filter-based lookup.
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  return getMSC().getNumPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter);
}
代码示例来源:origin: apache/drill
/**
 * Returns every partition of {@code tbl} matching the given partial
 * specification; a partition column whose value may be anything should map to
 * an empty string.
 *
 * @param tbl
 *          object for which partitions are needed; must be partitioned.
 * @param partialPartSpec
 *          partial partition specification (some sub-partitions can be empty).
 * @return list of partition objects
 * @throws HiveException if the table is not partitioned
 */
public List<Partition> getPartitionsByNames(Table tbl,
    Map<String, String> partialPartSpec)
    throws HiveException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // Resolve the names matching the partial spec without limit, then fetch the
  // corresponding partition objects.
  final List<String> partNames =
      getPartitionNames(tbl.getDbName(), tbl.getTableName(), partialPartSpec, (short) -1);
  return getPartitionsByNames(tbl, partNames);
}
代码示例来源:origin: apache/hive
// NOTE(review): truncated snippet — the enclosing loop and closing braces were
// lost in extraction; do not compile as-is. Enters the branch only when the
// table is partitioned AND (presumably) partition-level authorization was
// enabled for it in tableUsePartLevelAuth — TODO confirm against full source.
if (tbl.isPartitioned()
&& Boolean.TRUE.equals(tableUsePartLevelAuth.get(tbl.getTableName()))) {
String alias_id = topOpMap.getKey();
代码示例来源:origin: apache/hive
/**
 * Get all the partitions that the table has.
 *
 * @param tbl
 *          object for which partitions are needed
 * @return list of partition objects; for an unpartitioned table, a single
 *         pseudo-partition wrapping the table itself
 * @throws HiveException if the metastore call fails
 */
public List<Partition> getPartitions(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // Unpartitioned tables are modeled as exactly one pseudo-partition.
    List<Partition> single = new ArrayList<Partition>(1);
    single.add(new Partition(tbl));
    return single;
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> metastoreParts;
  try {
    // Include auth info for the current user/groups; (short) -1 = no limit.
    metastoreParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
        (short) -1, getUserName(), getGroupNames());
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  List<Partition> result = new ArrayList<Partition>(metastoreParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition mp : metastoreParts) {
    result.add(new Partition(tbl, mp));
  }
  return result;
}
代码示例来源:origin: apache/drill
/**
 * Lists the partitions of {@code tbl} that satisfy the given filter predicate.
 * @param tbl the table containing the partitions
 * @param filter a string representing partition predicates
 * @return partitions satisfying the partition predicates
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public List<Partition> getPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  // The metastore applies the filter server-side; convert its thrift result.
  final List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts = getMSC()
      .listPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter, (short) -1);
  return convertFromMetastore(tbl, thriftParts);
}
代码示例来源:origin: apache/hive
// NOTE(review): truncated snippet — the surrounding loop and closing braces
// are missing; do not compile as-is. Skips entities backed by a VALUES temp
// table, then constructs the ReadEntity differently depending on whether the
// underlying table is partitioned.
if (isValuesTempTable(part.getTable().getTableName())) {
continue;
if (part.getTable().isPartitioned()) {
newInput = new ReadEntity(part, parentViewInfo, isDirectRead);
} else {
代码示例来源:origin: apache/drill
/**
 * Get a number of Partitions by filter.
 * @param tbl The table containing the partitions.
 * @param filter A string represent partition predicates.
 * @return the number of partitions satisfying the partition predicates.
 * @throws HiveException if the table is not partitioned
 * @throws MetaException
 * @throws NoSuchObjectException
 * @throws TException
 */
public int getNumPartitionsByFilter(Table tbl, String filter)
    throws HiveException, MetaException, NoSuchObjectException, TException {
  if (!tbl.isPartitioned()) {
    // Consistency fix: throw the standard TABLE_NOT_PARTITIONED error used by
    // the other partition-lookup methods instead of a free-form message that
    // talked about a "partition spec" this filter-based method never takes.
    throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
  }
  return getMSC().getNumPartitionsByFilter(tbl.getDbName(), tbl.getTableName(), filter);
}
代码示例来源:origin: apache/drill
/**
 * Get all the partitions; unlike {@link #getPartitions(Table)}, does not include auth.
 * @param tbl table for which partitions are needed
 * @return insertion-ordered set of partition objects; a single
 *         pseudo-partition when the table is unpartitioned
 * @throws HiveException wrapping any metastore failure
 */
public Set<Partition> getAllPartitionsOf(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // No real partitions: expose the table itself as one pseudo-partition.
    return Sets.newHashSet(new Partition(tbl));
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts;
  try {
    thriftParts = getMSC().listPartitions(tbl.getDbName(), tbl.getTableName(), (short) -1);
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  final Set<Partition> partitions = new LinkedHashSet<Partition>(thriftParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition thriftPart : thriftParts) {
    partitions.add(new Partition(tbl, thriftPart));
  }
  return partitions;
}
代码示例来源:origin: apache/drill
// NOTE(review): truncated snippet — the index-builder task construction call
// wrapping these arguments was lost in extraction; do not compile as-is.
// Appears to build one task for an unpartitioned base table, else one per
// (indexPart, basePart) pair — TODO confirm against the original source.
if (!baseTbl.isPartitioned()) {
new PartitionDesc(desc, null), indexTbl.getTableName(),
new PartitionDesc(Utilities.getTableDesc(baseTbl), null),
baseTbl.getTableName(), indexTbl.getDbName());
indexBuilderTasks.add(indexBuilder);
} else {
new PartitionDesc(indexPart), indexTbl.getTableName(),
new PartitionDesc(basePart), baseTbl.getTableName(), indexTbl.getDbName());
indexBuilderTasks.add(indexBuilder);
代码示例来源:origin: apache/drill
/**
 * Get all the partitions that the table has.
 *
 * @param tbl
 *          object for which partitions are needed
 * @return list of partition objects; a one-element list holding a
 *         pseudo-partition when the table is unpartitioned
 * @throws HiveException wrapping any metastore failure
 */
public List<Partition> getPartitions(Table tbl) throws HiveException {
  if (!tbl.isPartitioned()) {
    // A table without partition columns yields a single pseudo-partition.
    ArrayList<Partition> pseudo = new ArrayList<Partition>(1);
    pseudo.add(new Partition(tbl));
    return pseudo;
  }
  List<org.apache.hadoop.hive.metastore.api.Partition> thriftParts;
  try {
    // Auth-aware listing for the current user and groups; -1 means no limit.
    thriftParts = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
        (short) -1, getUserName(), getGroupNames());
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
  final List<Partition> partitions = new ArrayList<Partition>(thriftParts.size());
  for (org.apache.hadoop.hive.metastore.api.Partition thriftPart : thriftParts) {
    partitions.add(new Partition(tbl, thriftPart));
  }
  return partitions;
}
代码示例来源:origin: apache/drill
// NOTE(review): truncated snippet — the surrounding loop and closing braces
// are missing; do not compile as-is. Skips VALUES temp tables, then builds the
// ReadEntity according to whether the table is partitioned.
if (isValuesTempTable(part.getTable().getTableName())) {
continue;
if (part.getTable().isPartitioned()) {
newInput = new ReadEntity(part, parentViewInfo, isDirectRead);
} else {
代码示例来源:origin: apache/hive
// NOTE(review): truncated snippet — the method signature, loop bodies and
// closing braces are missing; do not compile as-is. Fetches partitions by name
// from the metastore in full batches of batchSize, followed by a final partial
// batch covering [nBatches*batchSize, nParts).
throws HiveException {
if (!tbl.isPartitioned()) {
throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
// Full batches: names in [i*batchSize, (i+1)*batchSize).
for (int i = 0; i < nBatches; ++i) {
List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
partNames.subList(i*batchSize, (i+1)*batchSize), getColStats);
if (tParts != null) {
// Remainder batch for the names past the last full batch.
getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
partNames.subList(nBatches*batchSize, nParts), getColStats);
if (tParts != null) {
代码示例来源:origin: apache/drill
// NOTE(review): truncated snippet — the method signature, loop bodies and
// closing braces are missing; do not compile as-is. Same batched
// partition-by-name fetch as the hive variant above, without the getColStats
// argument.
throws HiveException {
if (!tbl.isPartitioned()) {
throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
// Full batches: names in [i*batchSize, (i+1)*batchSize).
for (int i = 0; i < nBatches; ++i) {
List<org.apache.hadoop.hive.metastore.api.Partition> tParts =
getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
partNames.subList(i*batchSize, (i+1)*batchSize));
if (tParts != null) {
// Remainder batch for the names past the last full batch.
getMSC().getPartitionsByNames(tbl.getDbName(), tbl.getTableName(),
partNames.subList(nBatches*batchSize, nParts));
if (tParts != null) {
代码示例来源:origin: apache/hive
// NOTE(review): truncated snippet — the loop body, the unpartitioned branch
// and the return statement were lost in extraction; do not compile as-is.
// Appears to sum row counts over the pruned partitions of a partitioned
// table, setting rowCnt to null when stats are stale (per the debug message)
// — TODO confirm against the full source.
private Long getRowCnt(
ParseContext pCtx, TableScanOperator tsOp, Table tbl) throws HiveException {
Long rowCnt = 0L;
if (tbl.isPartitioned()) {
for (Partition part : pctx.getPrunedPartitions(
tsOp.getConf().getAlias(), tsOp).getPartitions()) {
Logger.debug("Table doesn't have up to date stats " + tbl.getTableName());
rowCnt = null;
代码示例来源:origin: apache/drill
// NOTE(review): truncated snippet — identical extract to the hive variant
// above; loop body, unpartitioned branch and return are missing; do not
// compile as-is.
private Long getRowCnt(
ParseContext pCtx, TableScanOperator tsOp, Table tbl) throws HiveException {
Long rowCnt = 0L;
if (tbl.isPartitioned()) {
for (Partition part : pctx.getPrunedPartitions(
tsOp.getConf().getAlias(), tsOp).getPartitions()) {
Logger.debug("Table doesn't have up to date stats " + tbl.getTableName());
rowCnt = null;
代码示例来源:origin: apache/hive
/**
 * Compiles a CACHE METADATA statement into a {@link CacheMetadataDesc} DDL
 * task, registering the touched table or partition as a read entity.
 *
 * @param ast the parsed CACHE METADATA command
 * @throws SemanticException if table/partition resolution fails
 */
private void analyzeCacheMetadata(ASTNode ast) throws SemanticException {
  Table tbl = AnalyzeCommandUtils.getTable(ast, this);
  CacheMetadataDesc desc;
  // In 2 cases out of 3, we could pass the path and type directly to metastore...
  if (AnalyzeCommandUtils.isPartitionLevelStats(ast)) {
    // Partition-level stats: cache metadata for the single named partition.
    Map<String, String> partSpec =
        AnalyzeCommandUtils.getPartKeyValuePairsFromAST(tbl, ast, conf);
    Partition part = getPartition(tbl, partSpec, true);
    desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), part.getName());
    inputs.add(new ReadEntity(part));
  } else {
    // Should we get all partitions for a partitioned table?
    desc = new CacheMetadataDesc(tbl.getDbName(), tbl.getTableName(), tbl.isPartitioned());
    inputs.add(new ReadEntity(tbl));
  }
  rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc)));
}
代码示例来源:origin: apache/hive
// NOTE(review): truncated snippet — the start of the signature, the try block
// and the closing braces are missing; do not compile as-is. Lists partitions
// with auth info for a partial partition-value spec, failing fast with
// TABLE_NOT_PARTITIONED for unpartitioned tables.
short limit)
throws HiveException {
if (!tbl.isPartitioned()) {
throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName());
partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(),
partialPvals, limit, getUserName(), getGroupNames());
} catch (Exception e) {
内容来源于网络,如有侵权,请联系作者删除!