Usage and Code Examples of org.apache.hadoop.hive.ql.metadata.Table.getFullyQualifiedName()

This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Table.getFullyQualifiedName() method and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.getFullyQualifiedName() are as follows:
Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getFullyQualifiedName

About Table.getFullyQualifiedName

The upstream Javadoc provides no description. In the Hive source, the method returns the table's fully qualified name in the form dbName.tableName (for example, default.my_table).
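
As a quick orientation, here is a minimal sketch (not taken from the examples below) that obtains a Table through the Hive metadata layer and prints its fully qualified name. The database and table names, and the class name FullyQualifiedNameDemo, are hypothetical; the sketch assumes a reachable metastore configured via HiveConf.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Table;

public class FullyQualifiedNameDemo {
 public static void main(String[] args) throws Exception {
  HiveConf conf = new HiveConf();
  // Hive.get() returns a thread-local client bound to the metastore configured in conf.
  Hive db = Hive.get(conf);
  // Hypothetical database and table names.
  Table table = db.getTable("default", "my_table");
  // Prints "default.my_table".
  System.out.println(table.getFullyQualifiedName());
 }
}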

Code Examples

Example source: apache/hive — exposing the fully qualified name as a table's "complete name":

public String getCompleteName() {
 return tbl.getFullyQualifiedName();
}

Example source: apache/hive — collecting fully qualified table names from a set of query entities:

protected List<String> getTablesFromEntitySet(Set<? extends Entity> entities) {
 List<String> tableNames = new ArrayList<String>();
 for (Entity entity : entities) {
  if (entity.getType() == Entity.Type.TABLE) {
   tableNames.add(entity.getTable().getFullyQualifiedName());
  }
 }
 return tableNames;
}

Example source: apache/hive — streaming the names of all tables a query reads:

public Stream<String> getTableNames() {
 return queryInfo.getInputs().stream()
   .filter(readEntity -> readEntity.getType() == Type.TABLE)
   .map(readEntity -> readEntity.getTable().getFullyQualifiedName());
}

Example source: apache/hive — walking a Calcite plan with an anonymous RelVisitor and recording every Hive table it scans:

new RelVisitor() {
 @Override
 public void visit(RelNode node, int ordinal, RelNode parent) {
  if (node instanceof TableScan) {
   TableScan ts = (TableScan) node;
   tablesUsed.add(((RelOptHiveTable) ts.getTable()).getHiveTableMD().getFullyQualifiedName());
  }
  super.visit(node, ordinal, parent);
 }
}.go(plan);

Example source: apache/hive — building a drop-partition task during replication:

private Task<?> dropPartitionTask(Table table, Map<String, String> partSpec) throws SemanticException {
 Task<DDLWork> dropPtnTask = null;
 Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecsExpr =
     ReplUtils.genPartSpecs(table, Collections.singletonList(partSpec));
 if (partSpecsExpr.size() > 0) {
  DropTableDesc dropPtnDesc = new DropTableDesc(table.getFullyQualifiedName(),
      partSpecsExpr, null, true, event.replicationSpec());
  dropPtnTask = TaskFactory.get(
      new DDLWork(new HashSet<>(), new HashSet<>(), dropPtnDesc), context.hiveConf
  );
 }
 return dropPtnTask;
}

Example source: apache/hive — building a drop-table task during replication:

private Task<?> dropTableTask(Table table) {
 assert(table != null);
 DropTableDesc dropTblDesc = new DropTableDesc(table.getFullyQualifiedName(), table.getTableType(),
     true, false, event.replicationSpec());
 return TaskFactory.get(new DDLWork(new HashSet<>(), new HashSet<>(), dropTblDesc), context.hiveConf);
}

Example source: apache/hive — collecting the tables a query uses, skipping views and materialized tables:

private Set<String> getTablesUsed(ParseContext parseCtx) throws SemanticException {
 Set<String> tablesUsed = new HashSet<>();
 for (TableScanOperator topOp : parseCtx.getTopOps().values()) {
  Table table = topOp.getConf().getTableMetadata();
  if (!table.isMaterializedTable() && !table.isView()) {
   // Add to signature
   tablesUsed.add(table.getFullyQualifiedName());
  }
 }
 return tablesUsed;
}

Example source: apache/hive — rejecting views and materialized views as ACID write targets:

/**
 * Assert that the target table supports ACID writes.
 */
protected void validateTargetTable(Table mTable) throws SemanticException {
 if (mTable.getTableType() == TableType.VIRTUAL_VIEW || mTable.getTableType() == TableType.MATERIALIZED_VIEW) {
  LOG.error("Table " + mTable.getFullyQualifiedName() + " is a view or materialized view");
  throw new SemanticException(ErrorMsg.UPDATE_DELETE_VIEW.getMsg());
 }
}

Example source: apache/hive — fetching the valid write ID list for the transactional tables a query reads:

private ValidTxnWriteIdList getQueryValidTxnWriteIdList() throws SemanticException {
 // TODO: Once HIVE-18948 is in, should be able to retrieve writeIdList from the conf.
 //cachedWriteIdList = AcidUtils.getValidTxnWriteIdList(conf);
 //
 List<String> transactionalTables = tablesFromReadEntities(inputs)
     .stream()
     .filter(table -> AcidUtils.isTransactionalTable(table))
     .map(table -> table.getFullyQualifiedName())
     .collect(Collectors.toList());
 if (transactionalTables.size() > 0) {
  try {
   String txnString = conf.get(ValidTxnList.VALID_TXNS_KEY);
   return getTxnMgr().getValidWriteIds(transactionalTables, txnString);
  } catch (Exception err) {
   String msg = "Error while getting the txnWriteIdList for tables " + transactionalTables
       + " and validTxnList " + conf.get(ValidTxnList.VALID_TXNS_KEY);
   throw new SemanticException(msg, err);
  }
 }
 // No transactional tables.
 return null;
}

Example source: apache/hive — populating an AnalyzeRewriteContext with the table's fully qualified name:

AnalyzeRewriteContext getAnalyzeRewriteContext() {
 AnalyzeRewriteContext analyzeRewrite = new AnalyzeRewriteContext();
 analyzeRewrite.setTableName(tbl.getFullyQualifiedName());
 analyzeRewrite.setTblLvl(isTableLevel);
 analyzeRewrite.setColName(colNames);
 analyzeRewrite.setColType(colType);
 return analyzeRewrite;
}

Example source: apache/hive — testing whether an operator has an external table upstream:

private static boolean hasExternalTableAncestor(Operator op, StringBuilder sb) {
 boolean result = false;
 Operator ancestor = OperatorUtils.findSingleOperatorUpstream(op, TableScanOperator.class);
 if (ancestor != null) {
  TableScanOperator ts = (TableScanOperator) ancestor;
  if (MetaStoreUtils.isExternalTable(ts.getConf().getTableMetadata().getTTable())) {
   sb.append(ts.getConf().getTableMetadata().getFullyQualifiedName());
   return true;
  }
 }
 return result;
}

Example source: apache/hive — a fragment invalidating a cached query when a table's write ID list is unavailable:

String tableName = tableUsed.getFullyQualifiedName();
ValidTxnWriteIdList currentTxnWriteIdList = lookupInfo.txnWriteIdListProvider.get();
if (currentTxnWriteIdList == null) {
 LOG.debug("Cached query no longer valid due to table {}", tableName);
 // ... the cached entry is invalidated; the rest of the method is omitted in this excerpt
}

Example source: apache/hive — altering a partition spec, preferring the fully qualified name when a database is set:

private void alterPartitionSpec(Table tbl,
                Map<String, String> partSpec,
                org.apache.hadoop.hive.metastore.api.Partition tpart,
                boolean inheritTableSpecs,
                String partPath) throws HiveException, InvalidOperationException {
 alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
 // Use the fully qualified name whenever a database name is set. (Note that this
 // snapshot of the code passes catalog/db/table separately below, so fullName is unused.)
 String fullName = tbl.getTableName();
 if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
  fullName = tbl.getFullyQualifiedName();
 }
 alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
   new Partition(tbl, tpart), null, true);
}

Example source: apache/hive — validating the target column of a SET clause in an UPDATE statement:

/**
 * Assert that we are not asked to update a bucketing column or partition column.
 * @param colName it's the A in "SET A = B"
 */
protected void checkValidSetClauseTarget(ASTNode colName, Table targetTable) throws SemanticException {
 String columnName = normalizeColName(colName.getText());
 // Make sure this isn't one of the partitioning columns, that's not supported.
 for (FieldSchema fschema : targetTable.getPartCols()) {
  if (fschema.getName().equalsIgnoreCase(columnName)) {
   throw new SemanticException(ErrorMsg.UPDATE_CANNOT_UPDATE_PART_VALUE.getMsg());
  }
 }
 //updating bucket column should move row from one file to another - not supported
 if (targetTable.getBucketCols() != null && targetTable.getBucketCols().contains(columnName)) {
  throw new SemanticException(ErrorMsg.UPDATE_CANNOT_UPDATE_BUCKET_VALUE, columnName);
 }
 boolean foundColumnInTargetTable = false;
 for (FieldSchema col : targetTable.getCols()) {
  if (columnName.equalsIgnoreCase(col.getName())) {
   foundColumnInTargetTable = true;
   break;
  }
 }
 if (!foundColumnInTargetTable) {
  throw new SemanticException(ErrorMsg.INVALID_TARGET_COLUMN_IN_SET_CLAUSE, colName.getText(),
   targetTable.getFullyQualifiedName());
 }
}

Example source: apache/hive — a fragment disabling semijoin optimization when a join key comes from an external table:

// Abridged fragment: the condition and the first log message are truncated in the
// original excerpt. Both branches log the external table's fully qualified name
// and disable the semijoin optimization.
if (/* scanned table is external (condition elided in the excerpt) */) {
 LOG.debug("...", ts.getConf().getTableMetadata().getFullyQualifiedName());
 disableSemiJoin = true;
} else {
 LOG.debug("Join key {} is from {} which is an external table. Disabling semijoin optimization.",
   columnOrigin.col,
   joinKeyTs.getConf().getTableMetadata().getFullyQualifiedName());
 disableSemiJoin = true;
}

Example source: apache/hive — creating a partition on demand in the Hive streaming API:

@Override
public PartitionInfo createPartitionIfNotExists(final List<String> partitionValues) throws StreamingException {
 String partLocation = null;
 String partName = null;
 boolean exists = false;
 try {
  Map<String, String> partSpec = Warehouse.makeSpecFromValues(tableObject.getPartitionKeys(), partitionValues);
  AddPartitionDesc addPartitionDesc = new AddPartitionDesc(database, table, true);
  partName = Warehouse.makePartName(tableObject.getPartitionKeys(), partitionValues);
  partLocation = new Path(tableObject.getDataLocation(), Warehouse.makePartPath(partSpec)).toString();
  addPartitionDesc.addPartition(partSpec, partLocation);
  Partition partition = Hive.convertAddSpecToMetaPartition(tableObject, addPartitionDesc.getPartition(0), conf);
  if (getMSC() == null) {
   // We assume it doesn't exist if we can't check it
   // so the driver will decide
   return new PartitionInfo(partName, partLocation, false);
  }
  getMSC().add_partition(partition);
  if (LOG.isDebugEnabled()) {
   LOG.debug("Created partition {} for table {}", partName,
     tableObject.getFullyQualifiedName());
  }
 } catch (AlreadyExistsException e) {
  exists = true;
 } catch (HiveException | TException e) {
  throw new StreamingException("Unable to create partition for values: " + partitionValues + " connection: " +
   toConnectionInfoString(), e);
 }
 return new PartitionInfo(partName, partLocation, exists);
}

Example source: apache/hive — generating an AnalyzeRewriteContext from the configuration and table metadata:

static AnalyzeRewriteContext genAnalyzeRewriteContext(HiveConf conf, Table tbl) {
 AnalyzeRewriteContext analyzeRewrite = new AnalyzeRewriteContext();
 analyzeRewrite.setTableName(tbl.getFullyQualifiedName());
 analyzeRewrite.setTblLvl(!(conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()));
 List<String> colNames = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 List<String> colTypes = getColumnTypes(tbl, colNames);
 analyzeRewrite.setColName(colNames);
 analyzeRewrite.setColType(colTypes);
 return analyzeRewrite;
}

Example source: apache/hive — rejecting temporary and external tables when validating a materialized view plan:

@Override
public RelNode visit(TableScan scan) {
 if (scan instanceof HiveTableScan) {
  HiveTableScan hiveScan = (HiveTableScan) scan;
  RelOptHiveTable relOptHiveTable = (RelOptHiveTable) hiveScan.getTable();
  Table tab = relOptHiveTable.getHiveTableMD();
  if (tab.isTemporary()) {
   fail(tab.getTableName() + " is a temporary table");
  }
  if (tab.getTableType() == TableType.EXTERNAL_TABLE) {
   fail(tab.getFullyQualifiedName() + " is an external table");
  }
  return scan;
 }
 // TableScan of a non-Hive table - don't support for materializations.
 fail(scan.getTable().getQualifiedName() + " is a table scan of a non-Hive table.");
 return scan;
}

Example source: apache/hive — a fragment looking up the current write ID list for a scanned table:

final String tableQName =
  ((RelOptHiveTable) tableScan.getTable()).getHiveTableMD().getFullyQualifiedName();
final ValidWriteIdList tableCurrentTxnList =
  currentTxnList.getTableValidWriteIdList(tableQName);

Example source: apache/hive — registering a materialized table under its fully qualified name:

ctx.addMaterializedTable(table.getFullyQualifiedName(), table);
