Usage of org.apache.hadoop.hive.ql.exec.Utilities.getColumnNamesFromFieldSchema() with code examples


This article collects code examples for the Java method org.apache.hadoop.hive.ql.exec.Utilities.getColumnNamesFromFieldSchema() and demonstrates how it is used. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method are as follows:
Package: org.apache.hadoop.hive.ql.exec
Class: Utilities
Method: getColumnNamesFromFieldSchema

About Utilities.getColumnNamesFromFieldSchema

No Javadoc is available for this method. Judging from the call sites below, it takes a List<FieldSchema> (for example, the result of Table.getCols() or Table.getPartitionKeys()) and returns the corresponding column names as a List<String>.
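A minimal usage sketch (the class name ColumnNamesExample is just for illustration; hive-exec and hive-metastore are assumed to be on the classpath, and the FieldSchema constructor takes name, type, and comment):

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.exec.Utilities;

public class ColumnNamesExample {
 public static void main(String[] args) {
  // Describe two columns the way the metastore would: (name, type, comment).
  List<FieldSchema> cols = Arrays.asList(
    new FieldSchema("id", "int", null),
    new FieldSchema("name", "string", null));

  // Extract just the column names; prints [id, name].
  List<String> colNames = Utilities.getColumnNamesFromFieldSchema(cols);
  System.out.println(colNames);
 }
}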

Code examples

Code example source: apache/hive

private void validateSpecifiedColumnNames(List<String> specifiedCols)
  throws SemanticException {
 List<String> tableCols = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 for (String sc : specifiedCols) {
  if (!tableCols.contains(sc.toLowerCase())) {
   String msg = "'" + sc + "' (possible columns are " + tableCols.toString() + ")";
   throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(msg));
  }
 }
}
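Note the sc.toLowerCase() in the comparison: Hive normalizes column names to lower case in the metastore, so validation of user-specified column names is effectively case-insensitive.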

Code example source: apache/drill

private void validateSpecifiedColumnNames(List<String> specifiedCols)
  throws SemanticException {
 List<String> tableCols = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 for(String sc : specifiedCols) {
  if (!tableCols.contains(sc.toLowerCase())) {
   String msg = "'" + sc + "' (possible columns are " + tableCols.toString() + ")";
   throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(msg));
  }
 }
}

Code example source: apache/hive

/**
 * Generate a statement of the form SELECT compute_stats(col1), compute_stats(col2), ...,
 * similar to the one generated from ANALYZE TABLE t1 COMPUTE STATISTICS FOR COLUMNS,
 * but with t1 replaced by TABLE(VALUES(cast(null as int), cast(null as string))) AS t1(col1, col2).
 *
 * We use a TABLE(VALUES(...)) statement to compute stats for a CTAS statement because in that case
 * the table has not been created yet. Once the plan for the SELECT statement is generated,
 * we connect it to the existing CTAS plan as we do for INSERT or INSERT OVERWRITE.
 */
public void insertTableValuesAnalyzePipeline() throws SemanticException {
 // Instead of starting from analyze statement, we just generate the Select plan
 boolean isPartitionStats = conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned();
 if (isPartitionStats) {
  partSpec = new HashMap<>();
  List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
  for (String partKey : partKeys) {
   partSpec.put(partKey, null);
  }
 }
 String command = ColumnStatsSemanticAnalyzer.genRewrittenQuery(
   tbl, Utilities.getColumnNamesFromFieldSchema(tbl.getCols()), conf, partSpec, isPartitionStats, true);
 insertAnalyzePipeline(command, true);
}
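For a CTAS target with columns (col1 int, col2 string), the command built above takes roughly the following shape; this is a sketch assembled from the Javadoc, not the literal output of ColumnStatsSemanticAnalyzer.genRewrittenQuery:

// Illustrative shape of the rewritten query string:
String command =
  "SELECT compute_stats(col1), compute_stats(col2) "
  + "FROM TABLE(VALUES(cast(null as int), cast(null as string))) AS t1(col1, col2)";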

Code example source: apache/hive

List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
for (String partKey : partKeys) {
 if (!partSpec.containsKey(partKey)) {
  partSpec.put(partKey, null);
 }
}

Code example source: apache/drill

List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
for (String partKey : partKeys) {
 if (!partSpec.containsKey(partKey)) {
  partSpec.put(partKey, null);
 }
}

Code example source: apache/drill

private List<String> getColumnName(ASTNode tree) throws SemanticException {
 switch (tree.getChildCount()) {
  case 2:
   // No explicit column list in the ANALYZE statement: use all table columns.
   return Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
  case 3:
   // The third child holds the user-specified column list; collect its unescaped names.
   int numCols = tree.getChild(2).getChildCount();
   List<String> colName = new LinkedList<String>();
   for (int i = 0; i < numCols; i++) {
    colName.add(i, new String(getUnescapedName((ASTNode) tree.getChild(2).getChild(i))));
   }
   return colName;
  default:
   throw new SemanticException("Internal error. Expected number of children of ASTNode to be"
     + " either 2 or 3. Found : " + tree.getChildCount());
 }
}

Code example source: apache/hive

private List<String> getColumnName(ASTNode tree) throws SemanticException {
 switch (tree.getChildCount()) {
 case 2:
  return Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 case 3:
  int numCols = tree.getChild(2).getChildCount();
  List<String> colName = new ArrayList<String>(numCols);
  for (int i = 0; i < numCols; i++) {
   colName.add(getUnescapedName((ASTNode) tree.getChild(2).getChild(i)));
  }
  return colName;
 default:
  throw new SemanticException("Internal error. Expected number of children of ASTNode to be"
    + " either 2 or 3. Found : " + tree.getChildCount());
 }
}
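Compared with the apache/drill variant above, this apache/hive version pre-sizes an ArrayList instead of using a LinkedList and drops the redundant new String(...) copy; the behavior is identical, but this form is the more idiomatic one.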

Code example source: apache/hive

List<FieldSchema> cols = t.getCols();
if (cols != null && !cols.isEmpty()) {
 colNames = Utilities.getColumnNamesFromFieldSchema(cols);

Code example source: apache/drill

List<FieldSchema> cols = t.getCols();
if (cols != null && !cols.isEmpty()) {
 colNames = Utilities.getColumnNamesFromFieldSchema(cols);

Code example source: apache/hive

static AnalyzeRewriteContext genAnalyzeRewriteContext(HiveConf conf, Table tbl) {
 AnalyzeRewriteContext analyzeRewrite = new AnalyzeRewriteContext();
 analyzeRewrite.setTableName(tbl.getFullyQualifiedName());
 // Stats are table-level unless partition-level collection is enabled and the table is partitioned.
 analyzeRewrite.setTblLvl(!(conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()));
 List<String> colNames = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 List<String> colTypes = getColumnTypes(tbl, colNames);
 analyzeRewrite.setColName(colNames);
 analyzeRewrite.setColType(colTypes);
 return analyzeRewrite;
}

Code example source: apache/drill

StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
if (!alterTbl.isTurnOffSorting()) {

Code example source: apache/hive

StorageDescriptor sd = retrieveStorageDescriptor(tbl, part);
List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
if (!alterTbl.isTurnOffSorting()) {

Code example source: apache/hive

Map<String, String> partSpec = null;
checkForPartitionColumns(colNames,
  Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
validateSpecifiedColumnNames(colNames);
if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {

Code example source: apache/drill

Map<String, String> partSpec = null;
checkForPartitionColumns(colNames,
  Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
validateSpecifiedColumnNames(colNames);
if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {

Code example source: apache/drill

Map<String,String> partSpec = null;
checkForPartitionColumns(
  colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
validateSpecifiedColumnNames(colNames);
if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {

Code example source: apache/hive

Map<String, String> partSpec = null;
checkForPartitionColumns(
  colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
validateSpecifiedColumnNames(colNames);
if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {

Code example source: com.facebook.presto.hive/hive-apache

private void validateSpecifiedColumnNames(List<String> specifiedCols)
  throws SemanticException {
 List<String> tableCols = Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
 for(String sc : specifiedCols) {
  if (!tableCols.contains(sc.toLowerCase())) {
   String msg = "'" + sc + "' (possible columns are " + tableCols.toString() + ")";
   throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(msg));
  }
 }
}

Code example source: com.facebook.presto.hive/hive-apache

private void handlePartialPartitionSpec(Map<String, String> partSpec) throws SemanticException {
 // If the user has fully specified the partition, validate that the partition exists
 int partValsSpecified = 0;
 for (String partKey : partSpec.keySet()) {
  partValsSpecified += partSpec.get(partKey) == null ? 0 : 1;
 }
 try {
  if ((partValsSpecified == tbl.getPartitionKeys().size())
    && (db.getPartition(tbl, partSpec, false, null, false) == null)) {
   throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
  }
 } catch (HiveException he) {
  throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PARTITION.getMsg() + " : " + partSpec);
 }
 // The user might have specified only some of the partition keys; add the missing ones to partSpec
 List<String> partKeys = Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys());
 for (String partKey : partKeys) {
  if (!partSpec.containsKey(partKey)) {
   partSpec.put(partKey, null);
  }
 }
 // Check whether the user erroneously specified non-existent partition columns
 for (String partKey : partSpec.keySet()) {
  if (!partKeys.contains(partKey)) {
   throw new SemanticException(ErrorMsg.COLUMNSTATSCOLLECTOR_INVALID_PART_KEY.getMsg() + " : " + partKey);
  }
 }
}
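For example, for a table partitioned by (ds, hr), a spec such as {ds=2022-01-01} passes validation and is filled out to {ds=2022-01-01, hr=null} before column statistics are collected, while a spec containing a key that is not a partition column is rejected.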

Code example source: com.facebook.presto.hive/hive-apache

private List<String> getColumnName(ASTNode tree) throws SemanticException{
 switch (tree.getChildCount()) {
  case 2:
   return Utilities.getColumnNamesFromFieldSchema(tbl.getCols());
  case 3:
   int numCols = tree.getChild(2).getChildCount();
   List<String> colName = new LinkedList<String>();
   for (int i = 0; i < numCols; i++) {
    colName.add(i, new String(getUnescapedName((ASTNode) tree.getChild(2).getChild(i))));
   }
   return colName;
  default:
   throw new SemanticException("Internal error. Expected number of children of ASTNode to be"
     + " either 2 or 3. Found : " + tree.getChildCount());
 }
}

Code example source: com.facebook.presto.hive/hive-apache

Map<String,String> partSpec = null;
checkForPartitionColumns(
  colNames, Utilities.getColumnNamesFromFieldSchema(tbl.getPartitionKeys()));
validateSpecifiedColumnNames(colNames);
if (conf.getBoolVar(ConfVars.HIVE_STATS_COLLECT_PART_LEVEL_STATS) && tbl.isPartitioned()) {
