Usage of the org.apache.hadoop.hive.ql.metadata.Table.getSortCols() method, with code examples

This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Table.getSortCols() method and shows how it is used in practice. The examples are extracted from selected open-source projects found via platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Table.getSortCols() are as follows:

Package: org.apache.hadoop.hive.ql.metadata
Class: Table
Method: getSortCols

About Table.getSortCols

getSortCols() returns the table's SORTED BY columns as a List<Order>, read from the table's storage descriptor. Each Order pairs a column name (getCol()) with an integer sort-direction code (getOrder()): 1 for ascending, 0 for descending (Hive's HIVE_COLUMN_ORDER_ASC/DESC constants).
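As a starting point, here is a minimal, hypothetical usage sketch; the class name SortColsInspector and the printing logic are illustrative and not taken from any of the projects below, and it assumes the direction convention just described (getOrder() == 1 means ascending):

import java.util.List;

import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.ql.metadata.Table;

public class SortColsInspector {
 // Print each SORTED BY column of a table together with its direction.
 public static void printSortCols(Table table) {
  List<Order> sortCols = table.getSortCols();
  if (sortCols == null || sortCols.isEmpty()) {
   System.out.println(table.getTableName() + " has no SORTED BY columns");
   return;
  }
  for (Order order : sortCols) {
   // Order pairs a column name with an integer direction code.
   String direction = (order.getOrder() == 1) ? "ASC" : "DESC";
   System.out.println(order.getCol() + " " + direction);
  }
 }
}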

Code examples

Example source: apache/hive

private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
  throws SemanticException {
 List<Order> tabSortCols = tab.getSortCols();
 List<FieldSchema> tabCols = tab.getCols();
 ArrayList<Integer> orders = new ArrayList<Integer>();
 for (Order sortCol : tabSortCols) {
  for (FieldSchema tabCol : tabCols) {
   if (sortCol.getCol().equals(tabCol.getName())) {
    orders.add(sortCol.getOrder());
    break;
   }
  }
 }
 return orders;
}
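This helper walks the table's sort columns in declaration order and, for each one found in the table schema, records its direction code, yielding a direction vector aligned with the sort column list; sort columns that cannot be matched against the schema are silently skipped.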

Example source: apache/hive

private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc,
                      Operator input, boolean convert)
  throws SemanticException {
 List<Order> tabSortCols = tab.getSortCols();
 List<FieldSchema> tabCols = tab.getCols();
 // Locate each sort column's position in the table schema
 List<Integer> posns = new ArrayList<Integer>();
 for (Order sortCol : tabSortCols) {
  int pos = 0;
  for (FieldSchema tabCol : tabCols) {
   if (sortCol.getCol().equals(tabCol.getName())) {
    posns.add(pos);
    break;
   }
   pos++;
  }
 }
 return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
}
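This companion helper resolves each sort column to its ordinal position in the table schema and hands those positions to genConvertCol, which builds the corresponding ExprNodeDesc expressions over the input operator (applying type conversion when convert is set).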

Example source: apache/drill

private ArrayList<Integer> getSortOrders(String dest, QB qb, Table tab, Operator input)
  throws SemanticException {
 List<Order> tabSortCols = tab.getSortCols();
 List<FieldSchema> tabCols = tab.getCols();
 ArrayList<Integer> orders = new ArrayList<Integer>();
 for (Order sortCol : tabSortCols) {
  for (FieldSchema tabCol : tabCols) {
   if (sortCol.getCol().equals(tabCol.getName())) {
    orders.add(sortCol.getOrder());
    break;
   }
  }
 }
 return orders;
}

Example source: apache/drill

private ArrayList<ExprNodeDesc> getSortCols(String dest, QB qb, Table tab, TableDesc table_desc,
  Operator input, boolean convert)
  throws SemanticException {
 List<Order> tabSortCols = tab.getSortCols();
 List<FieldSchema> tabCols = tab.getCols();
 // Locate each sort column's position in the table schema
 List<Integer> posns = new ArrayList<Integer>();
 for (Order sortCol : tabSortCols) {
  int pos = 0;
  for (FieldSchema tabCol : tabCols) {
   if (sortCol.getCol().equals(tabCol.getName())) {
    posns.add(pos);
    break;
   }
   pos++;
  }
 }
 return genConvertCol(dest, qb, tab, table_desc, input, posns, convert);
}

Example source: apache/hive

private boolean checkTable(Table table,
  List<Integer> bucketPositionsDest,
  List<Integer> sortPositionsDest,
  List<Integer> sortOrderDest,
  int numBucketsDest) {
 // The bucketing and sorting positions should exactly match
 int numBuckets = table.getNumBuckets();
 if (numBucketsDest != numBuckets) {
  return false;
 }
 List<Integer> tableBucketPositions =
   getBucketPositions(table.getBucketCols(), table.getCols());
 List<Integer> sortPositions =
   getSortPositions(table.getSortCols(), table.getCols());
 List<Integer> sortOrder =
   getSortOrder(table.getSortCols(), table.getCols());
 return bucketPositionsDest.equals(tableBucketPositions) &&
   sortPositionsDest.equals(sortPositions) &&
   sortOrderDest.equals(sortOrder);
}
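checkTable returns true only when the destination spec matches the table's physical layout exactly: the same bucket count, bucket column positions, sort column positions, and sort directions. Matching on all four is what lets the optimizer treat the table's existing layout as already satisfying the destination's bucketing and sorting requirements.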

Example source: apache/hive

@Override
public List<RelCollation> getCollationList() {
 ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
 for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(sortColumn.getCol())) {
    Direction direction;
    NullDirection nullDirection;
    if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
     direction = Direction.ASCENDING;
     nullDirection = NullDirection.FIRST;
    } else {
     direction = Direction.DESCENDING;
     nullDirection = NullDirection.LAST;
    }
    collationList.add(new RelFieldCollation(i, direction, nullDirection));
    break;
   }
  }
 }
 return new ImmutableList.Builder<RelCollation>()
     .add(RelCollationTraitDef.INSTANCE.canonize(
         new HiveRelCollation(collationList.build())))
     .build();
}
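Here the table's sort metadata is exposed to the Calcite planner: each sort column becomes a RelFieldCollation on its index in the storage descriptor's column list, with ascending order mapped to nulls-first and descending order to nulls-last.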

Example source: apache/drill

private boolean checkTable(Table table,
  List<Integer> bucketPositionsDest,
  List<Integer> sortPositionsDest,
  List<Integer> sortOrderDest,
  int numBucketsDest) {
 // The bucketing and sorting positions should exactly match
 int numBuckets = table.getNumBuckets();
 if (numBucketsDest != numBuckets) {
  return false;
 }
 List<Integer> tableBucketPositions =
   getBucketPositions(table.getBucketCols(), table.getCols());
 List<Integer> sortPositions =
   getSortPositions(table.getSortCols(), table.getCols());
 List<Integer> sortOrder =
   getSortOrder(table.getSortCols(), table.getCols());
 return bucketPositionsDest.equals(tableBucketPositions) &&
   sortPositionsDest.equals(sortPositions) &&
   sortOrderDest.equals(sortOrder);
}

Example source: apache/hive

private void genPartnCols(String dest, Operator input, QB qb,
  TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
 boolean enforceBucketing = false;
 ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 if ((dest_tab.getNumBuckets() > 0)) {
  enforceBucketing = true;
  if (updating(dest) || deleting(dest)) {
   partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
  } else {
   partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
     false);
  }
 }
 if ((dest_tab.getSortCols() != null) &&
   (dest_tab.getSortCols().size() > 0)) {
  if (!enforceBucketing) {
   throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
  }
  else {
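    // Note: unreachable in this version of the code, since reaching this
    // else branch implies enforceBucketing is already true.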
   if(!enforceBucketing) {
    partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
   }
  }
  enforceBucketing = true;
 }
 if (enforceBucketing) {
  ctx.setPartnCols(partnColsNoConvert);
 }
}

Example source: apache/drill

private void checkAcidConstraints(QB qb, TableDesc tableDesc,
                 Table table) throws SemanticException {
 String tableName = tableDesc.getTableName();
 if (!qb.getParseInfo().isInsertIntoTable(tableName)) {
  LOG.debug("Couldn't find table " + tableName + " in insertIntoTable");
  throw new SemanticException(ErrorMsg.NO_INSERT_OVERWRITE_WITH_ACID.getMsg());
 }
 /*
 LOG.info("Modifying config values for ACID write");
 conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true);
 conf.setIntVar(ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER, 1);
 These props are now enabled elsewhere (see commit diffs).  It would be better instead to throw
 if they are not set.  For example, if a user has set hive.optimize.reducededuplication=false for
 some reason, we'll run a query contrary to what they wanted...  But throwing now would be
 backwards incompatible.
 */
 conf.set(AcidUtils.CONF_ACID_KEY, "true");
 if (table.getNumBuckets() < 1) {
  throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, table.getTableName());
 }
 if (table.getSortCols() != null && table.getSortCols().size() > 0) {
  throw new SemanticException(ErrorMsg.ACID_NO_SORTED_BUCKETS, table.getTableName());
 }
}
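The relevant constraint for getSortCols() here: ACID writes require a bucketed destination table, and they reject tables that declare sort columns, so a non-empty getSortCols() result triggers the ACID_NO_SORTED_BUCKETS error.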

Example source: apache/hive

sortColumnsFirstTable.addAll(tbl.getSortCols());
return checkSortColsAndJoinCols(tbl.getSortCols(),
 joinCols,
 sortColumnsFirstTable);

Example source: apache/drill

private void genPartnCols(String dest, Operator input, QB qb,
  TableDesc table_desc, Table dest_tab, SortBucketRSCtx ctx) throws SemanticException {
 boolean enforceBucketing = false;
 ArrayList<ExprNodeDesc> partnColsNoConvert = new ArrayList<ExprNodeDesc>();
 if ((dest_tab.getNumBuckets() > 0)) {
  enforceBucketing = true;
  if (updating(dest) || deleting(dest)) {
   partnColsNoConvert = getPartitionColsFromBucketColsForUpdateDelete(input, false);
  } else {
   partnColsNoConvert = getPartitionColsFromBucketCols(dest, qb, dest_tab, table_desc, input,
     false);
  }
 }
 if ((dest_tab.getSortCols() != null) &&
   (dest_tab.getSortCols().size() > 0)) {
  if (!enforceBucketing && !dest_tab.isIndexTable()) {
   throw new SemanticException(ErrorMsg.TBL_SORTED_NOT_BUCKETED.getErrorCodedMsg(dest_tab.getCompleteName()));
  }
  else {
   if(!enforceBucketing) {
    partnColsNoConvert = getSortCols(dest, qb, dest_tab, table_desc, input, false);
   }
  }
  enforceBucketing = true;
 }
 if (enforceBucketing) {
  ctx.setPartnCols(partnColsNoConvert);
 }
}
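This variant differs from the Hive version above in one condition: index tables are exempt from the TBL_SORTED_NOT_BUCKETED error (dest_tab.isIndexTable()), which also makes the inner !enforceBucketing branch reachable for them.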

Example source: apache/hive

numBuckets = table.getNumBuckets();
List<String> sortCols = new ArrayList<String>();
for (Order colSortOrder : table.getSortCols()) {
 sortCols.add(colSortOrder.getCol());
}

Example source: apache/drill

numBuckets = table.getNumBuckets();
List<String> sortCols = new ArrayList<String>();
for (Order colSortOrder : table.getSortCols()) {
 sortCols.add(colSortOrder.getCol());
}

Example source: apache/hive

List<String> sortCols = Utilities.getColumnNamesFromSortCols(table.getSortCols());
List<String> bucketCols = table.getBucketCols();
return matchBucketSortCols(groupByCols, bucketCols, sortCols);

Example source: apache/hive

if ((dest_tab.getSortCols() != null) &&
  (dest_tab.getSortCols().size() > 0)) {
 sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
 sortOrders = getSortOrders(dest, qb, dest_tab, input);
}

Example source: apache/drill

if ((dest_tab.getSortCols() != null) &&
  (dest_tab.getSortCols().size() > 0)) {
 sortCols = getSortCols(dest, qb, dest_tab, table_desc, input, true);
 sortOrders = getSortOrders(dest, qb, dest_tab, input);
}

Example source: apache/hive

if (!destTable.getSortCols().isEmpty()) {
 sortPositions = getSortPositions(destTable.getSortCols(), destTable.getCols());
 sortOrder = getSortOrders(destTable.getSortCols(), destTable.getCols());
} else {

Example source: apache/drill

@Override
public List<RelCollation> getCollationList() {
 ImmutableList.Builder<RelFieldCollation> collationList = new ImmutableList.Builder<RelFieldCollation>();
 for (Order sortColumn : this.hiveTblMetadata.getSortCols()) {
  for (int i=0; i<this.hiveTblMetadata.getSd().getCols().size(); i++) {
   FieldSchema field = this.hiveTblMetadata.getSd().getCols().get(i);
   if (field.getName().equals(sortColumn.getCol())) {
    Direction direction;
    NullDirection nullDirection;
    if (sortColumn.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_ASC) {
     direction = Direction.ASCENDING;
     nullDirection = NullDirection.FIRST;
    }
    else {
     direction = Direction.DESCENDING;
     nullDirection = NullDirection.LAST;
    }
    collationList.add(new RelFieldCollation(i,direction,nullDirection));
    break;
   }
  }
 }
 return new ImmutableList.Builder<RelCollation>()
     .add(RelCollationTraitDef.INSTANCE.canonize(
         new HiveRelCollation(collationList.build())))
     .build();
}

Example source: apache/hive

private void alterPartitionSpecInMemory(Table tbl,
  Map<String, String> partSpec,
  org.apache.hadoop.hive.metastore.api.Partition tpart,
  boolean inheritTableSpecs,
  String partPath) throws HiveException, InvalidOperationException {
 LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
   + partSpec);
 if (inheritTableSpecs) {
  tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
  tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
  tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
  tpart.getSd().getSerdeInfo().setParameters(
    tbl.getTTable().getSd().getSerdeInfo().getParameters());
  tpart.getSd().setBucketCols(tbl.getBucketCols());
  tpart.getSd().setNumBuckets(tbl.getNumBuckets());
  tpart.getSd().setSortCols(tbl.getSortCols());
 }
 if (partPath == null || partPath.trim().equals("")) {
  throw new HiveException("new partition path should not be null or empty.");
 }
 tpart.getSd().setLocation(partPath);
}
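When inheritTableSpecs is set, the partition's storage descriptor inherits the table's physical layout: input/output formats, serde, bucketing, and, via getSortCols(), the table's sort columns, which are copied onto the partition with setSortCols.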

Example source: apache/drill

private void alterPartitionSpecInMemory(Table tbl,
  Map<String, String> partSpec,
  org.apache.hadoop.hive.metastore.api.Partition tpart,
  boolean inheritTableSpecs,
  String partPath) throws HiveException, InvalidOperationException {
 LOG.debug("altering partition for table " + tbl.getTableName() + " with partition spec : "
   + partSpec);
 if (inheritTableSpecs) {
  tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
  tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
  tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
  tpart.getSd().getSerdeInfo().setParameters(
    tbl.getTTable().getSd().getSerdeInfo().getParameters());
  tpart.getSd().setBucketCols(tbl.getBucketCols());
  tpart.getSd().setNumBuckets(tbl.getNumBuckets());
  tpart.getSd().setSortCols(tbl.getSortCols());
 }
 if (partPath == null || partPath.trim().equals("")) {
  throw new HiveException("new partition path should not be null or empty.");
 }
 tpart.getSd().setLocation(partPath);
}
