本文整理了Java中org.apache.hadoop.hive.ql.metadata.Table.getAllCols()
方法的一些代码示例,展示了Table.getAllCols()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Table.getAllCols()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Table
类名称:Table
方法名:getAllCols
[英]Returns a list of all the columns of the table (data columns + partition columns, in that order).
[中]返回表中所有列(数据列+分区列)的列表。
代码示例来源:origin: apache/hive
/**
 * Collects the name and type of every column of the table (data columns
 * followed by partition columns) into the two supplied output lists.
 *
 * @param table    table whose columns are inspected
 * @param colNames output list that receives each column's name
 * @param colTypes output list that receives each column's type string
 */
private static void extractColumnInfos(Table table, List<String> colNames, List<String> colTypes) {
  table.getAllCols().forEach(field -> {
    colNames.add(field.getName());
    colTypes.add(field.getType());
  });
}
代码示例来源:origin: apache/hive
/**
 * Substitutes every `default` token in a MERGE VALUES clause with the
 * corresponding column's default-constraint value, or the literal "null"
 * when the column has no default constraint.
 *
 * @param valueClause    comma-separated VALUES clause text
 * @param table          target table supplying column metadata
 * @param columnListNode optional AST node listing the insert columns; when
 *                       null, all of the table's columns are used in order
 * @return the clause with `default` tokens replaced
 * @throws SemanticException propagated from default-value resolution
 */
private String replaceDefaultKeywordForMerge(String valueClause, Table table, ASTNode columnListNode)
    throws SemanticException {
  // Fast path: nothing to substitute.
  if (!valueClause.toLowerCase().contains("`default`")) {
    return valueClause;
  }
  Map<String, String> colNameToDefaultConstraint = getColNameToDefaultValueMap(table);
  String[] values = valueClause.trim().split(",");
  String[] replacedValues = new String[values.length];
  // Resolve column order: the explicit column list from the query when
  // present, otherwise the table's full column list.
  String[] columnNames;
  if (columnListNode == null) {
    List<FieldSchema> allCols = table.getAllCols();
    columnNames = new String[allCols.size()];
    for (int i = 0; i < allCols.size(); i++) {
      columnNames[i] = allCols.get(i).getName();
    }
  } else {
    columnNames = new String[columnListNode.getChildren().size()];
    int idx = 0;
    for (Object child : columnListNode.getChildren()) {
      columnNames[idx++] = ((ASTNode) child).toString();
    }
  }
  for (int i = 0; i < values.length; i++) {
    replacedValues[i] = values[i].trim().toLowerCase().equals("`default`")
        ? MapUtils.getString(colNameToDefaultConstraint, columnNames[i], "null")
        : values[i];
  }
  return StringUtils.join(replacedValues, ',');
}
代码示例来源:origin: apache/hive
for (pkPos = 0; pkPos < parentTab.getAllCols().size(); pkPos++) {
String pkColName = parentTab.getAllCols().get(pkPos).getName();
if (pkColName.equals(fkCol.parentColName)) {
break;
|| pkPos == parentTab.getAllCols().size()) {
LOG.error("Column for foreign key definition " + fkCol + " not found");
return ImmutableList.of();
代码示例来源:origin: apache/hive
/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
 * {@link org.apache.calcite.rel.logical.LogicalProject}.
 */
public TrimResult trimFields(Project project, ImmutableBitSet fieldsUsed,
    Set<RelDataTypeField> extraFields) {
  // Record column-level access for view authorization before delegating.
  // The containment check and table lookup are loop-invariant, so they are
  // performed once up front.
  boolean trackViewAccess = this.columnAccessInfo != null
      && this.viewProjectToTableSchema != null
      && this.viewProjectToTableSchema.containsKey(project);
  if (trackViewAccess) {
    Table tab = this.viewProjectToTableSchema.get(project);
    for (Ord<RexNode> ord : Ord.zip(project.getProjects())) {
      if (fieldsUsed.get(ord.i)) {
        this.columnAccessInfo.add(tab.getCompleteName(), tab.getAllCols().get(ord.i).getName());
      }
    }
  }
  return super.trimFields(project, fieldsUsed, extraFields);
}
代码示例来源:origin: apache/hive
Set<String> constantCols = new HashSet<String>();
Table table = tableScanOp.getConf().getTableMetadata();
for (FieldSchema col : table.getAllCols()) {
tableColsMapping.put(col.getName(), col.getName());
代码示例来源:origin: apache/drill
Set<String> constantCols = new HashSet<String>();
Table table = tableScanOp.getConf().getTableMetadata();
for (FieldSchema col : table.getAllCols()) {
tableColsMapping.put(col.getName(), col.getName());
代码示例来源:origin: apache/drill
for (FieldSchema col : table.getAllCols()) {
colNames.add(col.getName());
colTypes.add(col.getType());
代码示例来源:origin: apache/hive
tempTableObj.setFields(table.getAllCols());
代码示例来源:origin: apache/hive
List<FieldSchema> cols = t.getAllCols();
Map<String, FieldSchema> fieldSchemaMap = new HashMap<String, FieldSchema>();
for(FieldSchema col : cols) {
代码示例来源:origin: apache/drill
List<FieldSchema> cols = t.getAllCols();
Map<String, FieldSchema> fieldSchemaMap = new HashMap<String, FieldSchema>();
for(FieldSchema col : cols) {
代码示例来源:origin: apache/drill
validatePartitionValues(partSpecs);
boolean sameColumns = MetaStoreUtils.compareFieldColumns(
destTable.getAllCols(), sourceTable.getAllCols());
boolean samePartitions = MetaStoreUtils.compareFieldColumns(
destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
代码示例来源:origin: apache/hive
validatePartitionValues(partSpecs);
boolean sameColumns = MetaStoreUtils.compareFieldColumns(
destTable.getAllCols(), sourceTable.getAllCols());
boolean samePartitions = MetaStoreUtils.compareFieldColumns(
destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
代码示例来源:origin: apache/hive
if (table.isMaterializedView()) {
this.createViewDesc = new CreateViewDesc(dbDotView,
table.getAllCols(),
} else {
this.createViewDesc = new CreateViewDesc(dbDotView,
table.getAllCols(),
代码示例来源:origin: apache/drill
if (table.isMaterializedView()) {
this.createViewDesc = new CreateViewDesc(dbDotView,
table.getAllCols(),
} else {
this.createViewDesc = new CreateViewDesc(dbDotView,
table.getAllCols(),
代码示例来源:origin: apache/incubator-atlas
/**
 * Builds a "create external table" statement for the given table at the
 * supplied location, inlining the table's full column list (data +
 * partition columns) when one is available.
 *
 * @param table    table whose name and columns are used
 * @param location storage location for the external table
 * @return the CREATE TABLE query string
 */
private String getCreateTableString(Table table, String location){
  // Build "(name type,name type,...)" with a StringBuilder instead of
  // repeated String concatenation plus a trailing-comma substring chop.
  // When there are no columns the clause is omitted entirely, matching
  // the original behavior.
  StringBuilder colString = new StringBuilder();
  List<FieldSchema> colList = table.getAllCols();
  if (colList != null && !colList.isEmpty()) {
    colString.append('(');
    for (int i = 0; i < colList.size(); i++) {
      if (i > 0) {
        colString.append(',');
      }
      FieldSchema col = colList.get(i);
      colString.append(col.getName()).append(' ').append(col.getType());
    }
    colString.append(')');
  }
  return "create external table " + table.getTableName() + colString +
      " location '" + location + "'";
}
代码示例来源:origin: apache/lens
Hive metastoreClient = Hive.get(conf);
Table tbl = (db == null) ? metastoreClient.getTable(inputTable) : metastoreClient.getTable(db, inputTable);
columns = tbl.getAllCols();
columnNameToFieldSchema = new HashMap<String, FieldSchema>();
代码示例来源:origin: apache/lens
List<FieldSchema> allCols = tbl.getAllCols();
int f = 0;
for (int i = 0; i < tbl.getAllCols().size(); i++) {
String colName = allCols.get(i).getName();
if (features.contains(colName)) {
代码示例来源:origin: apache/incubator-atlas
List<FieldSchema> oldColList = oldTable.getAllCols();
Table outputTbl = event.getOutputs().iterator().next().getTable();
outputTbl = dgiBridge.hiveClient.getTable(outputTbl.getDbName(), outputTbl.getTableName());
List<FieldSchema> newColList = outputTbl.getAllCols();
assert oldColList.size() == newColList.size();
代码示例来源:origin: com.facebook.presto.hive/hive-apache
List<FieldSchema> cols = t.getAllCols();
Map<String, FieldSchema> fieldSchemaMap = new HashMap<String, FieldSchema>();
for(FieldSchema col : cols) {
代码示例来源:origin: com.facebook.presto.hive/hive-apache
validatePartitionValues(partSpecs);
boolean sameColumns = MetaStoreUtils.compareFieldColumns(
destTable.getAllCols(), sourceTable.getAllCols());
boolean samePartitions = MetaStoreUtils.compareFieldColumns(
destTable.getPartitionKeys(), sourceTable.getPartitionKeys());
内容来源于网络,如有侵权,请联系作者删除!