Usage of the org.apache.hadoop.hive.ql.metadata.Table.setPartCols() method, with code examples


This article collects code examples for the Java method org.apache.hadoop.hive.ql.metadata.Table.setPartCols(), showing how the method is used in practice. The examples are drawn from selected open-source projects hosted on GitHub, Stack Overflow, Maven, and similar platforms, and are intended as practical references. Details of Table.setPartCols():
Package path: org.apache.hadoop.hive.ql.metadata.Table
Class name: Table
Method name: setPartCols

About Table.setPartCols

No Javadoc is available for this method. Judging from the examples below, setPartCols(List<FieldSchema>) sets the table's partition columns, i.e. the list of FieldSchema objects that form the partition key in the table's metadata.
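
For orientation, here is a minimal sketch of the typical call pattern, distilled from the examples below; the database, table, and column names are illustrative:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.Table;

// Build an in-memory table descriptor in the "default" database,
// partitioned by a single string column "dt".
Table t = new Table("default", "example_table");
List<FieldSchema> partCols = new ArrayList<FieldSchema>();
// FieldSchema takes the column name, its Hive type, and a comment.
partCols.add(new FieldSchema("dt", "string", "date partition column"));
// Attach the partition key schema to the table's metadata.
t.setPartCols(partCols);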

Code examples

Code example source: apache/hive

private Table newTable(boolean isPartitioned) {
  Table t = new Table("default", "table" + Integer.toString(nextInput++));
  if (isPartitioned) {
    FieldSchema fs = new FieldSchema();
    fs.setName("version");
    fs.setType("String");
    List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
    partCols.add(fs);
    t.setPartCols(partCols);
  }
  return t;
}

Code example source: apache/hive

private Table newTable(boolean isPartitioned) {
 Table t = new Table("default", "table" + Integer.toString(nextInput++));
 if (isPartitioned) {
  FieldSchema fs = new FieldSchema();
  fs.setName("version");
  fs.setType("String");
  List<FieldSchema> partCols = new ArrayList<FieldSchema>(1);
  partCols.add(fs);
  t.setPartCols(partCols);
 }
 Map<String, String> tblProps = t.getParameters();
 if(tblProps == null) {
  tblProps = new HashMap<>();
 }
 tblProps.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
 t.setParameters(tblProps);
 return t;
}
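
This variant of the helper additionally marks the table as transactional by setting the TABLE_IS_TRANSACTIONAL property to "true" in the table's parameter map before returning it.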

Code example source: apache/hive

tempTableObj.setPartCols(new ArrayList<>());
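
Passing an empty list clears the partition columns entirely, presumably so that the temporary table object is treated as non-partitioned.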

Code example source: apache/hive

partKeys.add(new FieldSchema(partName, serdeConstants.STRING_TYPE_NAME, ""));
table.setPartCols(partKeys);
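
serdeConstants.STRING_TYPE_NAME is simply the canonical "string" type name; string-typed partition keys are the most common case throughout these examples.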

Code example source: apache/hive

@Test
public void testDataDeletion() throws HiveException,
 IOException, TException {
 Database db = new Database();
 db.setName(dbName);
 hive.createDatabase(db);
 Table table = new Table(dbName, tableName);
 table.setDbName(dbName);
 table.setInputFormatClass(TextInputFormat.class);
 table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
 table.setPartCols(partCols);
 hive.createTable(table);
 table = hive.getTable(dbName, tableName);
 Path fakeTable = table.getPath().getParent().suffix(
   Path.SEPARATOR + "faketable");
 fs = fakeTable.getFileSystem(hive.getConf());
 fs.mkdirs(fakeTable);
 fs.deleteOnExit(fakeTable);
 Path fakePart = new Path(table.getDataLocation().toString(),
   "fakepartition=fakevalue");
 fs.mkdirs(fakePart);
 fs.deleteOnExit(fakePart);
 hive.dropTable(dbName, tableName, true, true);
 assertFalse(fs.exists(fakePart));
 hive.dropDatabase(dbName);
 assertFalse(fs.exists(fakeTable));
}
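
In Hive's metadata API, the two boolean arguments to dropTable correspond to deleteData and ignoreUnknownTab; with deleteData set to true, the drop removes the table's directory tree from the filesystem, which is what the assertion on the stray fakepartition path verifies. The subsequent dropDatabase likewise removes the stray faketable directory under the database path.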

Code example source: apache/hive

tbl.setPartCols(oldtbl.getPartCols());

Code example source: apache/drill

tbl.setPartCols(oldtbl.getPartCols());

Code example source: apache/hive

tbl.setPartCols(getPartCols());

Code example source: apache/drill

tbl.setPartCols(getPartCols());

Code example source: apache/hive

// Fragment: the start of the statement (the FieldSchema's name argument) is truncated in the source.
partCols.add(new FieldSchema(/* name elided */,
    serdeConstants.STRING_TYPE_NAME,
    "partition column, date but in string format as date type is not yet supported in QL"));
tbl.setPartCols(partCols);

Code example source: apache/hive

private Table createTestTable() throws HiveException, AlreadyExistsException {
 Database db = new Database();
 db.setName(dbName);
 hive.createDatabase(db, true);
 Table table = new Table(dbName, tableName);
 table.setDbName(dbName);
 table.setInputFormatClass(TextInputFormat.class);
 table.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
 table.setPartCols(partCols);
 hive.createTable(table);
 table = hive.getTable(dbName, tableName);
 Assert.assertTrue(table.getTTable().isSetId());
 table.getTTable().unsetId();
 for (Map<String, String> partSpec : parts) {
  hive.createPartition(table, partSpec);
 }
 return table;
}
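
Note the id handling: the test first asserts that the metastore assigned an id to the created table (isSetId), then clears it on the client-side Thrift object before reusing the descriptor to create partitions.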

Code example source: apache/hive

oldview.getTTable().getParameters().putAll(crtView.getTblProps());
oldview.setPartCols(crtView.getPartCols());
if (crtView.getInputFormat() != null) {
  oldview.setInputFormatClass(crtView.getInputFormat());
} // remainder of the method truncated in the source

Code example source: apache/drill

oldview.getTTable().getParameters().putAll(crtView.getTblProps());
oldview.setPartCols(crtView.getPartCols());
if (crtView.getInputFormat() != null) {
  oldview.setInputFormatClass(crtView.getInputFormat());
} // remainder truncated in the source
// additional matched line from the source:
tbl.setPartCols(crtView.getPartCols());

Code example source: apache/lens

private Table getHiveTable() {
 Table t = new Table(LensConfConstants.DEFAULT_STATISTICS_DATABASE, EVENT_NAME);
 LinkedList<FieldSchema> partCols = new LinkedList<FieldSchema>();
 partCols.add(new FieldSchema("dt", "string", "partCol"));
 t.setPartCols(partCols);
 return t;
}

Code example source: apache/lens

@Override
public Table getHiveTable(HiveConf conf) {
 Table table = new Table(conf.get(LensConfConstants.STATISTICS_DATABASE_KEY,
  LensConfConstants.DEFAULT_STATISTICS_DATABASE), this.getClass().getSimpleName());
 LinkedList<FieldSchema> colList = new LinkedList<FieldSchema>();
 colList.add(new FieldSchema("handle", "string", "Query Handle"));
 colList.add(new FieldSchema("userQuery", "string", "User Query before rewrite"));
 colList.add(new FieldSchema("submitter", "string", "submitter"));
 colList.add(new FieldSchema("clusterUser", "string", "Cluster User which will do all operations on hdfs"));
 colList.add(new FieldSchema("sessionId", "string", "Lens Session which ran the query"));
 colList.add(new FieldSchema("submissionTime", "bigint", "Time which query was submitted"));
 colList.add(new FieldSchema("startTime", "bigint", "Timestamp which query was Started"));
 colList.add(new FieldSchema("endTime", "bigint", "Timestamp which query was finished"));
 colList.add(new FieldSchema("result", "string", "path to result of query"));
 colList.add(new FieldSchema("cause", "string", "failure/eror cause if any"));
 colList.add(new FieldSchema("status", "map<string,string>", "status object of the query"));
 colList.add(new FieldSchema("driverStats", "map<string,string>", "driver statistics of the query"));
 table.setFields(colList);
 LinkedList<FieldSchema> partCols = new LinkedList<FieldSchema>();
 partCols.add(new FieldSchema("dt", "string", "partCol"));
 table.setPartCols(partCols);
 table.setSerializationLib(JSonSerde.class.getName());
 try {
  table.setInputFormatClass(TextInputFormat.class.getName());
 } catch (HiveException e) {
  log.error("Encountered hive exception.", e);
 }
 return table;
}

Code example source: qubole/streamx

private Table constructAvroTable(String database, String tableName, Schema schema, Partitioner partitioner)
    throws HiveMetaStoreException {
  Table table = new Table(database, tableName);
  table.setTableType(TableType.EXTERNAL_TABLE);
  table.getParameters().put("EXTERNAL", "TRUE");
  String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
  table.setDataLocation(new Path(tablePath));
  table.setSerializationLib(avroSerde);
  try {
    table.setInputFormatClass(avroInputFormat);
    table.setOutputFormatClass(avroOutputFormat);
  } catch (HiveException e) {
    throw new HiveMetaStoreException("Cannot find input/output format:", e);
  }
  List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
  table.setFields(columns);
  table.setPartCols(partitioner.partitionFields());
  table.getParameters().put(AVRO_SCHEMA_LITERAL, avroData.fromConnectSchema(schema).toString());
  return table;
}

Code example source: apache/lens

/**
 * Creates the hive table.
 *
 * @param tableName  the table name
 * @param parameters additional table properties, merged into the defaults
 * @throws HiveException the hive exception
 */
public static void createHiveTable(String tableName, Map<String, String> parameters) throws HiveException {
 List<FieldSchema> columns = new ArrayList<FieldSchema>();
 columns.add(new FieldSchema("col1", "string", ""));
 List<FieldSchema> partCols = new ArrayList<FieldSchema>();
 partCols.add(new FieldSchema("pcol1", "string", ""));
 Map<String, String> params = new HashMap<String, String>();
 params.put("test.hive.table.prop", "tvalue");
 if (null != parameters && !parameters.isEmpty()) {
  params.putAll(parameters);
 }
 Table tbl = Hive.get().newTable(tableName);
 tbl.setTableType(TableType.MANAGED_TABLE);
 tbl.getTTable().getSd().setCols(columns);
 tbl.setPartCols(partCols);
 tbl.getTTable().getParameters().putAll(params);
 Hive.get().createTable(tbl);
}
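
A minimal usage sketch for this helper; the table name and the extra property are illustrative:

Map<String, String> props = new HashMap<String, String>();
props.put("my.custom.prop", "value"); // merged on top of the default test.hive.table.prop
createHiveTable("demo_table", props);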

Code example source: qubole/streamx

private Table constructParquetTable(String database, String tableName, Schema schema, Partitioner partitioner) throws HiveMetaStoreException {
 Table table = new Table(database, tableName);
 table.setTableType(TableType.EXTERNAL_TABLE);
 table.getParameters().put("EXTERNAL", "TRUE");
 String tablePath = FileUtils.hiveDirectoryName(url, topicsDir, tableName);
 table.setDataLocation(new Path(tablePath));
 table.setSerializationLib(getHiveParquetSerde());
 try {
  table.setInputFormatClass(getHiveParquetInputFormat());
  table.setOutputFormatClass(getHiveParquetOutputFormat());
 } catch (HiveException e) {
  throw new HiveMetaStoreException("Cannot find input/output format:", e);
 }
 // convert copycat schema to Hive columns
 List<FieldSchema> columns = HiveSchemaConverter.convertSchema(schema);
 table.setFields(columns);
 table.setPartCols(partitioner.partitionFields());
 return table;
}
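
Like constructAvroTable above, this constructor takes the partition schema from the connector's Partitioner via partitionFields(), which presumably keeps the Hive partition metadata aligned with the directory layout the sink writes.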

Code example source: org.apache.hadoop.hive/hive-exec

tbl.setPartCols(crtView.getPartCols());
