Usage of the org.apache.hadoop.hive.ql.metadata.Partition.<init>() method, with code examples


This article collects Java code examples of the org.apache.hadoop.hive.ql.metadata.Partition.<init>() method and shows how Partition.<init>() is used in practice. The examples are taken from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the Partition.<init>() method:
Package path: org.apache.hadoop.hive.ql.metadata.Partition
Class name: Partition
Method name: <init>

About Partition.<init>

Used only for serialization. (This Javadoc describes the no-argument constructor; the examples below also use overloads that take a Table together with a metastore-API partition object, a partition spec plus a Path, or a Table alone.)
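
As a quick orientation, here is a minimal sketch of the two constructor overloads that appear most often in the collected examples: wrapping a metastore-API partition object, and building a partition from a spec plus a location. This is an illustration only, not code from any of the projects below; the class name PartitionInitSketch, the partition column "ds", and the warehouse path are hypothetical placeholders.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionInitSketch {

 /** Wraps a metastore-API partition (e.g. fetched through a metastore client) in a ql.metadata.Partition. */
 static Partition fromMetastore(Table qlTable,
   org.apache.hadoop.hive.metastore.api.Partition tPart) throws HiveException {
  return new Partition(qlTable, tPart);
 }

 /** Builds a Partition directly from a partition spec and a data location (both hypothetical here). */
 static Partition fromSpec(Table qlTable) throws HiveException {
  Map<String, String> partSpec = new LinkedHashMap<>();
  partSpec.put("ds", "2016-01-01");
  return new Partition(qlTable, partSpec,
    new Path("/warehouse/my_db/my_table/ds=2016-01-01"));
 }
}

The no-argument constructor, used in some of the test code below, builds an empty object that is then populated via setTPartition(); per the Javadoc it is intended only for serialization.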

Code examples

Code example source: apache/hive

private static List<Partition> convertFromMetastore(Table tbl,
  List<org.apache.hadoop.hive.metastore.api.Partition> partitions) throws HiveException {
 if (partitions == null) {
  return new ArrayList<Partition>();
 }
 List<Partition> results = new ArrayList<Partition>(partitions.size());
 for (org.apache.hadoop.hive.metastore.api.Partition tPart : partitions) {
  results.add(new Partition(tbl, tPart));
 }
 return results;
}

Code example source: apache/drill

@Nullable
@Override
public Partition apply(@Nullable org.apache.hadoop.hive.metastore.api.Partition input) {
 if (input == null) {
  return null;
 }
 try {
  return new Partition(qlMdTable, input);
 } catch (HiveException e) {
  throw new IllegalArgumentException(e);
 }
}

Code example source: apache/drill

private static List<Partition> convertFromMetastore(Table tbl,
  List<org.apache.hadoop.hive.metastore.api.Partition> partitions) throws HiveException {
 if (partitions == null) {
  return new ArrayList<Partition>();
 }
 List<Partition> results = new ArrayList<Partition>(partitions.size());
 for (org.apache.hadoop.hive.metastore.api.Partition tPart : partitions) {
  results.add(new Partition(tbl, tPart));
 }
 return results;
}

Code example source: apache/hive

private org.apache.hadoop.hive.ql.metadata.Partition partitionObject(
    org.apache.hadoop.hive.ql.metadata.Table qlMdTable, InsertMessage insertMsg) throws Exception {
 return new org.apache.hadoop.hive.ql.metadata.Partition(qlMdTable, insertMsg.getPtnObj());
}

Code example source: apache/hive

@Override
public Object getOutput() throws HiveException {
 return new Partition(table, partition.getTPartition());
}

Code example source: apache/hive

void updatePartitionLocation(String dbName, Table table, String partName,
  Partition part, Path newLocation) throws HiveException, TException {
 String msg = String.format("ALTER TABLE %s PARTITION (%s) SET LOCATION '%s'",
   getQualifiedName(table), partName, newLocation.toString());
 LOG.info(msg);
 org.apache.hadoop.hive.ql.metadata.Partition modifiedPart =
   new org.apache.hadoop.hive.ql.metadata.Partition(
     new org.apache.hadoop.hive.ql.metadata.Table(table),
     part);
 modifiedPart.setLocation(newLocation.toString());
 alterPartitionInternal(table, modifiedPart);
}

Code example source: apache/hive

private WriteEntity addPartitionOutput(Table t, WriteEntity.WriteType writeType)
  throws Exception {
 Map<String, String> partSpec = new HashMap<String, String>();
 partSpec.put("version", Integer.toString(nextInput++));
 Partition p = new Partition(t, partSpec, new Path("/dev/null"));
 WriteEntity we = new WriteEntity(p, writeType);
 writeEntities.add(we);
 return we;
}

Code example source: apache/hive

private void addPartitionInput(Table t) throws Exception {
 Map<String, String> partSpec = new HashMap<String, String>();
 partSpec.put("version", Integer.toString(nextInput++));
 Partition p = new Partition(t, partSpec, new Path("/dev/null"));
 ReadEntity re = new ReadEntity(p);
 readEntities.add(re);
}

Code example source: apache/incubator-gobblin

private static org.apache.hadoop.hive.ql.metadata.Partition getQlPartition(final Table table, final Partition partition) {
 try {
  return new org.apache.hadoop.hive.ql.metadata.Partition(new org.apache.hadoop.hive.ql.metadata.Table(table), partition);
 } catch (HiveException e) {
  throw new RuntimeException(e);
 }
}

Code example source: apache/drill

private void alterPartitionSpec(Table tbl,
                Map<String, String> partSpec,
                org.apache.hadoop.hive.metastore.api.Partition tpart,
                boolean inheritTableSpecs,
                String partPath) throws HiveException, InvalidOperationException {
 alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
 String fullName = tbl.getTableName();
 if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
  fullName = tbl.getDbName() + "." + tbl.getTableName();
 }
 alterPartition(fullName, new Partition(tbl, tpart), null);
}

Code example source: apache/hive

private void alterPartitionSpec(Table tbl,
                Map<String, String> partSpec,
                org.apache.hadoop.hive.metastore.api.Partition tpart,
                boolean inheritTableSpecs,
                String partPath) throws HiveException, InvalidOperationException {
 alterPartitionSpecInMemory(tbl, partSpec, tpart, inheritTableSpecs, partPath);
 String fullName = tbl.getTableName();
 if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
  fullName = tbl.getFullyQualifiedName();
 }
 alterPartition(tbl.getCatalogName(), tbl.getDbName(), tbl.getTableName(),
   new Partition(tbl, tpart), null, true);
}

Code example source: apache/incubator-gobblin

public org.apache.hadoop.hive.ql.metadata.Partition createDummyPartition(long createTime) {
 org.apache.hadoop.hive.ql.metadata.Partition partition = new org.apache.hadoop.hive.ql.metadata.Partition();
 Partition tPartition = new Partition();
 tPartition.setCreateTime((int) TimeUnit.SECONDS.convert(createTime, TimeUnit.MILLISECONDS));
 partition.setTPartition(tPartition);

 return partition;
}

Code example source: apache/incubator-gobblin

@Test
public void testDefaults() throws Exception {
 DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
 String tableName = "VfTb1";
 Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("datepartition"));
 org.apache.hadoop.hive.metastore.api.Partition tp =
   this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016-01-01-20"), (int) System.currentTimeMillis());
 Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
 assertThat(partition.getName(), is("datepartition=2016-01-01-20"));
 TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
 Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}

Code example source: apache/incubator-gobblin

@Test
public void testMultiplePartitionFields() throws Exception {
 DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, ConfigFactory.empty());
 String tableName = "VfTb3";

 Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("datepartition", "field1"));
 org.apache.hadoop.hive.metastore.api.Partition tp =
   this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016-01-01-20", "f1"), (int) System.currentTimeMillis());
 Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);

 assertThat(partition.getName(), anyOf(is("field1=f1/datepartition=2016-01-01-20"), is("datepartition=2016-01-01-20/field1=f1")));
 TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
 Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}

Code example source: apache/incubator-gobblin

private HiveProcessingEntity getConversionEntity(HiveWorkUnit hiveWorkUnit) throws IOException, TException,
                                          HiveException {
 try (AutoReturnableObject<IMetaStoreClient> client = this.pool.getClient()) {
  HiveDataset dataset = hiveWorkUnit.getHiveDataset();
  HiveDatasetFinder.DbAndTable dbAndTable = dataset.getDbAndTable();
  Table table = new Table(client.get().getTable(dbAndTable.getDb(), dbAndTable.getTable()));
  Partition partition = null;
  if (hiveWorkUnit.getPartitionName().isPresent()) {
   partition = new Partition(table, client.get()
     .getPartition(dbAndTable.getDb(), dbAndTable.getTable(), hiveWorkUnit.getPartitionName().get()));
  }
  return new HiveProcessingEntity(dataset, table, Optional.fromNullable(partition));
 }
}

Code example source: apache/incubator-gobblin

@Test
public void testUserDefinedDatePattern() throws Exception {
 String tableName = "VfTb2";
 Config conf =
   ConfigFactory.parseMap(ImmutableMap.<String, String> of(DatePartitionHiveVersionFinder.PARTITION_KEY_NAME_KEY, "field1",
     DatePartitionHiveVersionFinder.PARTITION_VALUE_DATE_TIME_PATTERN_KEY, "yyyy/MM/dd/HH"));
 DatePartitionHiveVersionFinder versionFinder = new DatePartitionHiveVersionFinder(this.fs, conf);
 Table tbl = this.hiveMetastoreTestUtils.createTestAvroTable(dbName, tableName, ImmutableList.of("field1"));
 org.apache.hadoop.hive.metastore.api.Partition tp =
   this.hiveMetastoreTestUtils.addTestPartition(tbl, ImmutableList.of("2016/01/01/20"), (int) System.currentTimeMillis());
 Partition partition = new Partition(new org.apache.hadoop.hive.ql.metadata.Table(tbl), tp);
 Assert.assertEquals(URLDecoder.decode(partition.getName(), "UTF-8"), "field1=2016/01/01/20");
 TimestampedHiveDatasetVersion dv = versionFinder.getDatasetVersion(partition);
 Assert.assertEquals(dv.getDateTime(), formatter.parseDateTime("2016/01/01/20"));
}

Code example source: apache/incubator-gobblin

private static Partition localTestPartition(Table table, List<String> values) throws Exception {
 return new Partition(table,
   LocalHiveMetastoreTestUtils.getInstance().addTestPartition(table.getTTable(), values, 0));
}

Code example source: apache/hive

@Before
public void setup() throws Exception {
 queryState = new QueryState.Builder().build();
 db = Mockito.mock(Hive.class);
 HiveConf hiveConf = queryState.getConf();
 table = new Table(DB, TABLE);
 partition = new Partition(table);
 hiveConf
 .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
   "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
 SessionState.start(hiveConf);
 Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
 Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
 Mockito.when(db.getPartition(table, new HashMap<String, String>(), false))
 .thenReturn(partition);
}

Code example source: apache/incubator-gobblin

private Partition getTestPartition(Table table) throws HiveException {
 Partition partition = new Partition(table, ImmutableMap.of("partition_key", "1"), null);
 StorageDescriptor sd = new StorageDescriptor();
 sd.setSerdeInfo(new SerDeInfo("avro", AvroSerDe.class.getName(), null));
 sd.setCols(Lists.newArrayList(new FieldSchema("foo", "int", null)));
 partition.getTPartition().setSd(sd);
 return partition;
}

Code example source: apache/incubator-gobblin

private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
 try {
  Partition targetPartition = new Partition(this.hiveCopyEntityHelper.getTargetTable(), originPartition.getTPartition().deepCopy());
  targetPartition.getTable().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
  targetPartition.getTPartition().setDbName(this.hiveCopyEntityHelper.getTargetDatabase());
  targetPartition.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
  targetPartition.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
    Long.toString(this.hiveCopyEntityHelper.getStartTime()));
  targetPartition.setLocation(targetLocation.toString());
  targetPartition.getTPartition().unsetCreateTime();
  return targetPartition;
 } catch (HiveException he) {
  throw new IOException(he);
 }
}
