本文整理了Java中org.apache.hadoop.hive.metastore.api.Partition.getWriteId()
方法的一些代码示例,展示了Partition.getWriteId()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.getWriteId()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Partition
类名称:Partition
方法名:getWriteId
方法描述:暂无
代码示例来源:origin: apache/hive
writeIdString == null ? -1 : part.getWriteId(), writeIdString);
代码示例来源:origin: apache/hive
updatePartitonColStatsInternal(tbl, newPart.getColStats(), null, newPart.getWriteId());
代码示例来源:origin: apache/hive
new AlterPartitionEvent(oldPart, newPart, tbl, false, true, newPart.getWriteId(), handler),
environmentContext);
代码示例来源:origin: apache/hive
EventType.ALTER_PARTITION,
new AlterPartitionEvent(oldPart, new_part, table, false,
true, new_part.getWriteId(), this),
envContext);
代码示例来源:origin: apache/hive
/**
 * Returns the value of the Thrift field identified by {@code field}.
 *
 * <p>Each {@code _Fields} constant is mapped to the corresponding getter of this
 * {@code Partition} instance. An {@link IllegalStateException} is thrown for any
 * field constant that has no mapping here.
 *
 * @param field the Thrift field descriptor to read
 * @return the current value of that field (boxed where primitive)
 * @throws IllegalStateException if {@code field} is not a known field of this struct
 */
public Object getFieldValue(_Fields field) {
  return switch (field) {
    case VALUES -> getValues();
    case DB_NAME -> getDbName();
    case TABLE_NAME -> getTableName();
    case CREATE_TIME -> getCreateTime();
    case LAST_ACCESS_TIME -> getLastAccessTime();
    case SD -> getSd();
    case PARAMETERS -> getParameters();
    case PRIVILEGES -> getPrivileges();
    case CAT_NAME -> getCatName();
    case WRITE_ID -> getWriteId();
    case IS_STATS_COMPLIANT -> isIsStatsCompliant();
    case COL_STATS -> getColStats();
    default -> throw new IllegalStateException();
  };
}
代码示例来源:origin: apache/hive
if (!isCurrentStatsValidForTheQuery(part, part.getWriteId(), writeIdList, false)) {
String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
LOG.debug("The current metastore transactional partition column statistics for " + dbName
代码示例来源:origin: apache/hive
/**
 * Aligns a locally built {@code Partition} with the server-side copy so that a later
 * equality comparison succeeds.
 *
 * <p>Fetches the stored partition and copies over the server-assigned write id; when
 * talking through the Thrift client it additionally copies the server-side create time
 * and the DDL_TIME parameter, which are set by the metastore rather than the caller.
 *
 * @param client metastore client used to fetch the stored partition
 * @param part the locally constructed partition to adjust (mutated in place)
 * @param dbName database of the partition's table
 * @param tblName name of the partition's table
 * @param isThriftClient whether the client goes over Thrift (server sets timestamps)
 * @throws TException on metastore communication failure
 */
private static void adjust(HiveMetaStoreClient client, Partition part,
    String dbName, String tblName, boolean isThriftClient) throws TException {
  Partition stored = client.getPartition(dbName, tblName, part.getValues());
  if (isThriftClient) {
    // The metastore assigns the create time; mirror it (and DDL_TIME) locally.
    part.setCreateTime(stored.getCreateTime());
    part.putToParameters(
        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME,
        String.valueOf(stored.getCreateTime()));
  }
  // Write id is always assigned server-side.
  part.setWriteId(stored.getWriteId());
}
代码示例来源:origin: apache/hive
/**
 * Appending a partition (by value list) to an external table should create it and
 * the stored partition should match the one returned by appendPartition.
 */
@Test
public void testAppendPartitionToExternalTable() throws Exception {
  List<String> values = Lists.newArrayList("2017", "may");
  Table table = externalTable;

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), values);
  Assert.assertNotNull(appended);

  Partition stored =
      client.getPartition(table.getDbName(), table.getTableName(), values);
  // The write id is assigned server-side; copy it so equals() compares the rest.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);

  verifyPartition(stored, table, values, "year=2017/month=may");
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may"));
}
代码示例来源:origin: apache/hive
EventMessage.EventType.ALTER_PARTITION,
new AlterPartitionEvent(oldPart, new_part, tbl, false,
true, new_part.getWriteId(), handler),
environmentContext);
try {
msdb.updatePartitionColumnStatistics(cs, new_part.getValues(),
validWriteIds, new_part.getWriteId());
} catch (InvalidInputException iie) {
throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
EventMessage.EventType.ALTER_PARTITION,
new AlterPartitionEvent(oldPart, new_part, tbl, false,
true, new_part.getWriteId(), handler),
environmentContext);
代码示例来源:origin: apache/hive
/**
 * Appending a partition (by value list) to a table that already has partitions should
 * add it alongside the existing ones, and the stored partition should match the one
 * returned by appendPartition.
 */
@Test
public void testAppendPartition() throws Exception {
  List<String> values = Lists.newArrayList("2017", "may");
  Table table = tableWithPartitions;

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), values);
  Assert.assertNotNull(appended);

  Partition stored =
      client.getPartition(table.getDbName(), table.getTableName(), values);
  // The write id is assigned server-side; copy it so equals() compares the rest.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);

  verifyPartition(stored, table, values, "year=2017/month=may");
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
      "year=2018/month=march", "year=2017/month=may"));
}
代码示例来源:origin: apache/hive
/**
 * Cache-aware lookup of a single partition.
 *
 * <p>Serves the partition from the shared cache when possible; otherwise delegates to
 * the underlying {@code rawStore}. Delegation happens when (a) the table is not
 * cacheable or an event-driven active transaction is in flight, (b) the partition is
 * not yet loaded in the cache, or (c) stats adjustment is requested but the owning
 * table is not yet cached. When {@code validWriteIds} is supplied, the cached
 * partition's stats parameters are adjusted for the caller's write-id visibility.
 *
 * @param catName catalog name (normalized before lookup)
 * @param dbName database name (normalized before lookup)
 * @param tblName table name (normalized before lookup)
 * @param part_vals partition key values identifying the partition
 * @param validWriteIds caller's valid write-id list, or null to skip stats adjustment
 * @return the partition, possibly with stats parameters adjusted
 * @throws MetaException on metastore errors
 * @throws NoSuchObjectException if the partition does not exist
 */
@Override
public Partition getPartition(String catName, String dbName, String tblName,
    List<String> part_vals, String validWriteIds)
    throws MetaException, NoSuchObjectException {
  catName = normalizeIdentifier(catName);
  dbName = StringUtils.normalizeIdentifier(dbName);
  tblName = StringUtils.normalizeIdentifier(tblName);

  boolean bypassCache = !shouldCacheTable(catName, dbName, tblName)
      || (canUseEvents && rawStore.isActiveTransaction());
  if (bypassCache) {
    return rawStore.getPartition(catName, dbName, tblName, part_vals, validWriteIds);
  }

  Partition cached = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
  if (cached == null) {
    // The table containing the partition is not yet loaded in cache.
    return rawStore.getPartition(catName, dbName, tblName, part_vals, validWriteIds);
  }

  if (validWriteIds != null) {
    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
    if (table == null) {
      // The table containing the partition is not yet loaded in cache.
      return rawStore.getPartition(catName, dbName, tblName, part_vals, validWriteIds);
    }
    // Make the cached stats parameters consistent with the caller's write-id view.
    cached.setParameters(adjustStatsParamsForGet(table.getParameters(),
        cached.getParameters(), cached.getWriteId(), validWriteIds));
  }
  return cached;
}
代码示例来源:origin: apache/hive
newPart.getWriteId(), validWriteIds, false);
if (errorMsg != null) {
throw new MetaException(errorMsg);
if (!areTxnStatsSupported) {
StatsSetupConst.setBasicStatsState(oldp.getParameters(), StatsSetupConst.FALSE);
} else if (validWriteIds != null && newPart.getWriteId() > 0) {
dbname + "." + name + "." + oldp.getPartitionName() + " will be made persistent.");
oldp.setWriteId(newPart.getWriteId());
代码示例来源:origin: apache/hive
/**
 * Appending a partition (by partition name) to an external table should create it and
 * the stored partition should match the one returned by appendPartition.
 */
@Test
public void testAppendPartToExternalTable() throws Exception {
  Table table = externalTable;
  String partitionName = "year=2017/month=may";

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
  Assert.assertNotNull(appended);

  Partition stored = client.getPartition(table.getDbName(), table.getTableName(),
      getPartitionValues(partitionName));
  // The write id is assigned server-side; copy it so equals() compares the rest.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);

  verifyPartition(stored, table, getPartitionValues(partitionName), partitionName);
  verifyPartitionNames(table, Lists.newArrayList(partitionName));
}
代码示例来源:origin: apache/hive
/**
 * Appending a partition (by partition name) to a table that already has partitions
 * should add it alongside the existing ones, and the stored partition should match
 * the one returned by appendPartition.
 */
@Test
public void testAppendPart() throws Exception {
  Table table = tableWithPartitions;
  String partitionName = "year=2017/month=may";

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
  Assert.assertNotNull(appended);

  Partition stored = client.getPartition(table.getDbName(), table.getTableName(),
      getPartitionValues(partitionName));
  // The write id is assigned server-side; copy it so equals() compares the rest.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);

  verifyPartition(stored, table, getPartitionValues(partitionName), partitionName);
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
      "year=2018/month=march", partitionName));
}
代码示例来源:origin: apache/hive
Partition fetched =
client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
created.setWriteId(fetched.getWriteId());
Assert.assertEquals(created, fetched);
Assert.assertEquals("a2", created.getValues().get(0));
fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
created.setWriteId(fetched.getWriteId());
Assert.assertEquals(created, fetched);
代码示例来源:origin: apache/hive
Partition part = msc.getPartition("hive2038", "tmptbl", "b=2011");
Partition partAdded = partEvent.getPartitionIterator().next();
partAdded.setWriteId(part.getWriteId());
validateAddPartition(part, partAdded);
validateTableInAddPartition(tbl, partEvent.getTable());
代码示例来源:origin: apache/hive
/**
 * An HCat ALTER_PARTITION notification event should round-trip into a
 * ReplicationTask whose event matches the original notification.
 */
@Test
public void testAlterPartition() throws HCatException {
  // Minimal table with two partition keys (a:int, b:string).
  Table table = new Table();
  table.setDbName("testdb");
  table.setTableName("testtable");
  List<FieldSchema> partitionKeys = HCatSchemaUtils.getFieldSchemas(
      HCatSchemaUtils.getHCatSchema("a:int,b:string").getFields());
  table.setPartitionKeys(partitionKeys);

  Partition partition = createPtn(table, Arrays.asList("102", "lmn"));

  // Build an ALTER_PARTITION event where before/after are the same partition.
  NotificationEvent event = new NotificationEvent(getEventId(), getTime(),
      HCatConstants.HCAT_ALTER_PARTITION_EVENT,
      msgFactory.buildAlterPartitionMessage(table, partition, partition,
          partition.getWriteId()).toString());
  event.setDbName(table.getDbName());
  event.setTableName(table.getTableName());

  HCatNotificationEvent hcatEvent = new HCatNotificationEvent(event);
  ReplicationTask task = ReplicationTask.create(client, hcatEvent);
  assertEquals(hcatEvent.toString(), task.getEvent().toString());
  verifyAlterPartitionReplicationTask(task, table, partition);
}
内容来源于网络,如有侵权,请联系作者删除!