本文整理了Java中org.apache.hadoop.hive.metastore.api.Partition.setWriteId()
方法的一些代码示例,展示了Partition.setWriteId()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.setWriteId()
方法的具体详情如下:
包路径:org.apache.hadoop.hive.metastore.api.Partition
类名称:Partition
方法名:setWriteId
暂无
代码示例来源:origin: apache/hive
List<String> tmpPartVals = part_val_itr.next();
if (writeId > 0) {
tmpPart.setWriteId(writeId);
代码示例来源:origin: apache/hive
/**
 * Updates a partition's metadata after a truncate: resets its stats, fires the
 * ALTER_PARTITION event on both listener groups, then persists the change.
 *
 * @param ms            raw store used by the alter handler to persist the change
 * @param catName       catalog name
 * @param dbName        database name
 * @param tableName     table name
 * @param table         table owning the truncated partition
 * @param partition     partition whose metadata is being updated
 * @param validWriteIds valid write-id list forwarded to the alter handler
 * @param writeId       write id of the truncating operation; applied only when positive
 * @throws Exception if listener notification or the alter operation fails
 */
private void alterPartitionForTruncate(RawStore ms, String catName, String dbName, String tableName,
    Table table, Partition partition, String validWriteIds, long writeId) throws Exception {
  EnvironmentContext ctx = new EnvironmentContext();
  updateStatsForTruncate(partition.getParameters(), ctx);

  // Notify transactional listeners first, then the regular ones; each group gets
  // its own event instance, exactly as two independent notifications.
  if (!transactionalListeners.isEmpty()) {
    MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ALTER_PARTITION,
        new AlterPartitionEvent(partition, partition, table, true, true, writeId, this));
  }
  if (!listeners.isEmpty()) {
    MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_PARTITION,
        new AlterPartitionEvent(partition, partition, table, true, true, writeId, this));
  }

  // A non-positive writeId indicates no transactional write id; leave it untouched.
  if (writeId > 0) {
    partition.setWriteId(writeId);
  }
  alterHandler.alterPartition(ms, wh, catName, dbName, tableName, null, partition, ctx, this,
      validWriteIds);
}
代码示例来源:origin: apache/hive
/**
 * Creates a single partition on the given table via the metastore client.
 *
 * @param tbl table for which the partition needs to be created
 * @param partSpec mapping of partition column names to their values
 * @return the created partition object
 * @throws HiveException if the table doesn't exist or the partition already exists
 */
@VisibleForTesting
public Partition createPartition(Table tbl, Map<String, String> partSpec) throws HiveException {
  try {
    org.apache.hadoop.hive.metastore.api.Partition metaPart =
        Partition.createMetaPartitionObject(tbl, partSpec, null);
    AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, tbl);
    // Non-transactional tables yield no snapshot; fall back to write id 0.
    long writeId = (snapshot == null) ? 0 : snapshot.getWriteId();
    metaPart.setWriteId(writeId);
    return new Partition(tbl, getMSC().add_partition(metaPart));
  } catch (Exception e) {
    LOG.error(StringUtils.stringifyException(e));
    throw new HiveException(e);
  }
}
代码示例来源:origin: apache/hive
part.setWriteId(request.getWriteId());
StatsSetupConst.clearColumnStatsState(part.getParameters());
StatsSetupConst.setBasicStatsState(part.getParameters(), StatsSetupConst.FALSE);
代码示例来源:origin: apache/hive
newPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
validWriteIds = tableSnapshot.getValidWriteIdList();
代码示例来源:origin: apache/hive
Long writeId = MetastoreDirectSqlUtils.extractSqlLong(fields[14]);
if (writeId != null) {
part.setWriteId(writeId);
代码示例来源:origin: apache/hive
/**
 * Converts a persistent {@code MPartition} into its Thrift {@code Partition}
 * representation, attaching the supplied catalog name and the stored write id.
 *
 * @param catName catalog the partition belongs to
 * @param dbName  database name to set on the result
 * @param tblName table name to set on the result
 * @param mpart   persistent partition; may be null
 * @return the converted partition, or null when {@code mpart} is null
 * @throws MetaException on conversion failure
 */
private Partition convertToPart(String catName, String dbName, String tblName, MPartition mpart)
    throws MetaException {
  if (mpart == null) {
    return null;
  }
  Partition result = new Partition(convertList(mpart.getValues()), dbName, tblName,
      mpart.getCreateTime(), mpart.getLastAccessTime(),
      convertToStorageDescriptor(mpart.getSd(), false), convertMap(mpart.getParameters()));
  result.setCatName(catName);
  result.setWriteId(mpart.getWriteId());
  return result;
}
代码示例来源:origin: apache/hive
/**
 * Aligns a locally built partition with the server-side copy so equality
 * comparisons don't fail on server-assigned fields (create time, write id).
 */
private static void adjust(HiveMetaStoreClient client, Partition part,
    String dbName, String tblName, boolean isThriftClient) throws TException {
  Partition stored = client.getPartition(dbName, tblName, part.getValues());
  if (isThriftClient) {
    // Thrift clients get the create time from the server; mirror it locally.
    int createTime = stored.getCreateTime();
    part.setCreateTime(createTime);
    part.putToParameters(
        org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME,
        Long.toString(createTime));
  }
  part.setWriteId(stored.getWriteId());
}
代码示例来源:origin: apache/hive
/**
 * Alters a partition through the metastore client, wrapping the call in a
 * transaction context when one is generated for the table.
 */
private void alterPartitionInternal(Table table,
    org.apache.hadoop.hive.ql.metadata.Partition modifiedPart) throws HiveException {
  IMetaStoreClient client = getMSC();
  TxnCtx ctx = generateTxnCtxForAlter(table, client, null);
  boolean succeeded = false;
  try {
    String validWriteIds = null;
    if (ctx != null) {
      // Propagate the transaction's write id onto the partition before altering.
      validWriteIds = ctx.validWriteIds;
      modifiedPart.getTPartition().setWriteId(ctx.writeId);
    }
    client.alter_partition(table.getCatName(), table.getDbName(), table.getTableName(),
        modifiedPart.getTPartition(), null, validWriteIds);
    succeeded = true;
  } catch (TException ex) {
    throw new HiveException(ex);
  } finally {
    // Commits or aborts the transaction depending on whether the alter succeeded.
    closeTxnCtx(ctx, client, succeeded);
  }
}
代码示例来源:origin: apache/hive
convertAddSpecToMetaPartition(tbl, addPartitionDesc.getPartition(i), conf);
if (tmpPart != null && tableSnapshot != null && tableSnapshot.getWriteId() > 0) {
tmpPart.setWriteId(tableSnapshot.getWriteId());
代码示例来源:origin: apache/hive
@Test
public void testAppendPartitionToExternalTable() throws Exception {
  List<String> values = Lists.newArrayList("2017", "may");
  Table table = externalTable;

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), values);
  Assert.assertNotNull(appended);

  Partition stored =
      client.getPartition(table.getDbName(), table.getTableName(), values);
  // Copy the server-assigned write id so the equality assertion ignores it.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);
  verifyPartition(stored, table, values, "year=2017/month=may");
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=may"));
}
代码示例来源:origin: apache/hive
@Test
public void testAppendPartition() throws Exception {
  List<String> values = Lists.newArrayList("2017", "may");
  Table table = tableWithPartitions;

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), values);
  Assert.assertNotNull(appended);

  Partition stored =
      client.getPartition(table.getDbName(), table.getTableName(), values);
  // Copy the server-assigned write id so the equality assertion ignores it.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);
  verifyPartition(stored, table, values, "year=2017/month=may");
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
      "year=2018/month=march", "year=2017/month=may"));
}
代码示例来源:origin: apache/hive
tableSnapshot = AcidUtils.getTableSnapshot(conf, newPart.getTable(), true);
if (tableSnapshot != null) {
newPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
} else {
LOG.warn("Cannot get a table snapshot for " + tblName);
代码示例来源:origin: apache/hive
newTPart.getTPartition().setWriteId(tableSnapshot.getWriteId());
代码示例来源:origin: apache/hive
@Test
public void testAppendPartToExternalTable() throws Exception {
  Table table = externalTable;
  String partitionName = "year=2017/month=may";

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
  Assert.assertNotNull(appended);

  Partition stored = client.getPartition(table.getDbName(), table.getTableName(),
      getPartitionValues(partitionName));
  // Copy the server-assigned write id so the equality assertion ignores it.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);
  verifyPartition(stored, table, getPartitionValues(partitionName), partitionName);
  verifyPartitionNames(table, Lists.newArrayList(partitionName));
}
代码示例来源:origin: apache/hive
/**
 * Converts an {@code MPartition} into a Thrift {@code Partition}, resolving the
 * database/table/catalog names from the attached {@code MTable} when present.
 * Tolerates partially filled objects: a missing table or database yields null names.
 *
 * @param mpart persistent partition; may be null or partially populated
 * @return the converted partition, or null when {@code mpart} is null
 * @throws MetaException on conversion failure
 */
private Partition convertToPart(MPartition mpart) throws MetaException {
  if (mpart == null) {
    return null;
  }
  // MPartition may be partially filled; null-check table and database to avoid NPEs.
  MTable mtable = mpart.getTable();
  String tableName = (mtable == null) ? null : mtable.getTableName();
  String dbName = (mtable == null || mtable.getDatabase() == null)
      ? null : mtable.getDatabase().getName();
  String catName = (mtable == null || mtable.getDatabase() == null)
      ? null : mtable.getDatabase().getCatalogName();
  Partition result = new Partition(convertList(mpart.getValues()), dbName, tableName,
      mpart.getCreateTime(), mpart.getLastAccessTime(),
      convertToStorageDescriptor(mpart.getSd()), convertMap(mpart.getParameters()));
  result.setCatName(catName);
  result.setWriteId(mpart.getWriteId());
  return result;
}
代码示例来源:origin: apache/hive
@Test
public void testAppendPart() throws Exception {
  Table table = tableWithPartitions;
  String partitionName = "year=2017/month=may";

  Partition appended =
      client.appendPartition(table.getDbName(), table.getTableName(), partitionName);
  Assert.assertNotNull(appended);

  Partition stored = client.getPartition(table.getDbName(), table.getTableName(),
      getPartitionValues(partitionName));
  // Copy the server-assigned write id so the equality assertion ignores it.
  appended.setWriteId(stored.getWriteId());
  Assert.assertEquals(stored, appended);
  verifyPartition(stored, table, getPartitionValues(partitionName), partitionName);
  verifyPartitionNames(table, Lists.newArrayList("year=2017/month=march", "year=2017/month=april",
      "year=2018/month=march", partitionName));
}
代码示例来源:origin: apache/hive
Partition fetched =
client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
created.setWriteId(fetched.getWriteId());
Assert.assertEquals(created, fetched);
Assert.assertEquals("a2", created.getValues().get(0));
fetched = client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
created.setWriteId(fetched.getWriteId());
Assert.assertEquals(created, fetched);
代码示例来源:origin: apache/hive
part1.setWriteId(badWriteId);
part2.setWriteId(badWriteId);
String currentWriteIds = msClient.getValidWriteIds(fqName).toString();
代码示例来源:origin: apache/hive
unsetWriteId();
} else {
setWriteId((Long)value);
内容来源于网络,如有侵权,请联系作者删除!