org.apache.hadoop.hive.ql.metadata.Partition.setLocation()方法的使用及代码示例

x33g5p2x  于2022-01-26 转载在 其他  
字(10.4k)|赞(0)|评价(0)|浏览(117)

本文整理了Java中org.apache.hadoop.hive.ql.metadata.Partition.setLocation()方法的一些代码示例,展示了Partition.setLocation()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Partition.setLocation()方法的具体详情如下:
包路径:org.apache.hadoop.hive.ql.metadata.Partition
类名称:Partition
方法名:setLocation

Partition.setLocation介绍

暂无

代码示例

代码示例来源:origin: apache/hive

/**
 * Points an existing partition at a new filesystem location and persists the
 * change via alterPartitionInternal. Logs the equivalent ALTER TABLE DDL for
 * traceability before applying it.
 *
 * @param dbName      database name (currently unused here; kept for callers)
 * @param table       thrift table the partition belongs to
 * @param partName    partition name used only in the log message
 * @param part        thrift partition to relocate
 * @param newLocation new partition location
 * @throws HiveException if the metastore update fails
 * @throws TException    on thrift transport errors
 */
void updatePartitionLocation(String dbName, Table table, String partName,
    Partition part, Path newLocation) throws HiveException, TException {
  // Record the change as the DDL statement it is equivalent to.
  LOG.info(String.format("ALTER TABLE %s PARTITION (%s) SET LOCATION '%s'",
      getQualifiedName(table), partName, newLocation.toString()));
  org.apache.hadoop.hive.ql.metadata.Partition updatedPart =
      new org.apache.hadoop.hive.ql.metadata.Partition(
          new org.apache.hadoop.hive.ql.metadata.Table(table), part);
  updatedPart.setLocation(newLocation.toString());
  alterPartitionInternal(table, updatedPart);
}

代码示例来源:origin: apache/hive

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 */
/**
 * Marks the supplied Partition object as archived by mutating its attributes
 * in place: records the archive level, remembers the current location as the
 * original one, and repoints the partition at the har URI. The metastore is
 * not touched — callers must issue a separate alter_partition.
 *
 * @param p       the partition object to modify
 * @param harPath new location of the partition (har schema URI)
 * @param level   archiving level to record on the partition
 */
private void setArchived(Partition p, Path harPath, int level) {
  // Precondition: must not already be archived.
  assert !ArchiveUtils.isArchived(p);
  setIsArchived(p, true, level);
  setOriginalLocation(p, p.getLocation());
  p.setLocation(harPath.toString());
}

代码示例来源:origin: apache/drill

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 */
/**
 * Flags the given partition as archived: stores the archive level, saves the
 * partition's current location as its original location, then switches the
 * location to the har URI. Metastore state is unchanged; a follow-up
 * alter_partition call is required to persist this.
 *
 * @param p       the partition object to modify
 * @param harPath new location of the partition (har schema URI)
 * @param level   archiving level to record
 */
private void setArchived(Partition p, Path harPath, int level) {
  assert !ArchiveUtils.isArchived(p);  // must start unarchived
  setIsArchived(p, true, level);
  setOriginalLocation(p, p.getLocation());
  p.setLocation(harPath.toString());
}

代码示例来源:origin: apache/hive

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
/**
 * Reverts an archived partition's attributes: clears the archived flag and
 * level, drops the stored original location, and restores it as the
 * partition's active location. The metastore is not touched — a separate
 * alter_partition call is needed to persist the change.
 *
 * @param p the partition to modify
 */
private void setUnArchived(Partition p) {
  assert ArchiveUtils.isArchived(p);  // only valid on archived partitions
  String restoredLocation = getOriginalLocation(p);
  // The original location must have been recorded when archiving.
  assert restoredLocation != null;
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  p.setLocation(restoredLocation);
}

代码示例来源:origin: apache/drill

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
/**
 * Undoes the archive marking on a partition: resets the archived flag/level,
 * erases the saved original location, and points the partition back at that
 * saved location. Does not update the metastore; callers must follow up with
 * alter_partition.
 *
 * @param p the partition to modify
 */
private void setUnArchived(Partition p) {
  assert ArchiveUtils.isArchived(p);
  String previousLocation = getOriginalLocation(p);
  assert previousLocation != null;  // recorded at archive time
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  p.setLocation(previousLocation);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the partition object to register on the copy target: a deep copy of
 * the origin partition rebound to the target database/table, tagged with
 * Gobblin distcp registration metadata, relocated to targetLocation, and with
 * its create time cleared.
 *
 * @param originPartition source partition to copy metadata from
 * @param targetLocation  data location for the target partition
 * @return the fully populated target partition
 * @throws IOException wrapping any HiveException from partition construction
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    String targetDb = this.hiveCopyEntityHelper.getTargetDatabase();
    Partition copy = new Partition(
        this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    copy.getTable().setDbName(targetDb);
    copy.getTPartition().setDbName(targetDb);
    // Mark the partition as registered by Gobblin distcp, with the run's start time.
    copy.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    copy.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    copy.setLocation(targetLocation.toString());
    copy.getTPartition().unsetCreateTime();
    return copy;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

代码示例来源:origin: apache/hive

if (location != null) {
 location = Utilities.getQualifiedPath(conf, new Path(location));
 tmpPart.setLocation(location);

代码示例来源:origin: apache/hive

if (location != null) {
 location = Utilities.getQualifiedPath(conf, new Path(location));
 newPart.setLocation(location);

代码示例来源:origin: apache/hive

part.setLocation(newLocation);
 authorize(part, Privilege.ALTER_DATA);
} else {

代码示例来源:origin: apache/lens

/**
 * Adds the partition.
 *
 * @param eventName the event name
 * @param key       the key
 * @param finalPath the final path
 * @param className the class name
 * @return true, if successful
 */
/**
 * Creates a partition keyed by "dt" for the given event's table, points it at
 * finalPath, and persists the change via alterPartition. Best-effort: any
 * failure is logged and reported through the return value rather than thrown.
 *
 * @param eventName the event name
 * @param key       the "dt" partition key value
 * @param finalPath the final path of the partition data
 * @param className the class name used to resolve the table
 * @return true if the partition was added, false on any failure
 */
private boolean addPartition(String eventName, String key, Path finalPath, String className) {
  try {
    Table eventTable = getTable(eventName, className);
    HashMap<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("dt", key);
    Partition partition = client.createPartition(eventTable, partSpec);
    partition.setLocation(finalPath.toString());
    client.alterPartition(database, eventName, partition, null);
    return true;
  } catch (Exception e) {
    // Deliberately swallow: callers treat a false return as "not added".
    LOG.warn("Unable to add the partition ", e);
    return false;
  }
}

代码示例来源:origin: com.facebook.presto.hive/hive-apache

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of partition (har schema URI)
 */
/**
 * Stamps archive state onto the partition object: sets the archived flag with
 * the given level, preserves the current location as the original, and swaps
 * in the har URI as the new location. No metastore interaction happens here;
 * a separate alter_partition call is required.
 *
 * @param p       the partition object to modify
 * @param harPath new location of the partition (har schema URI)
 * @param level   archiving level to record
 */
private void setArchived(Partition p, Path harPath, int level) {
  assert !ArchiveUtils.isArchived(p);  // archiving twice is a bug
  setIsArchived(p, true, level);
  setOriginalLocation(p, p.getLocation());
  p.setLocation(harPath.toString());
}

代码示例来源:origin: org.apache.hadoop.hive/hive-exec

p.setLocation(harUri.toString());

代码示例来源:origin: com.facebook.presto.hive/hive-apache

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
/**
 * Clears the archived marking on a partition and restores the location that
 * was saved when it was archived. The metastore is not modified; persist via
 * a separate alter_partition call.
 *
 * @param p the partition to modify
 */
private void setUnArchived(Partition p) {
  assert ArchiveUtils.isArchived(p);
  String savedDir = getOriginalLocation(p);
  assert savedDir != null;  // must have been set during archiving
  setIsArchived(p, false, 0);
  setOriginalLocation(p, null);
  p.setLocation(savedDir);
}

代码示例来源:origin: org.apache.hadoop.hive/hive-exec

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as not archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition to modify
 */
/**
 * Resets a partition's archive attributes (older two-argument setIsArchived
 * variant): clears the flag, drops the stored original location, and restores
 * it as the active location. The metastore is untouched; a separate
 * alter_partition call is needed.
 *
 * @param p the partition to modify
 */
private void setUnArchived(Partition p) {
  assert isArchived(p);  // only meaningful for archived partitions
  String savedLocation = getOriginalLocation(p);
  assert savedLocation != null;  // recorded when the partition was archived
  setIsArchived(p, false);
  setOriginalLocation(p, null);
  p.setLocation(savedLocation);
}

代码示例来源:origin: org.apache.lens/lens-cube

latestPart.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
 partition.getTPartition().getSd().getSerdeInfo().getParameters());
latestPart.setLocation(partition.getLocation());
latestPart.setInputFormatClass(partition.getInputFormatClass());
latestPart.setOutputFormatClass(partition.getOutputFormatClass().asSubclass(HiveOutputFormat.class));

代码示例来源:origin: apache/lens

latestPart.getTPartition().getSd().getSerdeInfo().getParameters().putAll(
 partition.getTPartition().getSd().getSerdeInfo().getParameters());
latestPart.setLocation(partition.getLocation());
latestPart.setInputFormatClass(partition.getInputFormatClass());
latestPart.setOutputFormatClass(partition.getOutputFormatClass().asSubclass(HiveOutputFormat.class));

代码示例来源:origin: org.apache.lens/lens-cube

/**
 * Applies metadata from an XPartition DTO onto a Hive Partition: partition
 * and serde parameters, location, optional input/output format classes, the
 * update-period parameter, and the serialization library.
 *
 * @param partition the Hive partition to update in place
 * @param xp        the source XPartition
 * @throws ClassNotFoundException if a named input/output format class cannot be loaded
 */
public static void updatePartitionFromXPartition(Partition partition, XPartition xp) throws ClassNotFoundException {
  partition.getParameters().putAll(mapFromXProperties(xp.getPartitionParameters()));
  partition.getTPartition().getSd().getSerdeInfo().setParameters(mapFromXProperties(xp.getSerdeParameters()));
  partition.setLocation(xp.getLocation());
  String inputFormatName = xp.getInputFormat();
  if (inputFormatName != null) {
    partition.setInputFormatClass(Class.forName(inputFormatName).asSubclass(InputFormat.class));
  }
  String outputFormatName = xp.getOutputFormat();
  if (outputFormatName != null) {
    partition.setOutputFormatClass(
        Class.forName(outputFormatName).asSubclass(HiveOutputFormat.class));
  }
  partition.getParameters().put(MetastoreConstants.PARTITION_UPDATE_PERIOD, xp.getUpdatePeriod().name());
  partition.getTPartition().getSd().getSerdeInfo().setSerializationLib(xp.getSerdeClassname());
}

代码示例来源:origin: apache/lens

/**
 * Copies fields from an XPartition onto the given Hive Partition in place:
 * merges partition parameters, replaces serde parameters, sets the location,
 * resolves and sets input/output format classes when present, records the
 * update period, and sets the serde class name.
 *
 * @param partition the Hive partition to mutate
 * @param xp        the XPartition supplying the new values
 * @throws ClassNotFoundException if an input/output format class name does not resolve
 */
public static void updatePartitionFromXPartition(Partition partition, XPartition xp) throws ClassNotFoundException {
  partition.getParameters().putAll(mapFromXProperties(xp.getPartitionParameters()));
  partition.getTPartition().getSd().getSerdeInfo().setParameters(mapFromXProperties(xp.getSerdeParameters()));
  partition.setLocation(xp.getLocation());
  if (xp.getInputFormat() != null) {
    Class<? extends InputFormat> inputFormatClass =
        Class.forName(xp.getInputFormat()).asSubclass(InputFormat.class);
    partition.setInputFormatClass(inputFormatClass);
  }
  if (xp.getOutputFormat() != null) {
    partition.setOutputFormatClass(
        Class.forName(xp.getOutputFormat()).asSubclass(HiveOutputFormat.class));
  }
  partition.getParameters().put(MetastoreConstants.PARTITION_UPDATE_PERIOD, xp.getUpdatePeriod().name());
  partition.getTPartition().getSd().getSerdeInfo().setSerializationLib(xp.getSerdeClassname());
}

代码示例来源:origin: com.linkedin.gobblin/gobblin-data-management

/**
 * Produces the target-side partition for a copy: deep-copies the origin
 * partition's thrift object, retargets it to the destination database and
 * table, stamps Gobblin distcp registration parameters, assigns the new data
 * location, and unsets the create time.
 *
 * @param originPartition partition being copied
 * @param targetLocation  location of the copied data
 * @return the partition to register on the target
 * @throws IOException wrapping any HiveException raised while building it
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    String destinationDb = this.hiveCopyEntityHelper.getTargetDatabase();
    Partition result = new Partition(
        this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    result.getTable().setDbName(destinationDb);
    result.getTPartition().setDbName(destinationDb);
    // Tag the copy so downstream tooling can tell it was registered by distcp.
    result.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    result.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    result.setLocation(targetLocation.toString());
    result.getTPartition().unsetCreateTime();
    return result;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

代码示例来源:origin: org.apache.gobblin/gobblin-data-management

/**
 * Derives the partition object for the copy destination from the origin
 * partition: a deep copy rebound to the target database/table, annotated
 * with Gobblin distcp registration metadata, pointed at targetLocation, and
 * stripped of its create time.
 *
 * @param originPartition source partition
 * @param targetLocation  new location of the partition data
 * @return partition ready to be registered at the destination
 * @throws IOException if partition construction throws a HiveException
 */
private Partition getTargetPartition(Partition originPartition, Path targetLocation) throws IOException {
  try {
    String dbName = this.hiveCopyEntityHelper.getTargetDatabase();
    Partition target = new Partition(
        this.hiveCopyEntityHelper.getTargetTable(),
        originPartition.getTPartition().deepCopy());
    target.getTable().setDbName(dbName);
    target.getTPartition().setDbName(dbName);
    target.getTPartition().putToParameters(HiveDataset.REGISTERER, HiveCopyEntityHelper.GOBBLIN_DISTCP);
    target.getTPartition().putToParameters(HiveDataset.REGISTRATION_GENERATION_TIME_MILLIS,
        Long.toString(this.hiveCopyEntityHelper.getStartTime()));
    target.setLocation(targetLocation.toString());
    target.getTPartition().unsetCreateTime();
    return target;
  } catch (HiveException he) {
    throw new IOException(he);
  }
}

相关文章

微信公众号

最新文章

更多