Usage of the org.apache.hadoop.hive.ql.metadata.Partition.getLocation() method, with code examples

This article collects Java code examples for the org.apache.hadoop.hive.ql.metadata.Partition.getLocation() method, showing how Partition.getLocation() is used in practice. The examples come from curated open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Partition.getLocation() are as follows:

Package path: org.apache.hadoop.hive.ql.metadata.Partition
Class name: Partition
Method name: getLocation

Partition.getLocation overview

No official description is available. Judging from the examples below, getLocation() returns the partition's storage directory as a String (possibly null), which callers typically wrap in an org.apache.hadoop.fs.Path.
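
As a quick orientation before the excerpts, here is a minimal usage sketch (not taken from any of the projects below): it looks up a partition through the Hive metadata API and reads its storage location. The database name, table name, and partition spec are hypothetical placeholders, and the sketch assumes a metastore reachable through the local HiveConf.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;

public class PartitionLocationSketch {
 public static void main(String[] args) throws Exception {
  Hive db = Hive.get(new HiveConf()); // connect to the metastore named in the configuration
  Table tbl = db.getTable("default", "sales"); // hypothetical database and table
  Map<String, String> spec = new HashMap<String, String>();
  spec.put("ds", "2022-01-26"); // hypothetical partition key and value
  Partition part = db.getPartition(tbl, spec, false); // false: do not create if missing
  if (part != null && part.getLocation() != null) {
   // getLocation() returns the partition directory as a plain String;
   // wrap it in a Path for file-system work, as the excerpts below do
   Path partPath = new Path(part.getLocation());
   System.out.println("partition data location: " + partPath);
  }
 }
}

Note that the location can be null, which is why the excerpts below consistently null-check it before building a Path.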

Code examples

Code example source: apache/hive

@Override
public String getLocation() {
 return partition.getLocation();
}

Code example source: apache/hive

private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
  throws HiveException {
 // output file system information
 Path tblPath = tbl.getPath();
 List<Path> locations = new ArrayList<Path>();
 if (tbl.isPartitioned()) {
  if (par == null) {
   for (Partition curPart : db.getPartitions(tbl)) {
    if (curPart.getLocation() != null) {
     locations.add(new Path(curPart.getLocation()));
    }
   }
  } else {
   if (par.getLocation() != null) {
    locations.add(new Path(par.getLocation()));
   }
  }
 } else {
  if (tblPath != null) {
   locations.add(tblPath);
  }
 }
 return locations;
}

Code example source: apache/hive

/**
 * Decides whether the default directory should be selected.
 *
 * @param part
 * @param selectedPaths
 * @param nonSkewedValueMatchResult
 */
private static void decideDefaultDirSelection(Partition part, List<Path> selectedPaths,
  List<Boolean> nonSkewedValueMatchResult) {
 boolean skipDefDir = true;
 for (Boolean v : nonSkewedValueMatchResult) {
  if ((v == null) || v) {
   skipDefDir = false; // we skip the default directory only if all values are false
   break;
  }
 }
 if (!skipDefDir) {
  StringBuilder builder = new StringBuilder();
  builder.append(part.getLocation());
  builder.append(Path.SEPARATOR);
  builder
    .append((FileUtils.makeDefaultListBucketingDirName(
      part.getSkewedColNames(),
      ListBucketingPrunerUtils.HIVE_LIST_BUCKETING_DEFAULT_DIR_NAME)));
  selectedPaths.add(new Path(builder.toString()));
 }
}

Code example source: apache/hive

/**
 * Sets the appropriate attributes in the supplied Partition object to mark
 * it as archived. Note that the metastore is not touched - a separate
 * call to alter_partition is needed.
 *
 * @param p - the partition object to modify
 * @param harPath - new location of the partition (har scheme URI)
 */
private void setArchived(Partition p, Path harPath, int level) {
 assert(ArchiveUtils.isArchived(p) == false);
 setIsArchived(p, true, level);
 setOriginalLocation(p, p.getLocation());
 p.setLocation(harPath.toString());
}

Code example source: apache/hive

/**
 * Checks whether the partition is in a custom (non-standard) location.
 * @param tbl - the table to which the partition belongs
 * @param p - the partition
 * @return true if the partition location is custom, false if it is standard
 */
boolean partitionInCustomLocation(Table tbl, Partition p)
  throws HiveException {
 String subdir = null;
 try {
  subdir = Warehouse.makePartName(tbl.getPartCols(), p.getValues());
 } catch (MetaException e) {
  throw new HiveException("Unable to get partition's directory", e);
 }
 Path tableDir = tbl.getDataLocation();
 if(tableDir == null) {
  throw new HiveException("Table has no location set");
 }
 String standardLocation = (new Path(tableDir, subdir)).toString();
 if(ArchiveUtils.isArchived(p)) {
  return !getOriginalLocation(p).equals(standardLocation);
 } else {
  return !p.getLocation().equals(standardLocation);
 }
}

Code example source: apache/incubator-gobblin

public HiveWorkUnit(HiveDataset hiveDataset, Partition partition) {
 this(hiveDataset);
 setPartitionName(partition.getName());
 setPartitionLocation(partition.getLocation());
 setPartitionKeys(partition.getTable().getPartitionKeys());
}

Code example source: apache/hive

private static void getPartitionMetaDataInformation(StringBuilder tableInfo, Partition part) {
 formatOutput("Partition Value:", part.getValues().toString(), tableInfo);
 formatOutput("Database:", part.getTPartition().getDbName(), tableInfo);
 formatOutput("Table:", part.getTable().getTableName(), tableInfo);
 formatOutput("CreateTime:", formatDate(part.getTPartition().getCreateTime()), tableInfo);
 formatOutput("LastAccessTime:", formatDate(part.getTPartition().getLastAccessTime()),
   tableInfo);
 formatOutput("Location:", part.getLocation(), tableInfo);
 if (part.getTPartition().getParameters().size() > 0) {
  tableInfo.append("Partition Parameters:").append(LINE_DELIM);
  displayAllParameters(part.getTPartition().getParameters(), tableInfo);
 }
}

Code example source: apache/hive

tmpPart.getParameters().remove(hive_metastoreConstants.DDL_TIME);
String location = tmpPart.getLocation();
if (location != null) {
 location = Utilities.getQualifiedPath(conf, new Path(location));
 // ... (snippet truncated at the source)

Code example source: apache/drill

private static Task<? extends Serializable> alterSinglePartition(
  URI fromURI, FileSystem fs, ImportTableDesc tblDesc,
  Table table, Warehouse wh, AddPartitionDesc addPartitionDesc,
  ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn,
  EximUtil.SemanticAnalyzerWrapperContext x) {
 addPartitionDesc.setReplaceMode(true);
 if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())){
  addPartitionDesc.setReplicationSpec(replicationSpec);
 }
 addPartitionDesc.getPartition(0).setLocation(ptn.getLocation()); // use existing location
 return TaskFactory.get(new DDLWork(
   x.getInputs(),
   x.getOutputs(),
   addPartitionDesc
 ), x.getConf());
}

Code example source: apache/hive

private static Task<? extends Serializable> alterSinglePartition(
  ImportTableDesc tblDesc, Table table, Warehouse wh, AddPartitionDesc addPartitionDesc,
  ReplicationSpec replicationSpec, org.apache.hadoop.hive.ql.metadata.Partition ptn,
  EximUtil.SemanticAnalyzerWrapperContext x) throws MetaException, IOException, HiveException {
 addPartitionDesc.setReplaceMode(true);
 if ((replicationSpec != null) && (replicationSpec.isInReplicationScope())){
  addPartitionDesc.setReplicationSpec(replicationSpec);
 }
 AddPartitionDesc.OnePartitionDesc partSpec = addPartitionDesc.getPartition(0);
 if (ptn == null) {
  fixLocationInPartSpec(tblDesc, table, wh, replicationSpec, partSpec, x);
 } else if (!externalTablePartition(tblDesc, replicationSpec)) {
  partSpec.setLocation(ptn.getLocation()); // use existing location
 }
 return TaskFactory.get(new DDLWork(x.getInputs(), x.getOutputs(), addPartitionDesc), x.getConf());
}

Code example source: apache/hive

if (part != null) {
 if (par != null) {
  if (par.getLocation() != null) {
   tblLoc = par.getDataLocation().toString();
   // ... (snippet truncated at the source)

Code example source: apache/hive

String location = newPart.getLocation();
if (location != null) {
 location = Utilities.getQualifiedPath(conf, new Path(location));
 // ... (snippet truncated at the source)

Code example source: apache/hive

private void authorize(Table table, Partition part, Privilege[] readRequiredPriv,
  Privilege[] writeRequiredPriv)
  throws HiveException, AuthorizationException {
 // extract drop privileges
 DropPrivilegeExtractor privExtractor = new DropPrivilegeExtractor(readRequiredPriv,
   writeRequiredPriv);
 readRequiredPriv = privExtractor.getReadReqPriv();
 writeRequiredPriv = privExtractor.getWriteReqPriv();
 // authorize drops if there was a drop privilege requirement
 if(privExtractor.hasDropPrivilege()) {
  checkDeletePermission(part.getDataLocation(), getConf(), authenticator.getUserName());
 }
 // The partition path can be null for a newly created partition - in that case,
 // we fall back to checking the permissions of the parent table.
 // The partition itself can also be null when this is invoked as a generic
 // catch-all, e.g. for CTAS onto an unpartitioned table (see HIVE-1887)
 if ((part == null) || (part.getLocation() == null)) {
  if (requireCreatePrivilege(readRequiredPriv) || requireCreatePrivilege(writeRequiredPriv)) {
   // this should be the case only if this is a create partition.
   // The privilege needed on the table should be ALTER_DATA, and not CREATE
   authorize(table, new Privilege[]{}, new Privilege[]{Privilege.ALTER_DATA});
  } else {
   authorize(table, readRequiredPriv, writeRequiredPriv);
  }
 } else {
  authorize(part.getDataLocation(), readRequiredPriv, writeRequiredPriv);
 }
}
