Usage of org.apache.hadoop.hive.ql.exec.Utilities.getTableDesc() with code examples


This article collects code examples of the Java method org.apache.hadoop.hive.ql.exec.Utilities.getTableDesc(), showing how it is used in practice. The examples come from selected projects on GitHub/Stackoverflow/Maven and should serve as useful references. Details of Utilities.getTableDesc() are as follows:
Package path: org.apache.hadoop.hive.ql.exec.Utilities
Class name: Utilities
Method name: getTableDesc

About Utilities.getTableDesc

No description available.
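Since no official description is given, here is a minimal, hedged sketch of what the method does: it turns a metastore-level org.apache.hadoop.hive.ql.metadata.Table into the serializable org.apache.hadoop.hive.ql.plan.TableDesc that Hive query plans carry around. The database and table names below are hypothetical placeholders, and exact behavior varies by Hive version.

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class GetTableDescSketch {
  public static void main(String[] args) {
    // "default"/"my_table" are placeholders; Table(db, name) builds an
    // in-memory table with default storage settings.
    Table table = new Table("default", "my_table");

    // Derive the plan-level descriptor: input/output format classes plus
    // the table's SerDe and metadata properties.
    TableDesc desc = Utilities.getTableDesc(table);

    System.out.println(desc.getTableName());
    System.out.println(desc.getInputFileFormatClass().getName());
    System.out.println(desc.getProperties());
  }
}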

Code examples

Code example source: apache/hive

public static TableDesc getTableDesc(Table table) {
 return Utilities.getTableDesc(table);
}
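This apache/hive occurrence is just a thin pass-through wrapper: it re-exports Utilities.getTableDesc under another class's public API so that callers do not have to depend on Utilities directly.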

Code example source: apache/drill

private static TableDesc getTableDesc(Table table) {
 TableDesc tableDesc = Utilities.getTableDesc(table);
 internProperties(tableDesc.getProperties());
 return tableDesc;
}
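The drill variant interns the descriptor's properties before returning it. internProperties is a private helper of the calling class and its body is not shown here; the following is a hedged sketch of what such a helper plausibly does (our own implementation, not drill's):

import java.util.Properties;

final class PropertyInterner {
  // Deduplicate property values via String.intern(); many TableDescs carry
  // identical SerDe/format strings, so interning reduces planner memory use.
  static void internProperties(Properties properties) {
    for (String key : properties.stringPropertyNames()) {
      String value = properties.getProperty(key);
      if (value != null) {
        properties.setProperty(key, value.intern());
      }
    }
  }
}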

Code example source: apache/drill

private static Task<?> loadTable(URI fromURI, Table table, boolean replace, Path tgtPath,
             ReplicationSpec replicationSpec, EximUtil.SemanticAnalyzerWrapperContext x) {
 Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
 Path tmpPath = x.getCtx().getExternalTmpPath(tgtPath);
 Task<?> copyTask = ReplCopyTask.getLoadCopyTask(replicationSpec, dataPath, tmpPath, x.getConf());
 LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath,
   Utilities.getTableDesc(table), new TreeMap<String, String>(),
   replace);
 Task<?> loadTableTask = TaskFactory.get(new MoveWork(x.getInputs(),
   x.getOutputs(), loadTableWork, null, false), x.getConf());
 copyTask.addDependentTask(loadTableTask);
 x.getTasks().add(copyTask);
 return loadTableTask;
}
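Note the task wiring: the copy task stages the exported data under a temporary path, and the MoveWork-based load task (parameterized by the TableDesc from Utilities.getTableDesc) is chained behind it with addDependentTask, so the load only runs once the copy has finished.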

Code example source: apache/drill

try {
  TableDesc desc = Utilities.getTableDesc(indexTbl);
  // ... (truncated: an index-builder task is constructed here)
      new PartitionDesc(Utilities.getTableDesc(baseTbl), null),
      baseTbl.getTableName(), indexTbl.getDbName());
  indexBuilderTasks.add(indexBuilder);
  // ...
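Though heavily truncated, the fragment still shows the pattern: the base table's descriptor is wrapped in a PartitionDesc (null partition spec, so the whole table) to drive a task that rebuilds the index table, and that task is queued in indexBuilderTasks.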

Code example source: apache/hive

private FetchWork convertToWork() throws HiveException {
 inputs.clear();
 Utilities.addSchemaEvolutionToTableScanOperator(table, scanOp);
 TableDesc tableDesc = Utilities.getTableDesc(table);
 if (!table.isPartitioned()) {
  inputs.add(new ReadEntity(table, parent, !table.isView() && parent == null));
  FetchWork work = new FetchWork(table.getPath(), tableDesc);
  PlanUtils.configureInputJobPropertiesForStorageHandler(work.getTblDesc());
  work.setSplitSample(splitSample);
  return work;
 }
 List<Path> listP = new ArrayList<Path>();
 List<PartitionDesc> partP = new ArrayList<PartitionDesc>();
 for (Partition partition : partsList.getNotDeniedPartns()) {
  inputs.add(new ReadEntity(partition, parent, parent == null));
  listP.add(partition.getDataLocation());
  partP.add(Utilities.getPartitionDescFromTableDesc(tableDesc, partition, true));
 }
 Table sourceTable = partsList.getSourceTable();
 inputs.add(new ReadEntity(sourceTable, parent, parent == null));
 TableDesc table = Utilities.getTableDesc(sourceTable);
 FetchWork work = new FetchWork(listP, partP, table);
 if (!work.getPartDesc().isEmpty()) {
  PartitionDesc part0 = work.getPartDesc().get(0);
  PlanUtils.configureInputJobPropertiesForStorageHandler(part0.getTableDesc());
  work.setSplitSample(splitSample);
 }
 return work;
}
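Two branches are worth noting here: an unpartitioned table yields a FetchWork built directly from the table path and descriptor, while a partitioned one derives the TableDesc once and stamps out each partition's PartitionDesc from it via getPartitionDescFromTableDesc, so the table-level metadata is computed a single time and shared.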

Code example source: apache/hive

if (handler instanceof InputEstimator) {
  InputEstimator estimator = (InputEstimator) handler;
  TableDesc tableDesc = Utilities.getTableDesc(table);
  PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc);
  Utilities.copyTableJobPropertiesToConf(tableDesc, jobConf);
  // ... (snippet truncated)
}
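The same three-call sequence (build the descriptor, configure storage-handler input properties, copy into the job configuration) recurs across this listing, so it can be read as one idiom. Here is a hedged sketch bundling it into a helper; the helper name and the broad throws clause are ours, not Hive's:

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.mapred.JobConf;

final class TableJobSetup {
  // Build the descriptor, let the table's storage handler inject its input
  // job properties, then copy everything into the JobConf the read will use.
  static void pushTableMetadataToJob(Table table, JobConf jobConf) throws Exception {
    TableDesc tableDesc = Utilities.getTableDesc(table);
    PlanUtils.configureInputJobPropertiesForStorageHandler(tableDesc);
    Utilities.copyTableJobPropertiesToConf(tableDesc, jobConf);
  }
}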

Code example source: apache/drill (identical to the apache/hive convertToWork() example above, so it is not repeated here)

Code example source: apache/hive

// Truncated snippet: two alternative LoadTableDesc argument lists for a
// table-level load (empty TreeMap as the partition spec), one using the
// replication-bootstrap base write id and one using 0L; the surrounding
// "new LoadTableDesc(" calls are cut off by the scrape:
tmpPath, Utilities.getTableDesc(table), new TreeMap<>(),
    loadFileType, ReplUtils.REPL_BOOTSTRAP_MIGRATION_BASE_WRITE_ID);
tmpPath, Utilities.getTableDesc(table), new TreeMap<>(),
    loadFileType, 0L);

Code example source: apache/hive

// Truncated snippet: the partition-level counterpart of the previous
// example, with the same two write-id variants but partSpec.getPartSpec()
// as the partition spec instead of an empty map:
tmpPath, Utilities.getTableDesc(table), partSpec.getPartSpec(),
    loadFileType, ReplUtils.REPL_BOOTSTRAP_MIGRATION_BASE_WRITE_ID);
tmpPath, Utilities.getTableDesc(table), partSpec.getPartSpec(),
    loadFileType, 0L);
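In both variants the TableDesc from Utilities.getTableDesc is what tells the load how to lay out and (de)serialize the files; the final write-id argument matters only for transactional (ACID) targets, which is presumably why the non-migration path can pass 0L.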

Code example source: apache/hive

public static void addMapWork(MapredWork mr, Table tbl, String alias, Operator<?> work) {
 mr.getMapWork().addMapWork(tbl.getDataLocation(), alias, work, new PartitionDesc(
   Utilities.getTableDesc(tbl), null));
}
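The null second argument to PartitionDesc is the usual idiom for non-partitioned input: the map work is keyed only by the table's data location and alias, with all metadata coming from the TableDesc.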

Code example source: apache/drill (identical to the apache/hive InputEstimator snippet above, so it is not repeated here)

Code example source: apache/hive

// Truncated snippet; judging by the apache/drill example below, this is the
// tail of a Utilities.copyTableJobPropertiesToConf call:
Utilities.copyTableJobPropertiesToConf(
    Utilities.getTableDesc(tbl),
    jobConf);
} catch (Exception e) {
  // ...
Code example source: apache/hive

AcidUtils.Operation acidOp = Operation.NOT_ACID;
if (AcidUtils.isFullAcidTable(dest_tab)) {
  acidOp = getAcidType(Utilities.getTableDesc(dest_tab).getOutputFileFormatClass(), dest);
}
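The descriptor's output file format class is what decides the write's ACID operation type here; anything that is not a full-ACID table stays at Operation.NOT_ACID.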

Code example source: apache/drill

Utilities.setColumnTypeList(jobConf, tableScanOp);
Utilities.copyTableJobPropertiesToConf(
 Utilities.getTableDesc(tbl),
 jobConf);
Deserializer deserializer = tbl.getDeserializer();
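Again the TableDesc is the vehicle for pushing table-level properties into the JobConf; copying them in before the deserializer and input machinery are used means the SerDe and input format can see the table's configuration.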

Code example source: apache/hive

// Truncated snippet: the partitioned branch gathers per-partition data
// locations; the unpartitioned else-branch builds the FetchWork directly:
List<PartitionDesc> partDesc = new ArrayList<PartitionDesc>();
List<Path> partLocs = new ArrayList<Path>();
TableDesc tableDesc = Utilities.getTableDesc(tbl);
for (Partition part : partitions) {
  partLocs.add(part.getDataLocation());
  // ...
}
// ...
work.setLimit(100);
} else {
  work = new FetchWork(tbl.getDataLocation(), Utilities.getTableDesc(tbl));
  // ...

Code example source: apache/hive

} else {
  LoadTableDesc loadTableWork = new LoadTableDesc(
      loadPath, Utilities.getTableDesc(table), new TreeMap<>(), lft, writeId);
  if (replicationSpec.isMigratingToTxnTable()) {
    loadTableWork.setInsertOverwrite(replace);
  }
  // ...

Code example source: apache/hive

// Truncated snippet; the dangling "return;" is the tail of a cut-off guard:
return;
// ...
mergeDesc.setTableDesc(Utilities.getTableDesc(tblObj));
ddlWork.setNeedLock(true);
Task<?> mergeTask = TaskFactory.get(ddlWork);
TableDesc tblDesc = Utilities.getTableDesc(tblObj);
Path queryTmpdir = ctx.getExternalTmpPath(newTblPartLoc);
mergeDesc.setOutputDir(queryTmpdir);
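Utilities.getTableDesc(tblObj) is called twice on the same table here; each call builds a fresh TableDesc, so the merge descriptor and the later plan step get independent copies.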

Code example source: apache/drill

// ... (truncated: the tail of a preceding add-partition task construction)
  x.getOutputs(), addPartitionDesc), x.getConf());
LoadTableDesc loadTableWork = new LoadTableDesc(tmpPath,
    Utilities.getTableDesc(table),
    partSpec.getPartSpec(), true);
loadTableWork.setInheritTableSpecs(false);
// ...

Code example source: apache/hive

// ... (isolated one-line fragment)
Utilities.getTableDesc(tab), desc.getPartSpec(),
// ...

Code example source: apache/hive

moveWork.setNeedCleanTarget(replicationSpec.isReplace());
} else {
  LoadTableDesc loadTableWork = new LoadTableDesc(moveTaskSrc, Utilities.getTableDesc(table),
      partSpec.getPartSpec(),
      loadFileType,
      // ... (truncated)
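Taken together, the examples show the role Utilities.getTableDesc plays across Hive's planner: the resulting TableDesc parameterizes reads (FetchWork, PartitionDesc, storage-handler JobConf setup), writes (LoadTableDesc inside MoveWork), and DDL-adjacent work such as index rebuilds and file merges. It is effectively the bridge between metastore metadata and executable plan fragments.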
