Usage of org.apache.hadoop.hive.ql.exec.Utilities.getMapWork() with code examples


This article collects Java code examples for org.apache.hadoop.hive.ql.exec.Utilities.getMapWork(), showing how the method is used in practice. The examples are drawn from curated projects on platforms such as GitHub, Stack Overflow and Maven, and should make useful references. Details of Utilities.getMapWork() are as follows:

Package: org.apache.hadoop.hive.ql.exec
Class: Utilities
Method: getMapWork

Utilities.getMapWork introduction

The source page gives no official description. Judging from the examples below, getMapWork(Configuration) deserializes and returns the map-side plan (MapWork) that Hive stored under the job's plan path, and callers treat a null result as "no plan available".
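
Before diving into the excerpts, here is a minimal sketch of the typical retrieval pattern. The class and method names are illustrative only (not Hive API), and the sketch assumes it runs inside a Hive task whose JobConf already points at a serialized plan:

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapWork;
import org.apache.hadoop.mapred.JobConf;

public class MapWorkProbe {
 // Hypothetical helper: expects to run inside a Hive task, where the
 // JobConf carries the plan path of a serialized MapWork plan.
 public static void probe(JobConf jobConf) {
  MapWork mapWork = Utilities.getMapWork(jobConf);
  if (mapWork != null) {
   // The deserialized plan describes the map-side work, e.g. its input partitions.
   System.out.println("Partition paths: " + mapWork.getPathToPartitionInfo().keySet());
  }
 }
}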

Code examples

Code example source: apache/hive

// Fragment: the enclosing call is not shown in this excerpt; `cache` and
// `cacheKey` are assumed context (for example Hive's per-task ObjectCache,
// which takes a Callable so the plan is deserialized at most once).
cache.retrieve(cacheKey, new Callable<Object>() {
 @Override
 public Object call() {
  return Utilities.getMapWork(jconf);
 }
});


Code example source: apache/drill

// Same fragment in Drill's copy of the Hive code, reconstructed as above;
// `cache` and `cacheKey` are again assumed from the surrounding context.
cache.retrieve(cacheKey, new Callable<Object>() {
 @Override
 public Object call() {
  return Utilities.getMapWork(jconf);
 }
});


Code example source: apache/hive

private static MapWork populateMapWork(JobConf jobConf, String inputName) {
 MapWork work = null;
 if (inputName != null) {
  work = (MapWork) Utilities.getMergeWork(jobConf, inputName);
  // work can still be null if there is no merge work for this input
 }
 if (work == null) {
  work = Utilities.getMapWork(jobConf);
 }
 return work;
}
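
When an input name is supplied, the per-input merge plan is tried first via getMergeWork(jobConf, inputName), and getMapWork(jobConf) is only the fallback; as the comment notes, merge work may legitimately be absent for a given input.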

Code example source: apache/hive

public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx, Configuration hiveConf,
  FileSplit split, Object[] partitionValues) throws IOException {
 // TODO: this is invalid for SMB. Keep this for now for legacy reasons. See the other overload.
 MapWork mapWork = Utilities.getMapWork(hiveConf);
 getPartitionValues(vrbCtx, mapWork, split, partitionValues);
}

Code example source: apache/drill

public static void getPartitionValues(VectorizedRowBatchCtx vrbCtx, Configuration hiveConf,
  FileSplit split, Object[] partitionValues) throws IOException {
 // TODO: this is invalid for SMB. Keep this for now for legacy reasons. See the other overload.
 MapWork mapWork = Utilities.getMapWork(hiveConf);
 getPartitionValues(vrbCtx, mapWork, split, partitionValues);
}

Code example source: apache/drill

private static MapWork populateMapWork(JobConf jobConf, String inputName) {
 MapWork work = null;
 if (inputName != null) {
  work = (MapWork) Utilities.getMergeWork(jobConf, inputName);
  // work can still be null if there is no merge work for this input
 }
 if (work == null) {
  work = Utilities.getMapWork(jobConf);
 }
 return work;
}

Code example source: apache/hive

protected void init(JobConf job) {
 if (mrwork == null || pathToPartitionInfo == null) {
  if (HiveConf.getVar(job, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
   mrwork = (MapWork) Utilities.getMergeWork(job);
   if (mrwork == null) {
    mrwork = Utilities.getMapWork(job);
   }
  } else {
   mrwork = Utilities.getMapWork(job);
  }
  pathToPartitionInfo = mrwork.getPathToPartitionInfo();
 }
}
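
Here the lookup order depends on the execution engine: under Tez, merge work may have been serialized instead of plain map work, so getMergeWork(job) is consulted first; in either case the resulting MapWork supplies the path-to-partition mapping that the reader caches.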

Code example source: apache/drill

/**
 * Sets the mapWork variable based on the current JobConf in order to get all partitions.
 *
 * @param job
 */
private void updateMrWork(final JobConf job) {
 final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
 if (mapWork == null && plan != null && plan.length() > 0) {
  mapWork = Utilities.getMapWork(job);
  pathToPartitionInfo.clear();
  for (final Map.Entry<Path, PartitionDesc> entry : mapWork.getPathToPartitionInfo().entrySet()) {
   // key contains scheme (such as pfile://) and we want only the path portion fix in HIVE-6366
   pathToPartitionInfo.put(Path.getPathWithoutSchemeAndAuthority(entry.getKey()), entry.getValue());
  }
 }
}

Code example source: apache/hive

/**
 * Sets the mapWork variable based on the current JobConf in order to get all partitions.
 *
 * @param job
 */
private void updateMrWork(final JobConf job) {
 final String plan = HiveConf.getVar(job, HiveConf.ConfVars.PLAN);
 if (mapWork == null && plan != null && plan.length() > 0) {
  mapWork = Utilities.getMapWork(job);
  pathToPartitionInfo.clear();
  for (final Map.Entry<Path, PartitionDesc> entry : mapWork.getPathToPartitionInfo().entrySet()) {
   // key contains scheme (such as pfile://) and we want only the path portion fix in HIVE-6366
   pathToPartitionInfo.put(StringInternUtils.internUriStringsInPath(
       Path.getPathWithoutSchemeAndAuthority(entry.getKey())), entry.getValue());
  }
 }
}
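
Both variants of updateMrWork strip the scheme and authority (such as pfile://) from each path before using it as a key, the fix referenced as HIVE-6366, so later lookups match on the path portion alone; the apache/hive variant additionally interns the URI strings in each Path to reduce memory usage.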

Code example source: apache/hive

private static DruidSerDe createAndInitializeSerde(Configuration jobConf) {
  DruidSerDe serDe = new DruidSerDe();
  MapWork mapWork = Preconditions.checkNotNull(Utilities.getMapWork(jobConf), "Map work is null");
  Properties
    properties =
    mapWork.getPartitionDescs()
      .stream()
      .map(partitionDesc -> partitionDesc.getTableDesc().getProperties())
      .findAny()
      .orElseThrow(() -> new RuntimeException("Can not find table property at the map work"));
  try {
   serDe.initialize(jobConf, properties, null);
  } catch (SerDeException e) {
   throw new RuntimeException("Can not initialized the serde", e);
  }
  return serDe;
 }

Code example source: apache/hive

@SuppressWarnings("Duplicates") private static KafkaSerDe createAndInitializeSerde(Configuration jobConf) {
  KafkaSerDe serDe = new KafkaSerDe();
  MapWork mapWork = Preconditions.checkNotNull(Utilities.getMapWork(jobConf), "Map work is null");
  Properties
    properties =
    mapWork.getPartitionDescs()
      .stream()
      .map(partitionDesc -> partitionDesc.getTableDesc().getProperties())
      .findAny()
      .orElseThrow(() -> new RuntimeException("Can not find table property at the map work"));
  try {
   serDe.initialize(jobConf, properties, null);
  } catch (SerDeException e) {
   throw new RuntimeException("Can not initialized the serde", e);
  }
  return serDe;
 }
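
The Druid and Kafka readers share the same pattern: every PartitionDesc in the MapWork carries the table's properties, so any one of them (findAny) is enough to initialize the SerDe, while a missing MapWork or missing table properties is treated as a hard error.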

Code example source: apache/hive

@Override
public void configure(JobConf job) {
 jc = job;
 work = (ColumnTruncateWork) Utilities.getMapWork(job);
 Path specPath = work.getOutputDir();
 Path tmpPath = Utilities.toTempPath(specPath);
 Path taskTmpPath = Utilities.toTaskTempPath(specPath);
 updatePaths(tmpPath, taskTmpPath);
 try {
  fs = specPath.getFileSystem(job);
  autoDelete = fs.deleteOnExit(outPath);
 } catch (IOException e) {
  this.exception = true;
  throw new RuntimeException(e);
 }
}

Code example source: apache/hive

public static MapredWork getMapRedWork(Configuration conf) {
 MapredWork w = new MapredWork();
 w.setMapWork(getMapWork(conf));
 w.setReduceWork(getReduceWork(conf));
 return w;
}

Code example source: apache/drill

public static MapredWork getMapRedWork(Configuration conf) {
 MapredWork w = new MapredWork();
 w.setMapWork(getMapWork(conf));
 w.setReduceWork(getReduceWork(conf));
 return w;
}
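
getMapRedWork is a convenience wrapper that reassembles a full MapredWork from the separately deserialized map-side and reduce-side plans.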

Code example source: apache/hive

/**
 * @param conf
 * @return the configured VectorizedRowBatchCtx for a MapWork task.
 */
public static VectorizedRowBatchCtx getVectorizedRowBatchCtx(Configuration conf) {
 VectorizedRowBatchCtx result = null;
 if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED) &&
   Utilities.getPlanPath(conf) != null) {
  MapWork mapWork = Utilities.getMapWork(conf);
  if (mapWork != null && mapWork.getVectorMode()) {
   result = mapWork.getVectorizedRowBatchCtx();
  }
 }
 return result;
}

Code example source: apache/drill

/**
 * @param conf
 * @return the configured VectorizedRowBatchCtx for a MapWork task.
 */
public static VectorizedRowBatchCtx getVectorizedRowBatchCtx(Configuration conf) {
 VectorizedRowBatchCtx result = null;
 if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED) &&
   Utilities.getPlanPath(conf) != null) {
  MapWork mapWork = Utilities.getMapWork(conf);
  if (mapWork != null && mapWork.getVectorMode()) {
   result = mapWork.getVectorizedRowBatchCtx();
  }
 }
 return result;
}
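
getVectorizedRowBatchCtx deserializes the plan only when vectorization is enabled and a plan path is set, and returns a context only if the MapWork was actually compiled in vector mode, so callers can treat a null result as "use the row-mode path".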

Code example source: apache/hive

public HiveSplitGenerator(InputInitializerContext initializerContext) throws IOException,
  SerDeException {
 super(initializerContext);
 Preconditions.checkNotNull(initializerContext);
 userPayloadProto =
   MRInputHelpers.parseMRInputPayload(initializerContext.getInputUserPayload());
 this.conf = TezUtils.createConfFromByteString(userPayloadProto.getConfigurationBytes());
 this.jobConf = new JobConf(conf);
 // Read all credentials into the credentials instance stored in JobConf.
 ShimLoader.getHadoopShims().getMergedCredentials(jobConf);
 this.work = Utilities.getMapWork(jobConf);
 this.splitLocationProvider =
   Utils.getSplitLocationProvider(conf, work.getCacheAffinity(), LOG);
 LOG.info("SplitLocationProvider: " + splitLocationProvider);
 // Events can start coming in the moment the InputInitializer is created. The pruner
 // must be set up and initialized here so that it sets up its structures to start accepting events.
 // Setting it up in initialize leads to a window where events may come in before the pruner is
 // initialized, which may cause it to drop events.
 pruner = new DynamicPartitionPruner(initializerContext, work, jobConf);
}
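
In this Tez split generator the MapWork is needed during construction: it supplies the cache-affinity setting for the split location provider and is handed to the DynamicPartitionPruner immediately, since events may arrive as soon as the InputInitializer exists.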

Code example source: apache/hive

public CombineHiveInputSplit(JobConf job, CombineFileSplit inputSplitShim,
  Map<Path, PartitionDesc> pathToPartitionInfo) throws IOException {
 this.inputSplitShim = inputSplitShim;
 this.pathToPartitionInfo = pathToPartitionInfo;
 if (job != null) {
  if (this.pathToPartitionInfo == null) {
   this.pathToPartitionInfo = Utilities.getMapWork(job).getPathToPartitionInfo();
  }
  // extract all the inputFormatClass names for each chunk in the
  // CombinedSplit.
  Path[] ipaths = inputSplitShim.getPaths();
  if (ipaths.length > 0) {
   PartitionDesc part = HiveFileFormatUtils
     .getFromPathRecursively(this.pathToPartitionInfo,
       ipaths[0], IOPrepareCache.get().getPartitionDescMap());
   inputFormatClassName = part.getInputFileFormatClass().getName();
  }
 }
}
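
If no path-to-partition map is passed in, the constructor recovers it from the MapWork in the JobConf; the input format class of the first chunk is then recorded as representative of the whole combined split.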
