Usage and code examples of org.apache.hadoop.hive.ql.exec.Utilities.getTaskId()

This article collects Java code examples of the org.apache.hadoop.hive.ql.exec.Utilities.getTaskId() method and shows how it is used in practice. The examples are drawn from selected projects on platforms such as GitHub, Stack Overflow and Maven, so they should serve as a useful reference. Details of the method:

Package: org.apache.hadoop.hive.ql.exec
Class: Utilities
Method: getTaskId

Utilities.getTaskId overview

Gets the task id if we are running as a Hadoop job. Gets a random number otherwise.
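
The behavior is easy to check in isolation. The following is a minimal sketch, not code from any of the projects below: the attempt-id string follows Hadoop's standard attempt_<jobId>_m_<taskId>_<attemptId> format, and the expected output is an assumption based on the Hive test shown later in this article.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.Utilities;

public class GetTaskIdDemo {
 public static void main(String[] args) {
  Configuration conf = new Configuration();

  // Without "mapred.task.id" we are not inside a Hadoop task,
  // so getTaskId() falls back to a random number (as a string).
  System.out.println(Utilities.getTaskId(conf));

  // With an attempt id set, getTaskId() strips the job/attempt prefix
  // and keeps the task part, presumably printing "000005_0" here.
  conf.set("mapred.task.id", "attempt_200707121733_0003_m_000005_0");
  System.out.println(Utilities.getTaskId(conf));
 }
}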

Code examples

Code example source: apache/hive

private void updatePaths(Path tmpPath, Path taskTmpPath) {
 String taskId = Utilities.getTaskId(jc);
 this.tmpPath = tmpPath;
 this.taskTmpPath = taskTmpPath;
 String inputFile = jc.get(MRJobConfig.MAP_INPUT_FILE);
 int lastSeparator = inputFile.lastIndexOf(Path.SEPARATOR) + 1;
 finalPath = new Path(tmpPath, inputFile.substring(lastSeparator));
 outPath = new Path(taskTmpPath, Utilities.toTempPath(taskId));
}
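
Note the two paths this sets up: the task writes to outPath first, and Utilities.toTempPath marks that side file as in-flight output (in Hive's convention it prefixes the file name, yielding something like _tmp.<taskId>), so a half-written file is never confused with the final one at finalPath.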

Code example source: apache/hive

private void testTaskIds(String [] taskIds, String expectedAttemptId, String expectedTaskId) {
 Configuration conf = new JobConf(TestOperators.class);
 for (String one: taskIds) {
  conf.set("mapred.task.id", one);
  String attemptId = Utilities.getTaskId(conf);
  assertEquals(expectedAttemptId, attemptId);
  assertEquals(Utilities.getTaskIdFromFilename(attemptId), expectedTaskId);
  assertEquals(Utilities.getTaskIdFromFilename(attemptId + ".gz"), expectedTaskId);
  assertEquals(Utilities.getTaskIdFromFilename
         (Utilities.toTempPath(new Path(attemptId + ".gz")).toString()), expectedTaskId);
 }
}
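
A plausible invocation of this test helper (the concrete ids are an assumption, following the standard Hadoop attempt-id format):

// "000005_0" is the expected attempt-derived id, and "000005" the task id
// left after getTaskIdFromFilename() drops the attempt suffix.
testTaskIds(new String[] {"attempt_200707121733_0003_m_000005_0"},
    "000005_0", "000005");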

Code example source: apache/drill

private void updatePaths(Path tmpPath, Path taskTmpPath) {
 String taskId = Utilities.getTaskId(jc);
 this.tmpPath = tmpPath;
 this.taskTmpPath = taskTmpPath;
 String inputFile = jc.get(MRJobConfig.MAP_INPUT_FILE);
 int lastSeparator = inputFile.lastIndexOf(Path.SEPARATOR) + 1;
 finalPath = new Path(tmpPath, inputFile.substring(lastSeparator));
 outPath = new Path(taskTmpPath, Utilities.toTempPath(taskId));
}

Code example source: apache/hive

// excerpt: rewrite the task id embedded in a filename so it carries the bucket number
taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), bucketNum);

Code example source: apache/drill

private void updatePaths(Path tp, Path ttp) {
 String taskId = Utilities.getTaskId(jc);
 tmpPath = tp;
 taskTmpPath = ttp;
 finalPath = new Path(tp, taskId);
 outPath = new Path(ttp, Utilities.toTempPath(taskId));
}

Code example source: apache/drill

// excerpt: rewrite the task id embedded in a filename so it carries the bucket number
taskId = Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), bucketNum);

Code example source: apache/hive

private void updatePaths(Path tp, Path ttp) {
 if (taskId == null) {
  taskId = Utilities.getTaskId(jc);
 }
 tmpPath = tp;
 if (isMmTable) {
  taskTmpPath = null;
  // Make sure we don't collide with the source.
  outPath = finalPath = new Path(tmpPath, taskId + ".merged");
 } else {
  taskTmpPath = ttp;
  finalPath = new Path(tp, taskId);
  outPath = new Path(ttp, Utilities.toTempPath(taskId));
 }
 if (Utilities.FILE_OP_LOGGER.isTraceEnabled()) {
  Utilities.FILE_OP_LOGGER.trace("Paths for merge " + taskId + ": tmp " + tmpPath + ", task "
    + taskTmpPath + ", final " + finalPath + ", out " + outPath);
 }
}

Code example source: apache/hive

/**
 * @param configuration Job configs
 *
 * @return default consumer properties
 */
static Properties consumerProperties(Configuration configuration) {
 final Properties props = new Properties();
 // we are managing the commit offset
 props.setProperty(CommonClientConfigs.CLIENT_ID_CONFIG, Utilities.getTaskId(configuration));
 props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
 // we are seeking in the stream so no reset
 props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
 String brokerEndPoint = configuration.get(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
 if (brokerEndPoint == null || brokerEndPoint.isEmpty()) {
  throw new IllegalArgumentException("Kafka Broker End Point is missing Please set Config "
    + KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName());
 }
 props.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokerEndPoint);
 props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
 props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
 //case Kerberos is On
 if (UserGroupInformation.isSecurityEnabled()) {
  addKerberosJaasConf(configuration, props);
 }
 // user can always override stuff
 props.putAll(extractExtraProperties(configuration, CONSUMER_CONFIGURATION_PREFIX));
 return props;
}
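
A hypothetical caller (jobConf is a placeholder here) would hand these properties straight to a consumer, so the Kafka-side client.id matches the Hive task id and broker logs can be correlated with the task:

Properties props = consumerProperties(jobConf);
// client.id == Utilities.getTaskId(jobConf); auto-commit stays disabled
KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props);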

Code example source: apache/hive

// excerpt (the assignment target is truncated in the source): compute a
// bucket-specific task id, then derive the final and temporary output paths
Utilities.replaceTaskIdFromFilename(Utilities.getTaskId(hconf), bucketNum);
this.finalPaths[writerOffset] = new Path(bDynParts ? buildTmpPath() : parent, bucketName);
this.outPaths[writerOffset] = new Path(buildTaskOutputTempPath(), bucketName);

Code example source: apache/hive

// excerpt: Kafka client properties, again keyed by the task id as client.id
properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
properties.setProperty(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokers);
properties.setProperty(CommonClientConfigs.CLIENT_ID_CONFIG, Utilities.getTaskId(getConf()));
if (UserGroupInformation.isSecurityEnabled()) {
 KafkaUtils.addKerberosJaasConf(getConf(), properties);
}

Code example source: apache/hive

private Path setupTempDirWithSingleOutputFile(Configuration hconf) throws IOException {
 Path tempDirPath = new Path("file://" + temporaryFolder.newFolder().getAbsolutePath());
 Path taskOutputPath = new Path(tempDirPath, Utilities.getTaskId(hconf));
 FileSystem.getLocal(hconf).create(taskOutputPath).close();
 return tempDirPath;
}

Code example source: apache/hive

// excerpt: name the destination file after the task id; the loop (its body is
// truncated in the source) keeps incrementing a counter while the name exists
final String taskId = Utilities.getTaskId(jc);
Path destFilePath = new Path(destDir, new Path(taskId));
for (int counter = 1; fs.exists(destFilePath); counter++) {

Code example source: apache/hive

// excerpt: for at-least-once semantics the task id doubles as the writer id
// (the EXACTLY_ONCE branch is truncated in the source)
switch (writeSemantic) {
case AT_LEAST_ONCE:
 recordWriter = new SimpleKafkaWriter(topic, Utilities.getTaskId(jc), producerProperties);
 break;
case EXACTLY_ONCE:

Code example source: apache/hive

// excerpt from operator initialization: look the task id up once and cache it
tblSerializers = new HashMap<Byte, AbstractSerDe>(numAliases);
bigKeysExistingMap = new HashMap<Byte, Boolean>(numAliases);
taskId = Utilities.getTaskId(hconf);

Code example source: apache/drill

// excerpt from operator initialization: look the task id up once and cache it
tblSerializers = new HashMap<Byte, AbstractSerDe>(numAliases);
bigKeysExistingMap = new HashMap<Byte, Boolean>(numAliases);
taskId = Utilities.getTaskId(hconf);

Code example source: apache/hive

// excerpt from a test: the consumer's client.id must equal the Hive task id
Assert.assertEquals("false", kafkaProperties.get(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG));
Assert.assertEquals("none", kafkaProperties.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG));
Assert.assertEquals(Utilities.getTaskId(jobConf), kafkaProperties.get(CommonClientConfigs.CLIENT_ID_CONFIG));

Code example source: apache/hive

// excerpt: remember both the current and the original task id before
// resolving the output spec path
fsp = prevFsp = null;
valToPaths = new HashMap<String, FSPaths>();
taskId = originalTaskId = Utilities.getTaskId(hconf);
initializeSpecPath();
fs = specPath.getFileSystem(hconf);

Code example source: apache/drill

// excerpt: the same initialization without the originalTaskId bookkeeping
fsp = prevFsp = null;
valToPaths = new HashMap<String, FSPaths>();
taskId = Utilities.getTaskId(hconf);
initializeSpecPath();
fs = specPath.getFileSystem(hconf);

Code example source: com.facebook.presto.hive/hive-apache

private void updatePaths(Path tp, Path ttp) {
 String taskId = Utilities.getTaskId(jc);
 tmpPath = tp;
 taskTmpPath = ttp;
 finalPath = new Path(tp, taskId);
 outPath = new Path(ttp, Utilities.toTempPath(taskId));
}

Code example source: com.facebook.presto.hive/hive-apache

private void updatePaths(Path tmpPath, Path taskTmpPath) {
 String taskId = Utilities.getTaskId(jc);
 this.tmpPath = tmpPath;
 this.taskTmpPath = taskTmpPath;
 String inputFile = HiveConf.getVar(jc, HiveConf.ConfVars.HADOOPMAPFILENAME);
 int lastSeparator = inputFile.lastIndexOf(Path.SEPARATOR) + 1;
 finalPath = new Path(tmpPath, inputFile.substring(lastSeparator));
 outPath = new Path(taskTmpPath, Utilities.toTempPath(taskId));
}
