Usage and code examples of the org.apache.hadoop.hive.ql.exec.spark.KryoSerializer.serializeJobConf() method


This article collects Java code examples for the org.apache.hadoop.hive.ql.exec.spark.KryoSerializer.serializeJobConf() method and shows how serializeJobConf() is used in practice. The examples are drawn mainly from selected projects on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as a useful reference. Details of the KryoSerializer.serializeJobConf() method are as follows:
Package path: org.apache.hadoop.hive.ql.exec.spark.KryoSerializer
Class name: KryoSerializer
Method name: serializeJobConf

KryoSerializer.serializeJobConf overview

No description is provided upstream. Judging from the call sites below, serializeJobConf(JobConf) serializes a Hadoop JobConf into a byte[] so the configuration can be shipped to remote Spark executors alongside the serialized SparkWork, and reconstructed on the remote side with the companion deserializeJobConf() method.
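
Before looking at the project examples, here is a minimal, self-contained round-trip sketch. It assumes hive-exec (which contains KryoSerializer) and the Hadoop MapReduce client libraries are on the classpath, and that the companion KryoSerializer.deserializeJobConf() method is used on the receiving side; the class name, property key, and value below are placeholders chosen for illustration only.

import org.apache.hadoop.hive.ql.exec.spark.KryoSerializer;
import org.apache.hadoop.mapred.JobConf;

public class SerializeJobConfExample {
 public static void main(String[] args) {
  // Build a JobConf carrying a sample property (key and value are placeholders).
  JobConf jobConf = new JobConf();
  jobConf.set("example.key", "example.value");

  // Serialize the configuration into a byte[] that can be shipped to remote Spark executors.
  byte[] confBytes = KryoSerializer.serializeJobConf(jobConf);

  // On the receiving side the bytes are turned back into a JobConf with the companion method.
  JobConf restored = KryoSerializer.deserializeJobConf(confBytes);
  System.out.println("restored value = " + restored.get("example.key"));
 }
}

As the project examples below show, callers serialize the JobConf separately from the other job state: serializeJobConf() handles the Hadoop configuration, while the generic KryoSerializer.serialize() is used for the scratch-dir Path and the SparkWork object.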

Code examples

Code example source: apache/drill

// Clone the per-work JobConf, validate the output specs, then serialize the
// configuration so it can be passed to the Hive map/reduce functions running on Spark.
JobConf newJobConf = cloneJobConf(work);
checkSpecs(work, newJobConf);
byte[] confBytes = KryoSerializer.serializeJobConf(newJobConf);
boolean caching = isCachingWork(work, sparkWork);
if (work instanceof MapWork) {

Code example source: apache/hive

JobConf newJobConf = cloneJobConf(work);
checkSpecs(work, newJobConf);
byte[] confBytes = KryoSerializer.serializeJobConf(newJobConf);
boolean caching = isCachingWork(work, sparkWork);
if (work instanceof MapWork) {

Code example source: apache/hive

private SparkJobRef submit(final DriverContext driverContext, final SparkWork sparkWork) throws Exception {
 final Context ctx = driverContext.getCtx();
 final HiveConf hiveConf = (HiveConf) ctx.getConf();
 refreshLocalResources(sparkWork, hiveConf);
 final JobConf jobConf = new JobConf(hiveConf);
 //update the credential provider location in the jobConf
 HiveConfUtil.updateJobCredentialProviders(jobConf);
 // Create temporary scratch dir
 final Path emptyScratchDir = ctx.getMRTmpPath();
 FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
 fs.mkdirs(emptyScratchDir);
 // make sure NullScanFileSystem can be loaded - HIVE-18442
 jobConf.set("fs." + NullScanFileSystem.getBaseScheme() + ".impl",
   NullScanFileSystem.class.getCanonicalName());
 byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
 byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
 byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
 JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
 if (driverContext.isShutdown()) {
  throw new HiveException("Operation is cancelled.");
 }
 JobHandle<Serializable> jobHandle = remoteClient.submit(job);
 RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
 return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}

Code example source: apache/drill

private SparkJobRef submit(final DriverContext driverContext, final SparkWork sparkWork) throws Exception {
 final Context ctx = driverContext.getCtx();
 final HiveConf hiveConf = (HiveConf) ctx.getConf();
 refreshLocalResources(sparkWork, hiveConf);
 final JobConf jobConf = new JobConf(hiveConf);
 //update the credential provider location in the jobConf
 HiveConfUtil.updateJobCredentialProviders(jobConf);
 // Create temporary scratch dir
 final Path emptyScratchDir = ctx.getMRTmpPath();
 FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
 fs.mkdirs(emptyScratchDir);
 byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
 byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
 byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
 JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
 if (driverContext.isShutdown()) {
  throw new HiveException("Operation is cancelled.");
 }
 JobHandle<Serializable> jobHandle = remoteClient.submit(job);
 RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
 return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}

Code example source: apache/hive

// Create the Spark context, then serialize the job configuration, scratch directory,
// and SparkWork before submitting the job.
sc = new JavaSparkContext(sparkConf);
byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
byte[] scratchDirBytes = KryoSerializer.serialize(tmpDir);
byte[] sparkWorkBytes = KryoSerializer.serialize(sparkTask.getWork());

Code example source: com.facebook.presto.hive/hive-apache

private SparkTran generate(BaseWork work) throws Exception {
 initStatsPublisher(work);
 JobConf newJobConf = cloneJobConf(work);
 checkSpecs(work, newJobConf);
 byte[] confBytes = KryoSerializer.serializeJobConf(newJobConf);
 if (work instanceof MapWork) {
  MapTran mapTran = new MapTran();
  HiveMapFunction mapFunc = new HiveMapFunction(confBytes, sparkReporter);
  mapTran.setMapFunction(mapFunc);
  return mapTran;
 } else if (work instanceof ReduceWork) {
  ReduceTran reduceTran = new ReduceTran();
  HiveReduceFunction reduceFunc = new HiveReduceFunction(confBytes, sparkReporter);
  reduceTran.setReduceFunction(reduceFunc);
  return reduceTran;
 } else {
  throw new IllegalStateException("AssertionError: expected either MapWork or ReduceWork, "
   + "but found " + work.getClass().getName());
 }
}

Code example source: com.facebook.presto.hive/hive-apache

@Override
public SparkJobRef execute(final DriverContext driverContext, final SparkWork sparkWork) throws Exception {
 final Context ctx = driverContext.getCtx();
 final HiveConf hiveConf = (HiveConf) ctx.getConf();
 refreshLocalResources(sparkWork, hiveConf);
 final JobConf jobConf = new JobConf(hiveConf);
 // Create temporary scratch dir
 final Path emptyScratchDir = ctx.getMRTmpPath();
 FileSystem fs = emptyScratchDir.getFileSystem(jobConf);
 fs.mkdirs(emptyScratchDir);
 byte[] jobConfBytes = KryoSerializer.serializeJobConf(jobConf);
 byte[] scratchDirBytes = KryoSerializer.serialize(emptyScratchDir);
 byte[] sparkWorkBytes = KryoSerializer.serialize(sparkWork);
 JobStatusJob job = new JobStatusJob(jobConfBytes, scratchDirBytes, sparkWorkBytes);
 JobHandle<Serializable> jobHandle = remoteClient.submit(job);
 RemoteSparkJobStatus sparkJobStatus = new RemoteSparkJobStatus(remoteClient, jobHandle, sparkClientTimtout);
 return new RemoteSparkJobRef(hiveConf, jobHandle, sparkJobStatus);
}
