Usage of org.apache.spark.SparkContext.addSparkListener() with code examples


This article collects Java code examples of the org.apache.spark.SparkContext.addSparkListener() method and shows how it is used in practice. The examples are taken from selected open-source projects found on platforms such as GitHub, Stack Overflow, and Maven, and should make a useful reference. Details of SparkContext.addSparkListener():
Package: org.apache.spark
Class: SparkContext
Method: addSparkListener

About SparkContext.addSparkListener

Registers a SparkListener with the context's listener bus. Spark invokes the listener's callbacks (onJobStart, onStageCompleted, onTaskEnd, and so on) asynchronously as scheduler events occur over the lifetime of the application.
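
As a quick orientation before the project excerpts below, here is a minimal, self-contained sketch of typical usage. The app name, master setting, and log output are illustrative, not taken from any of the projects cited:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerJobEnd;

public class AddSparkListenerExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("listener-demo").setMaster("local[*]");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // Register a listener on the underlying SparkContext; Spark invokes the
    // overridden callbacks asynchronously on its listener-bus thread.
    jsc.sc().addSparkListener(new SparkListener() {
      @Override
      public void onJobEnd(SparkListenerJobEnd jobEnd) {
        System.out.println("Job " + jobEnd.jobId() + " finished: " + jobEnd.jobResult());
      }
    });

    jsc.parallelize(Arrays.asList(1, 2, 3)).count(); // runs a job, firing onJobEnd
    jsc.stop();
  }
}

Note that a listener must be registered before the events of interest fire; events emitted earlier are not replayed to it.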

Code examples

Example source: twosigma/beakerx

private SparkContext addListener(SparkContext sc, SparkUIApi sparkUIManager) {
 sc.addSparkListener(new SparkListener() {

Example source: apache/hive

sc.sc().addSparkListener(new ClientListener());
synchronized (jcLock) {
 jc = new JobContextImpl(sc, localTmpDir);
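
Hive's ClientListener (not shown in the excerpt) forwards scheduler events back to the remote client. As an illustrative sketch of the general pattern, with hypothetical names rather than Hive's actual implementation, a listener can hand events across threads like this:

// Illustrative sketch only; not Hive's actual ClientListener.
// Imports: org.apache.spark.api.java.JavaSparkContext and
// org.apache.spark.scheduler.{SparkListener, SparkListenerJobEnd}.
static void waitForNextJob(JavaSparkContext sc) throws InterruptedException {
  final Object jobDone = new Object();
  sc.sc().addSparkListener(new SparkListener() {
    @Override
    public void onJobEnd(SparkListenerJobEnd jobEnd) {
      synchronized (jobDone) {
        jobDone.notifyAll(); // release any thread blocked below
      }
    }
  });
  synchronized (jobDone) {
    jobDone.wait(); // blocks until the listener reports a finished job
  }
}

A real implementation would also track completion state so that a job finishing before wait() is reached cannot be missed.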

Example source: apache/hive

private LocalHiveSparkClient(SparkConf sparkConf, HiveConf hiveConf)
  throws FileNotFoundException, MalformedURLException {
 String regJar = null;
 // the registrator jar should already be in CP when not in test mode
 if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEST)) {
  String kryoReg = sparkConf.get("spark.kryo.registrator", "");
  if (SparkClientUtilities.HIVE_KRYO_REG_NAME.equals(kryoReg)) {
   regJar = SparkClientUtilities.findKryoRegistratorJar(hiveConf);
   SparkClientUtilities.addJarToContextLoader(new File(regJar));
  }
 }
 sc = new JavaSparkContext(sparkConf);
 if (regJar != null) {
  sc.addJar(regJar);
 }
 jobMetricsListener = new JobMetricsListener();
 sc.sc().addSparkListener(jobMetricsListener);
}
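
The jobMetricsListener registered above aggregates per-job metrics as tasks finish. A stripped-down sketch of that idea, much simpler than Hive's actual JobMetricsListener:

import java.util.concurrent.atomic.AtomicLong;

import org.apache.spark.executor.TaskMetrics;
import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerTaskEnd;

// Simplified sketch; the real JobMetricsListener also maps stages to jobs
// and clears its state when jobs complete.
public class SimpleMetricsListener extends SparkListener {
  private final AtomicLong totalExecutorRunTimeMs = new AtomicLong();

  @Override
  public void onTaskEnd(SparkListenerTaskEnd taskEnd) {
    TaskMetrics metrics = taskEnd.taskMetrics();
    if (metrics != null) { // metrics can be absent for failed tasks
      totalExecutorRunTimeMs.addAndGet(metrics.executorRunTime());
    }
  }

  public long getTotalExecutorRunTimeMs() {
    return totalExecutorRunTimeMs.get();
  }
}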

Example source: apache/kylin

sc.sc().addSparkListener(jobListener);
final FileSystem fs = partitionFilePath.getFileSystem(sc.hadoopConfiguration());
if (!fs.exists(partitionFilePath)) {

Example source: apache/kylin

sc.sc().addSparkListener(jobListener);
HadoopUtil.deletePath(sc.hadoopConfiguration(), new Path(outputPath));

Example source: apache/kylin

The same sc.sc().addSparkListener(jobListener) registration recurs at several more call sites in Kylin's Spark jobs, sometimes preceded by cleanup of the output path as in the previous example.
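
A common reason for registering such a jobListener before kicking off work is to detect afterwards whether any job failed. A minimal sketch of that pattern (illustrative; not Kylin's actual listener class):

import org.apache.spark.scheduler.JobSucceeded$;
import org.apache.spark.scheduler.SparkListener;
import org.apache.spark.scheduler.SparkListenerJobEnd;

public class JobFailureListener extends SparkListener {
  private volatile boolean anyJobFailed = false;

  @Override
  public void onJobEnd(SparkListenerJobEnd jobEnd) {
    // JobSucceeded is a Scala case object; any other JobResult means failure.
    if (!(jobEnd.jobResult() instanceof JobSucceeded$)) {
      anyJobFailed = true;
    }
  }

  public boolean anyJobFailed() {
    return anyJobFailed;
  }
}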

Example source: uber/marmaray

private void updateSparkContext(@NonNull final SparkArgs sparkArgs,
  @NonNull final SparkContext sc) {
  for (SparkListener sparkListener : getSparkEventListeners()) {
    sc.addSparkListener(sparkListener);
  }
  sc.hadoopConfiguration().addResource(sparkArgs.getHadoopConfiguration());
}

Example source: uber/marmaray

/**
 * Creates the JavaSparkContext if it hasn't been created yet, otherwise returns the existing
 * instance. {@link #addSchema(Schema)} and {@link #addSchemas(Collection)} must not be called
 * once the JavaSparkContext has been created.
 * @return the JavaSparkContext that will be used to execute the JobDags
 */
public JavaSparkContext getOrCreateSparkContext() {
  if (!this.sparkContext.isPresent()) {
    this.sparkContext = Optional.of(new JavaSparkContext(
        SparkUtil.getSparkConf(
          this.appName, Optional.of(this.schemas), this.serializationClasses, this.conf)));
    this.sparkContext.get().sc().addSparkListener(new SparkEventListener());
    // Merge the provided Hadoop configuration into the context's defaults
    this.sparkContext.get().sc().hadoopConfiguration().addResource(
      new HadoopConfiguration(conf).getHadoopConf());
    this.appId = this.sparkContext.get().sc().applicationId();
  }
  return this.sparkContext.get();
}

Example source: org.apache.pig/pig

sparkContext.sc().addSparkListener(new StatsReportListener());
sparkContext.sc().addSparkListener(new JobLogger());
sparkContext.sc().addSparkListener(jobMetricsListener);
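
StatsReportListener ships with Spark (org.apache.spark.scheduler.StatsReportListener) and logs summary statistics such as task runtimes as each stage completes; JobLogger was a similar built-in that was removed in later Spark releases. Listeners with a zero-argument constructor can also be registered declaratively through configuration instead of calling addSparkListener, for example:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class ExtraListenersExample {
  public static void main(String[] args) {
    // Spark instantiates and registers the named listener classes at startup.
    SparkConf conf = new SparkConf()
        .setAppName("stats-demo")
        .setMaster("local[*]")
        .set("spark.extraListeners", "org.apache.spark.scheduler.StatsReportListener");
    new JavaSparkContext(conf).stop();
  }
}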

Example source: io.snappydata/snappydata-core

/**
 * Create Snappy's SnappySQLListener in place of Spark's default SQLListener
 */
private static void createListenerAndUI(SparkContext sc) {
 SQLListener initListener = ExternalStoreUtils.getSQLListener().get();
 if (initListener == null) {
  SnappySQLListener listener = new SnappySQLListener(sc.conf());
  if (ExternalStoreUtils.getSQLListener().compareAndSet(null, listener)) {
   sc.addSparkListener(listener);
   scala.Option<SparkUI> ui = sc.ui();
   // embedded mode attaches SQLTab later via ToolsCallbackImpl that also
   // takes care of injecting any authentication module if configured
   if (ui.isDefined() &&
     !(SnappyContext.getClusterMode(sc) instanceof SnappyEmbeddedMode)) {
    new SQLTab(listener, ui.get());
   }
  }
 }
}

Example source: com.github.hyukjinkwon/spark-client, org.spark-project.hive/spark-client

The same ClientListener registration as in the apache/hive example above.

Example source: org.wso2.carbon.analytics/org.wso2.carbon.analytics.spark.core

registerUDFs(this.sqlCtx);
registerUDAFs(this.sqlCtx);
this.sqlCtx.sparkContext().addSparkListener(new SparkListener() {
  @Override
  public void onStageCompleted(SparkListenerStageCompleted sparkListenerStageCompleted) {
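
The excerpt above is cut off at the callback body. A complete anonymous listener of the same shape, with an illustrative log statement (listener types come from org.apache.spark.scheduler):

sqlCtx.sparkContext().addSparkListener(new SparkListener() {
  @Override
  public void onStageCompleted(SparkListenerStageCompleted stageCompleted) {
    StageInfo info = stageCompleted.stageInfo();
    System.out.println("Stage " + info.stageId() + " (" + info.name()
        + ") completed with " + info.numTasks() + " tasks");
  }
});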

Example source: org.apache.kylin/kylin-engine-spark

The same jobListener registrations as in the apache/kylin examples above.

Example source: uber/hudi

jsc.sc().addSparkListener(new SparkListener() {

Example source: ai.h2o/sparkling-water-ml

iterableAsScalaIterable(Arrays.asList("treeAggregate")));
sc.addSparkListener(progressBar);
