Usage of org.apache.spark.api.java.JavaSparkContext.sc() with code examples

This article collects Java code examples for the org.apache.spark.api.java.JavaSparkContext.sc() method and shows how it is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as practical references. Details of the JavaSparkContext.sc() method:
Package: org.apache.spark.api.java
Class: JavaSparkContext
Method: sc

Introduction to JavaSparkContext.sc

JavaSparkContext is the Java-friendly wrapper around Spark's Scala SparkContext, and sc() returns that underlying SparkContext. It is typically called when Java code needs functionality exposed on the Scala API, such as cancelJob, defaultParallelism, applicationId, getExecutorMemoryStatus, or listenerBus. In the Hive and Drill examples below, the doubled call jc.sc().sc() first obtains the JavaSparkContext from the JobContext and then unwraps it to the SparkContext.
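A minimal, self-contained sketch of this pattern follows; the application name and the local master setting are placeholder assumptions, not taken from any of the quoted projects:

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;

public class JavaSparkContextScDemo {
  public static void main(String[] args) {
    // Hypothetical local configuration, for illustration only.
    SparkConf conf = new SparkConf().setAppName("sc-demo").setMaster("local[*]");
    JavaSparkContext jsc = new JavaSparkContext(conf);

    // sc() unwraps the Java wrapper and returns the underlying Scala SparkContext.
    SparkContext scalaContext = jsc.sc();
    System.out.println("applicationId = " + scalaContext.applicationId());
    System.out.println("defaultParallelism = " + scalaContext.defaultParallelism());

    jsc.stop();
  }
}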

Code examples

Code example source: apache/hive

@Override
public boolean cancelJob() {
  int id = Integer.parseInt(jobId);
  javaSparkContext.sc().cancelJob(id);
  return true;
}

Code example source: apache/hive

@Override
public SparkConf getSparkConf() {
  return sc.sc().conf();
}

Code example source: apache/hive

@Override
public int getDefaultParallelism() throws Exception {
  return sc.sc().defaultParallelism();
}

Code example source: apache/hive

@Override
public String getAppID() {
  return sparkContext.sc().applicationId();
}

Code example source: apache/hive

@Override
public String getWebUIURL() {
  try {
    if (sparkContext.sc().uiWebUrl().isDefined()) {
      return SparkUtilities.reverseDNSLookupURL(sparkContext.sc().uiWebUrl().get());
    } else {
      return "UNDEFINED";
    }
  } catch (Exception e) {
    LOG.warn("Failed to get web UI URL.", e);
  }
  return "UNKNOWN";
}

Code example source: apache/hive

@Override
public Integer call(JobContext jc) throws Exception {
  return jc.sc().sc().defaultParallelism();
}

Code example source: apache/hive

@Override
public String call(JobContext jc) throws Exception {
  return jc.sc().sc().applicationId();
}

Code example source: apache/hive

@Override
public void cleanup() {
  jobMetricsListener.cleanup(jobId);
  if (cachedRDDIds != null) {
    for (Integer cachedRDDId : cachedRDDIds) {
      sparkContext.sc().unpersistRDD(cachedRDDId, false);
    }
  }
}

Code example source: apache/hive

@Override
public int getExecutorCount() {
  return sc.sc().getExecutorMemoryStatus().size();
}

Code example source: apache/hive

/**
 * Release cached RDDs as soon as the job is done.
 * This differs from the local Spark client so as to save an RPC
 * round trip and avoid passing cached RDD id information around.
 * Otherwise, we could follow the local Spark client's approach
 * for consistency.
 */
void releaseCache() {
  if (cachedRDDIds != null) {
    for (Integer cachedRDDId : cachedRDDIds) {
      jc.sc().sc().unpersistRDD(cachedRDDId, false);
    }
  }
}

Code example source: apache/hive

@Override
public Integer call(JobContext jc) throws Exception {
  // Subtract 1; otherwise the driver is also counted as an executor.
  int count = jc.sc().sc().getExecutorMemoryStatus().size() - 1;
  return Integer.valueOf(count);
}

Code example source: apache/hive

@Override
public String call(JobContext jc) throws Exception {
  if (jc.sc().sc().uiWebUrl().isDefined()) {
    return SparkUtilities.reverseDNSLookupURL(jc.sc().sc().uiWebUrl().get());
  }
  return "UNDEFINED";
}

Code example source: apache/drill

private LocalHiveSparkClient(SparkConf sparkConf) {
  sc = new JavaSparkContext(sparkConf);
  jobMetricsListener = new JobMetricsListener();
  sc.sc().listenerBus().addListener(jobMetricsListener);
}

Code example source: apache/hive

private SparkContext getSparkContext(SparkSession sparkSession) throws ReflectiveOperationException {
  HiveSparkClient sparkClient = getSparkClient(sparkSession);
  Assert.assertNotNull(sparkClient);
  return getSparkContext(sparkClient).sc();
}

Code example source: org.apache.spark/spark-core_2.11

@Test
public void foreachPartition() {
  LongAccumulator accum = sc.sc().longAccumulator();
  JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
  rdd.foreachPartition(iter -> {
    while (iter.hasNext()) {
      iter.next();
      accum.add(1);
    }
  });
  assertEquals(2, accum.value().intValue());
}

Code example source: org.apache.spark/spark-core_2.11

@Test
public void foreach() {
  LongAccumulator accum = sc.sc().longAccumulator();
  JavaRDD<String> rdd = sc.parallelize(Arrays.asList("Hello", "World"));
  rdd.foreach(s -> accum.add(1));
  assertEquals(2, accum.value().intValue());
}
