This article collects code examples of the Java method org.apache.spark.SparkContext.getConf(), showing how SparkContext.getConf() is used in practice. The examples are extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they offer solid reference value and should be helpful. Details of SparkContext.getConf() are as follows:
Package: org.apache.spark
Class: SparkContext
Method: getConf
Method documentation: none available.
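Since the page carries no method documentation, a brief orientation before the examples: SparkContext.getConf() returns a copy of the SparkConf the context was started with, so callers can read (and even mutate) the returned object without affecting the running context. Below is a minimal sketch of that behavior; the master, app name, and printed keys are illustrative choices, not part of any of the quoted projects.
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;

public class GetConfDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf()
                .setMaster("local[2]")       // hypothetical: local mode, 2 threads
                .setAppName("getconf-demo"); // hypothetical app name
        SparkContext sc = new SparkContext(conf);

        // getConf() hands back a clone of the context's configuration.
        SparkConf runtime = sc.getConf();
        System.out.println(runtime.get("spark.app.name"));                // -> getconf-demo
        System.out.println(runtime.getBoolean("spark.ui.enabled", true)); // default when unset

        sc.stop();
    }
}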
Code example source: apache/tinkerpop
public static SparkContext recreateStopped() {
    if (null == CONTEXT)
        throw new IllegalStateException("The Spark context has not been created.");
    if (!CONTEXT.isStopped())
        throw new IllegalStateException("The Spark context is not stopped.");
    CONTEXT = SparkContext.getOrCreate(CONTEXT.getConf());
    return CONTEXT;
}
public static SparkContext getContext() {
    return CONTEXT; // body truncated in the source snippet; a minimal plausible completion
}
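Because getConf() returns a clone of the context's SparkConf, the configuration of a stopped context remains usable and can be handed to SparkContext.getOrCreate(...) to build a replacement context, which is exactly what recreateStopped() relies on.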
Code example source: apache/tinkerpop
public static SparkContext create(final SparkContext sparkContext) {
    if (null != CONTEXT && !CONTEXT.isStopped() && sparkContext != CONTEXT /* exact same object => NOP */
            && !sparkContext.getConf().getBoolean("spark.driver.allowMultipleContexts", false)) {
        throw new IllegalStateException(
                "Active Spark context exists. Call Spark.close() to close it before creating a new one");
    }
    CONTEXT = sparkContext;
    return CONTEXT;
}
Code example source: apache/tinkerpop
private LinkedBlockingQueue<Kryo> initialize(final Configuration configuration) {
    // DCL is safe in this case due to volatility
    if (!INITIALIZED) {
        synchronized (UnshadedKryoShimService.class) {
            if (!INITIALIZED) {
                // so we don't get a WARN that a new configuration is being created within an active context
                final SparkConf sparkConf = null == Spark.getContext() ? new SparkConf() : Spark.getContext().getConf().clone();
                configuration.getKeys().forEachRemaining(key -> sparkConf.set(key, configuration.getProperty(key).toString()));
                final KryoSerializer serializer = new KryoSerializer(sparkConf);
                // Setup a pool backed by our spark.serializer instance
                // Reuse Gryo poolsize for Kryo poolsize (no need to copy this to SparkConf)
                KRYOS.clear();
                final int poolSize = configuration.getInt(GryoPool.CONFIG_IO_GRYO_POOL_SIZE, GryoPool.CONFIG_IO_GRYO_POOL_SIZE_DEFAULT);
                for (int i = 0; i < poolSize; i++) {
                    KRYOS.add(serializer.newKryo());
                }
                INITIALIZED = true;
            }
        }
    }
    return KRYOS;
}
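Two design choices in this example are worth noting: the double-checked locking is safe only because INITIALIZED is volatile (as the first comment says), and the active context's configuration is cloned before the shim's keys are copied onto it, so the extra serializer settings never touch the live SparkContext.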
Code example source: Impetus/Kundera
public void registerTable(EntityMetadata m, SparkClient sparkClient)
{
    final Class clazz = m.getEntityClazz();
    SparkContext sc = sparkClient.sparkContext;
    Configuration config = new Configuration();
    config.set(
            "mongo.input.uri",
            buildMongoURIPath(sc.getConf().get("hostname"), sc.getConf().get("portname"), m.getSchema(),
                    m.getTableName()));
    JavaRDD<Tuple2<Object, BSONObject>> mongoJavaRDD = sc.newAPIHadoopRDD(config, MongoInputFormat.class,
            Object.class, BSONObject.class).toJavaRDD();
    JavaRDD<Object> mongoRDD = mongoJavaRDD.flatMap(new FlatMapFunction<Tuple2<Object, BSONObject>, Object>()
    {
        @Override
        public Iterable<Object> call(Tuple2<Object, BSONObject> arg)
        {
            BSONObject obj = arg._2();
            Object javaObject = generateJavaObjectFromBSON(obj, clazz);
            return Arrays.asList(javaObject);
        }
    });
    sparkClient.sqlContext.createDataFrame(mongoRDD, m.getEntityClazz()).registerTempTable(m.getTableName());
}
Code example source: Impetus/Kundera
outputConfig.set(
        "mongo.output.uri",
        buildMongoURIPath(sc.getConf().get("hostname"), sc.getConf().get("portname"), m.getSchema(),
                m.getTableName()));
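Both Kundera snippets read the custom keys "hostname" and "portname" straight out of the SparkConf, and SparkConf.get(String) throws a NoSuchElementException when a key is absent, so those keys must have been set when the context was created. A minimal sketch of how that precondition could be satisfied; the class name and the host/port values are placeholders, not Kundera code:
import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;

public class KunderaStyleConf {
    public static void main(String[] args) {
        // Stash connection details in the SparkConf so later code can
        // recover them via sc.getConf().get(...), as the examples above do.
        SparkConf conf = new SparkConf()
                .setMaster("local[*]")
                .setAppName("kundera-mongo-demo")
                .set("hostname", "localhost") // placeholder value
                .set("portname", "27017");    // placeholder value
        SparkContext sc = new SparkContext(conf);

        String uri = "mongodb://" + sc.getConf().get("hostname")
                + ":" + sc.getConf().get("portname") + "/mydb.mytable";
        System.out.println(uri);

        sc.stop();
    }
}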
Code example source: com.basho.riak/spark-riak-connector-java
public <T> RiakTSJavaRDD<T> riakTSTable(String bucketName, Class<T> targetClass) {
    final ClassTag<T> classTag = getClassTag(targetClass);
    final RiakTSRDD<T> rdd = RiakTSRDD$.MODULE$.apply(sparkContext, bucketName, ReadConf$.MODULE$.apply(sparkContext.getConf()),
            classTag, RiakConnector$.MODULE$.apply(sparkContext.getConf()));
    return new RiakTSJavaRDD<>(rdd, classTag);
}
Code example source: com.basho.riak/spark-riak-connector
public <T> RiakTSJavaRDD<T> riakTSTable(String bucketName, StructType schema, Class<T> targetClass) {
    final ClassTag<T> classTag = getClassTag(targetClass);
    final RiakTSRDD<T> rdd = RiakTSRDD$.MODULE$.apply(sparkContext, bucketName,
            ReadConf$.MODULE$.apply(sparkContext.getConf()), Option.apply(schema),
            classTag, RiakConnector$.MODULE$.apply(sparkContext.getConf()));
    return new RiakTSJavaRDD<>(rdd, classTag);
}
Code example source: com.basho.riak/spark-riak-connector-java
@SuppressWarnings("unchecked")
public <T> RiakJavaRDD<T> riakBucket(String bucketName, String bucketType, Class<T> valueClass) {
    final ClassTag<T> classTag = getClassTag(valueClass);
    final String bucketTypeStr = bucketType == null || bucketType.isEmpty() ? "default" : bucketType;
    final RiakRDD<T> rdd = RiakRDD$.MODULE$.apply(sparkContext, bucketTypeStr, bucketName,
            ConversionFunction.create(classTag), Option.apply(null), ReadConf$.MODULE$.apply(sparkContext.getConf()), classTag);
    return new RiakJavaRDD<>(rdd, classTag);
}
Code example source: com.basho.riak/spark-riak-connector-java
public <K, V> RiakJavaPairRDD<K, V> riakBucket(String bucketName, Function2<Location, RiakObject, Tuple2<K, V>> convert, String bucketType, Class<K> keyClass, Class<V> valueClass) {
    final ClassTag<K> kClassTag = getClassTag(keyClass);
    final ClassTag<V> vClassTag = getClassTag(valueClass);
    final String bucketTypeStr = bucketType == null || bucketType.isEmpty() ? "default" : bucketType;
    final RiakRDD<Tuple2<K, V>> rdd = RiakRDD$.MODULE$.apply(sparkContext, bucketTypeStr, bucketName, convert,
            Option.apply(null), ReadConf$.MODULE$.apply(sparkContext.getConf()), kClassTag, vClassTag);
    return new RiakJavaPairRDD<>(rdd, kClassTag, vClassTag);
}
Code example source: com.basho.riak/spark-riak-connector
public <T> RiakJavaRDD<T> riakBucket(String bucketName, String bucketType, ReadDataMapperFactory<T> rdmf) {
    final String bucketTypeStr = StringUtils.isBlank(bucketType) ? "default" : bucketType;
    final Option<QueryData<?>> queryData = Option.apply(null);
    final ReadConf readConf = ReadConf$.MODULE$.apply(sparkContext.getConf());
    final RiakRDD<T> rdd = RiakRDD$.MODULE$.apply(sparkContext, bucketTypeStr,
            bucketName, queryData, readConf, getClassTag(rdmf.targetClass()), rdmf);
    return toJavaRDD(rdd, rdmf.targetClass());
}
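A note for Java readers on these Riak examples: expressions such as ReadConf$.MODULE$.apply(...) and RiakRDD$.MODULE$.apply(...) are how Scala companion-object factory methods look when called from Java; in each case the factory derives its settings from the SparkConf obtained through sparkContext.getConf().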
Code example source: cloudera-labs/envelope
@Test
public void testDriverMemoryClusterMode() {
    Properties props = new Properties();
    props.setProperty(Contexts.APPLICATION_SECTION_PREFIX + "." +
            Contexts.SPARK_CONF_PROPERTY_PREFIX + "." + Contexts.SPARK_DEPLOY_MODE_PROPERTY,
            Contexts.SPARK_DEPLOY_MODE_CLUSTER);
    props.setProperty(Contexts.APPLICATION_SECTION_PREFIX + "." + Contexts.DRIVER_MEMORY_PROPERTY, "2G");
    Config config = ConfigFactory.parseProperties(props);
    Contexts.initialize(config, Contexts.ExecutionMode.UNIT_TEST);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertEquals(sparkConf.get(Contexts.SPARK_DRIVER_MEMORY_PROPERTY), "2G");
}
Code example source: cloudera-labs/envelope
@Test
public void testApplicationNameProvided() {
    Properties props = new Properties();
    props.setProperty("application.name", "test");
    Config config = ConfigFactory.parseProperties(props);
    Contexts.initialize(config, Contexts.ExecutionMode.UNIT_TEST);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertEquals(sparkConf.get("spark.app.name"), "test");
}
Code example source: cloudera-labs/envelope
@Test
public void testSparkPassthroughGood() {
    Config config = ConfigUtils.configFromPath(
            this.getClass().getResource(RESOURCES_PATH + "/spark-passthrough-good.conf").getPath());
    Contexts.initialize(config, Contexts.ExecutionMode.UNIT_TEST);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertTrue(sparkConf.contains("spark.driver.allowMultipleContexts"));
    assertEquals("true", sparkConf.get("spark.driver.allowMultipleContexts"));
    assertTrue(sparkConf.contains("spark.master"));
    assertEquals("local[1]", sparkConf.get("spark.master"));
}
Code example source: cloudera-labs/envelope
@Test
public void testApplicationNameNotProvided() {
    Config config = ConfigFactory.empty();
    Contexts.initialize(config, Contexts.ExecutionMode.UNIT_TEST);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertEquals(sparkConf.get("spark.app.name"), "");
}
Code example source: cloudera-labs/envelope
@Test
public void testDefaultStreamingConfiguration() {
    Config config = ConfigFactory.empty();
    Contexts.initialize(config, Contexts.ExecutionMode.STREAMING);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertTrue(sparkConf.contains("spark.dynamicAllocation.enabled"));
    assertTrue(sparkConf.contains("spark.sql.shuffle.partitions"));
    assertEquals(sparkConf.get("spark.sql.catalogImplementation"), "hive");
}
Code example source: cloudera-labs/envelope
@Test
public void testDefaultBatchConfiguration() {
    Config config = ConfigFactory.empty();
    Contexts.initialize(config, Contexts.ExecutionMode.BATCH);
    SparkConf sparkConf = Contexts.getSparkSession().sparkContext().getConf();
    assertTrue(!sparkConf.contains("spark.dynamicAllocation.enabled"));
    assertTrue(!sparkConf.contains("spark.sql.shuffle.partitions"));
    assertEquals(sparkConf.get("spark.sql.catalogImplementation"), "hive");
}
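The Envelope tests above all follow one pattern: initialize a session, then assert on the SparkConf exposed through sparkContext().getConf(). A minimal self-contained version of that pattern, without Envelope's Contexts helper (the app name is made up for the sketch):
import static org.junit.Assert.assertEquals;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;
import org.junit.Test;

public class GetConfAssertionTest {
    @Test
    public void exposesSettingsThroughGetConf() {
        SparkSession session = SparkSession.builder()
                .master("local[1]")
                .appName("getconf-assertion-demo") // hypothetical app name
                .getOrCreate();
        try {
            // The session's SparkContext exposes its effective configuration.
            SparkConf conf = session.sparkContext().getConf();
            assertEquals("getconf-assertion-demo", conf.get("spark.app.name"));
        } finally {
            session.stop();
        }
    }
}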