Usage of the org.apache.spark.SparkContext.conf() method, with code examples


This article collects Java code examples of the org.apache.spark.SparkContext.conf() method to show how SparkContext.conf() is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and are intended as practical references. Details of the SparkContext.conf() method are as follows:
Package path: org.apache.spark.SparkContext
Class name: SparkContext
Method name: conf

SparkContext.conf overview

The source provides no formal description. As the snippets below show, conf() returns the SparkConf associated with a live SparkContext, so callers can read configuration values (for example spark.serializer) and, as some of the projects do, set or remove entries on the running context.
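
Before the per-project snippets, here is a minimal, self-contained sketch of how conf() is typically reached from Java. The class name SparkContextConfExample, the app name, and the local[*] master are illustrative placeholders, not taken from any of the projects cited below.

import org.apache.spark.SparkConf;
import org.apache.spark.SparkContext;
import org.apache.spark.api.java.JavaSparkContext;

public class SparkContextConfExample {
 public static void main(String[] args) {
  // Placeholder app name and master, for local experimentation only.
  SparkConf initial = new SparkConf().setAppName("conf-example").setMaster("local[*]");
  JavaSparkContext jsc = new JavaSparkContext(initial);

  // sc() exposes the underlying Scala SparkContext; conf() returns its SparkConf.
  SparkContext sc = jsc.sc();
  SparkConf conf = sc.conf();

  // Read a setting, falling back to a default when it has not been set.
  String serializer = conf.get("spark.serializer",
    "org.apache.spark.serializer.JavaSerializer");
  System.out.println("spark.serializer = " + serializer);

  jsc.stop();
 }
}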

Code examples

Code example source: apache/hive

@Override
public SparkConf getSparkConf() {
 return sc.sc().conf();
}

Code example source: apache/drill

@Override
public SparkConf getSparkConf() {
 return sc.sc().conf();
}

Code example source: com.facebook.presto.hive/hive-apache

@Override
public SparkConf getSparkConf() {
 return sc.sc().conf();
}

Code example source: apache/incubator-nemo

/**
 * Derive Spark serializer from a spark context.
 *
 * @param sparkContext spark context to derive the serializer from.
 * @return the serializer.
 */
public static Serializer deriveSerializerFrom(final org.apache.spark.SparkContext sparkContext) {
 if (sparkContext.conf().get("spark.serializer", "")
   .equals("org.apache.spark.serializer.KryoSerializer")) {
  return new KryoSerializer(sparkContext.conf());
 } else {
  return new JavaSerializer(sparkContext.conf());
 }
}

Code example source: io.snappydata/snappydata-core

public static synchronized SnappySharedState create(SparkContext sparkContext)
  throws SparkException {
 // force in-memory catalog to avoid initializing hive for SnappyData
 final String catalogImpl = sparkContext.conf().get(CATALOG_IMPLEMENTATION, null);
 // there is a small thread-safety issue: if multiple threads concurrently
 // initialize a normal SparkSession vs a SnappySession, the former can
 // end up with the in-memory catalog too
 sparkContext.conf().set(CATALOG_IMPLEMENTATION, "in-memory");
 createListenerAndUI(sparkContext);
 final SnappySharedState sharedState = new SnappySharedState(sparkContext);
 // reset the catalog implementation to original
 if (catalogImpl != null) {
  sparkContext.conf().set(CATALOG_IMPLEMENTATION, catalogImpl);
 } else {
  sparkContext.conf().remove(CATALOG_IMPLEMENTATION);
 }
 return sharedState;
}

Code example source: Netflix/iceberg

public static Seq<CatalogTablePartition> partitions(SparkSession spark, String name) {
  List<String> parts = Lists.newArrayList(Splitter.on('.').limit(2).split(name));
  String db = parts.size() == 1 ? "default" : parts.get(0);
  String table = parts.get(parts.size() == 1 ? 0 : 1);

  HiveClient client = HiveUtils$.MODULE$.newClientForMetadata(
    spark.sparkContext().conf(),
    spark.sparkContext().hadoopConfiguration());
  return client.getPartitions(db, table, Option.empty());
 }

Code example source: io.snappydata/snappydata-core

/**
 * Create Snappy's SQL Listener instead of SQLListener
 */
private static void createListenerAndUI(SparkContext sc) {
 SQLListener initListener = ExternalStoreUtils.getSQLListener().get();
 if (initListener == null) {
  SnappySQLListener listener = new SnappySQLListener(sc.conf());
  if (ExternalStoreUtils.getSQLListener().compareAndSet(null, listener)) {
   sc.addSparkListener(listener);
   scala.Option<SparkUI> ui = sc.ui();
   // embedded mode attaches SQLTab later via ToolsCallbackImpl that also
   // takes care of injecting any authentication module if configured
   if (ui.isDefined() &&
     !(SnappyContext.getClusterMode(sc) instanceof SnappyEmbeddedMode)) {
    new SQLTab(listener, ui.get());
   }
  }
 }
}

Code example source: jgperrin/net.jgp.labs.spark

private void start() {
  SparkConf conf = new SparkConf().setAppName("Concurrency Lab 001")
    .setMaster(Config.MASTER);
  JavaSparkContext sc = new JavaSparkContext(conf);
  SparkSession spark = SparkSession.builder().config(conf).getOrCreate();

  conf = spark.sparkContext().conf();
  System.out.println(conf.get("hello"));

  Dataset<Row> df = spark.sql("SELECT * from myView");
  df.show();
 }

Code example source: scipr-lab/dizk

spark.sparkContext().conf().set("spark.files.overwrite", "true");
spark.sparkContext().conf()
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
spark.sparkContext().conf().registerKryoClasses(SparkUtils.zksparkClasses());
spark.sparkContext().conf().set("spark.files.overwrite", "true");
spark.sparkContext().conf()
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
spark.sparkContext().conf().registerKryoClasses(SparkUtils.zksparkClasses());
spark.sparkContext().conf().set("spark.files.overwrite", "true");
spark.sparkContext().conf()
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
spark.sparkContext().conf().registerKryoClasses(SparkUtils.zksparkClasses());
spark.sparkContext().conf().set("spark.files.overwrite", "true");
spark.sparkContext().conf()
    .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
spark.sparkContext().conf().registerKryoClasses(SparkUtils.zksparkClasses());

Code example source: uber/marmaray

private void assertExpectationsOnSparkContext(
  @NonNull final SparkArgs sparkArgs,
  @NonNull final SparkContext sc) {
  final String registeredAvroSchemaStr = sc.conf().getAvroSchema().head()._2();
  final Schema expectedAvroSchema = sparkArgs.getAvroSchemas().get().get(0);
  Assert.assertEquals(expectedAvroSchema.toString(), registeredAvroSchemaStr);
  Assert.assertEquals("foo_bar", sc.appName());
  Assert.assertEquals("512", sc.hadoopConfiguration().get("mapreduce.map.memory.mb"));
}

Code example source: io.snappydata/snappydata-core

String globalDBName = Utils.toUpperCase(sparkContext().conf().get(
  StaticSQLConf.GLOBAL_TEMP_DATABASE()));
if (this.snappyCatalog.databaseExists(globalDBName)) {
