Usage of the org.apache.spark.serializer.KryoSerializer.<init>() method, with code examples


This article collects Java code examples of the org.apache.spark.serializer.KryoSerializer.<init>() method and shows how it is used in practice. The examples come from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. The details of KryoSerializer.<init>() are as follows:

Package path: org.apache.spark.serializer.KryoSerializer
Class name: KryoSerializer
Method name: <init>

About KryoSerializer.<init>

KryoSerializer is Spark's Kryo-backed implementation of org.apache.spark.serializer.Serializer. Its constructor takes a SparkConf, from which it reads the Kryo-related settings (for example spark.kryo.registrator, spark.kryo.referenceTracking, and the spark.kryoserializer.buffer sizes). Serialization itself is performed through a SerializerInstance obtained from newInstance().
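
Below is a minimal sketch of constructing the serializer directly; the buffer setting and the String payload are illustrative, not taken from any of the examples in this article:

import java.nio.ByteBuffer;

import org.apache.spark.SparkConf;
import org.apache.spark.serializer.KryoSerializer;
import org.apache.spark.serializer.SerializerInstance;

import scala.reflect.ClassTag$;

public class KryoSerializerSketch {
  public static void main(String[] args) {
    // <init> takes a SparkConf; Kryo settings are read from it at construction time.
    SparkConf conf = new SparkConf().set("spark.kryoserializer.buffer", "64k");
    KryoSerializer serializer = new KryoSerializer(conf);

    // The serializer itself is a factory; a SerializerInstance does the actual work.
    SerializerInstance instance = serializer.newInstance();
    ByteBuffer bytes = instance.serialize("hello", ClassTag$.MODULE$.apply(String.class));
    String back = instance.deserialize(bytes, ClassTag$.MODULE$.apply(String.class));
    System.out.println(back); // prints "hello"
  }
}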

Code examples

Example source: org.apache.spark/spark-core_2.10 (the same test appears verbatim in org.apache.spark/spark-core and spark-core_2.11, so it is shown only once here)

@Test
public void combineByKey() {
  JavaRDD<Integer> originalRDD = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5, 6));
  Function<Integer, Integer> keyFunction = v1 -> v1 % 3;
  Function<Integer, Integer> createCombinerFunction = v1 -> v1;
  Function2<Integer, Integer, Integer> mergeValueFunction = (v1, v2) -> v1 + v2;
  JavaPairRDD<Integer, Integer> combinedRDD = originalRDD.keyBy(keyFunction)
    .combineByKey(createCombinerFunction, mergeValueFunction, mergeValueFunction);
  Map<Integer, Integer> results = combinedRDD.collectAsMap();
  ImmutableMap<Integer, Integer> expected = ImmutableMap.of(0, 9, 1, 5, 2, 7);
  assertEquals(expected, results);
  Partitioner defaultPartitioner = Partitioner.defaultPartitioner(
    combinedRDD.rdd(),
    JavaConverters.collectionAsScalaIterableConverter(
      Collections.<RDD<?>>emptyList()).asScala().toSeq());
  // The extended overload accepts an explicit serializer for the shuffle;
  // here a fresh KryoSerializer built from a default SparkConf is passed in.
  combinedRDD = originalRDD.keyBy(keyFunction)
    .combineByKey(
      createCombinerFunction,
      mergeValueFunction,
      mergeValueFunction,
      defaultPartitioner,
      false,
      new KryoSerializer(new SparkConf()));
  results = combinedRDD.collectAsMap();
  assertEquals(expected, results);
}
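
For reference, the expected map follows directly from keyFunction = v % 3: key 0 collects {3, 6} (sum 9), key 1 collects {1, 4} (sum 5), and key 2 collects {2, 5} (sum 7). Passing new KryoSerializer(new SparkConf()) overrides the serializer for that one operation only; outside of tests, the usual approach (a sketch, not part of this test) is to enable Kryo globally through the configuration:

// Sketch: make Kryo the application-wide serializer instead of passing one per call.
SparkConf conf = new SparkConf()
  .setAppName("kryo-enabled-app") // hypothetical application name
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");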

Example source: apache/tinkerpop (the same method also appears verbatim in org.apache.tinkerpop/spark-gremlin, so it is shown only once here)

private LinkedBlockingQueue<Kryo> initialize(final Configuration configuration) {
  // DCL is safe in this case due to volatility
  if (!INITIALIZED) {
    synchronized (UnshadedKryoShimService.class) {
      if (!INITIALIZED) {
        // so we don't get a WARN that a new configuration is being created within an active context
        final SparkConf sparkConf = null == Spark.getContext() ? new SparkConf() : Spark.getContext().getConf().clone();
        configuration.getKeys().forEachRemaining(key -> sparkConf.set(key, configuration.getProperty(key).toString()));
        final KryoSerializer serializer = new KryoSerializer(sparkConf);
        // Setup a pool backed by our spark.serializer instance
        // Reuse Gryo poolsize for Kryo poolsize (no need to copy this to SparkConf)
        KRYOS.clear();
        final int poolSize = configuration.getInt(GryoPool.CONFIG_IO_GRYO_POOL_SIZE, GryoPool.CONFIG_IO_GRYO_POOL_SIZE_DEFAULT);
        for (int i = 0; i < poolSize; i++) {
          KRYOS.add(serializer.newKryo());
        }
        INITIALIZED = true;
      }
    }
  }

  return KRYOS;
}
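
A hypothetical consumer of this pool (not shown in the TinkerPop source) would borrow a Kryo instance, use it, and put it back, so the pool size stays constant:

// Hypothetical helper, assuming the KRYOS queue populated by initialize() above
// and the unshaded com.esotericsoftware.kryo.io.Output class.
private byte[] serializeWithPool(final Object value) throws InterruptedException {
  final Kryo kryo = KRYOS.take();               // borrow; blocks until an instance is free
  try {
    final Output output = new Output(4096, -1); // growable, unbounded buffer
    kryo.writeClassAndObject(output, value);
    output.close();
    return output.toBytes();
  } finally {
    KRYOS.put(kryo);                            // always return the instance to the pool
  }
}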

Example source: uber/marmaray

/**
 * KryoSerializer is the default serializer
 * @return SerializerInstance
 */
public static SerializerInstance getSerializerInstance() {
  if (serializerInstance.get() == null) {
    serializerInstance.set(new KryoSerializer(SparkEnv.get().conf()).newInstance());
  }
  return serializerInstance.get();
}
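
A round trip through the cached instance might look like the sketch below; serializerInstance is assumed to be a ThreadLocal<SerializerInstance> (SerializerInstance itself is not thread-safe), and MyRecord is a hypothetical serializable type:

// Hypothetical usage of getSerializerInstance() above
// (ClassTag$ is scala.reflect.ClassTag$, buf is a java.nio.ByteBuffer).
SerializerInstance si = getSerializerInstance();
ByteBuffer buf = si.serialize(record, ClassTag$.MODULE$.apply(MyRecord.class));
MyRecord copy = si.deserialize(buf, ClassTag$.MODULE$.apply(MyRecord.class));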

Example source: apache/incubator-nemo

/**
 * Derive Spark serializer from a spark context.
 *
 * @param sparkContext spark context to derive the serializer from.
 * @return the serializer.
 */
public static Serializer deriveSerializerFrom(final org.apache.spark.SparkContext sparkContext) {
  if (sparkContext.conf().get("spark.serializer", "")
      .equals("org.apache.spark.serializer.KryoSerializer")) {
    return new KryoSerializer(sparkContext.conf());
  } else {
    return new JavaSerializer(sparkContext.conf());
  }
}
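
For this helper to take the Kryo branch, the SparkContext must have been created with spark.serializer pointing at the Kryo implementation; a minimal sketch (the application name and master are hypothetical):

SparkConf conf = new SparkConf()
  .setAppName("nemo-kryo-example")
  .setMaster("local[*]")
  .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
SparkContext sparkContext = new SparkContext(conf);
Serializer serializer = deriveSerializerFrom(sparkContext); // returns a KryoSerializer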

