Usage of the org.apache.spark.api.java.JavaRDD.zip() method, with code examples

This article collects code examples of the org.apache.spark.api.java.JavaRDD.zip() method, extracted from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and shows how the method is used in practice. The basics of JavaRDD.zip() are as follows:

Package: org.apache.spark.api.java
Class: JavaRDD
Method: zip

About JavaRDD.zip

zip(other) pairs this RDD element by element with another RDD, returning a JavaPairRDD<T, U> whose i-th pair holds the i-th element of each input. It assumes the two RDDs have the same number of partitions and the same number of elements in each partition (for example, when one RDD was made through a map on the other) and fails at runtime otherwise.
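
As a minimal, self-contained sketch of the method (the class and application names are illustrative, not taken from the projects below):

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaDoubleRDD;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class ZipExample {
 public static void main(String[] args) {
  SparkConf conf = new SparkConf().setMaster("local[2]").setAppName("zip-example");
  try (JavaSparkContext sc = new JavaSparkContext(conf)) {
   JavaRDD<Integer> ints = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
   // mapToDouble is a narrow transformation, so `doubles` keeps the same
   // partitioning as `ints` and the two can be zipped safely.
   JavaDoubleRDD doubles = ints.mapToDouble(Integer::doubleValue);
   JavaPairRDD<Integer, Double> zipped = ints.zip(doubles);
   // Prints 1 -> 1.0, 2 -> 2.0, ..., 5 -> 5.0
   zipped.collect().forEach(t -> System.out.println(t._1() + " -> " + t._2()));
  }
 }
}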

Code examples

Example source: org.apache.spark/spark-core_2.10 (the same test appears verbatim in the spark-core_2.11 and spark-core artifacts; some copies write the mapping as the lambda x -> 1.0 * x instead of the method reference)

@Test
public void zip() {
 JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 2, 3, 4, 5));
 JavaDoubleRDD doubles = rdd.mapToDouble(Integer::doubleValue);
 JavaPairRDD<Integer, Double> zipped = rdd.zip(doubles);
 zipped.count();
}
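
zip()'s partitioning requirement is only checked at runtime. A quick sketch of the failure mode, assuming a live SparkContext sc (exception types are as observed in recent Spark releases):

JavaRDD<Integer> a = sc.parallelize(Arrays.asList(1, 2, 3, 4), 2);
JavaRDD<Integer> b = sc.parallelize(Arrays.asList(1, 2, 3, 4), 4);
try {
 // Unequal partition counts are rejected as soon as partitions are computed.
 a.zip(b).collect();
} catch (IllegalArgumentException e) {
 // "Can't zip RDDs with unequal numbers of partitions"
}
// Equal partition counts with unequal element counts per partition fail later,
// inside the running job, with a SparkException.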

Example source: org.apache.spark/spark-core_2.11 (the same test appears verbatim in the spark-core_2.10 and spark-core artifacts)

@SuppressWarnings("unchecked")
@Test
public void keyByOnPairRDD() {
 // Regression test for SPARK-4459
 JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
 Function<Tuple2<Integer, Integer>, String> sumToString = x -> String.valueOf(x._1() + x._2());
 JavaPairRDD<Integer, Integer> pairRDD = rdd.zip(rdd);
 JavaPairRDD<String, Tuple2<Integer, Integer>> keyed = pairRDD.keyBy(sumToString);
 assertEquals(7, keyed.count());
 assertEquals(1, (long) keyed.lookup("2").get(0)._1());
}
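
Zipping an RDD with itself pairs each element with itself, so the test above operates on (1,1), (1,1), (2,2), (3,3), (5,5), (8,8), (13,13). keyBy(sumToString) then keys each pair by the string form of its component sum, which is why lookup("2") returns the two (1,1) pairs and the final assertion reads one of them.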

Example source: org.apache.spark/spark-core (the same test appears verbatim in the spark-core_2.10 and spark-core_2.11 artifacts; upstream names the predicate areOdd even though it tests for evenness, so it is renamed areEven here for clarity)

@Test
public void groupByOnPairRDD() {
 // Regression test for SPARK-4459
 JavaRDD<Integer> rdd = sc.parallelize(Arrays.asList(1, 1, 2, 3, 5, 8, 13));
 // True only when both components of the pair are even.
 Function<Tuple2<Integer, Integer>, Boolean> areEven =
  x -> (x._1() % 2 == 0) && (x._2() % 2 == 0);
 JavaPairRDD<Integer, Integer> pairRDD = rdd.zip(rdd);
 JavaPairRDD<Boolean, Iterable<Tuple2<Integer, Integer>>> oddsAndEvens = pairRDD.groupBy(areEven);
 assertEquals(2, oddsAndEvens.count());
 assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0)));  // Evens
 assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
 oddsAndEvens = pairRDD.groupBy(areEven, 1); // overload taking an explicit partition count
 assertEquals(2, oddsAndEvens.count());
 assertEquals(2, Iterables.size(oddsAndEvens.lookup(true).get(0)));  // Evens
 assertEquals(5, Iterables.size(oddsAndEvens.lookup(false).get(0))); // Odds
}
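
Because the RDD is zipped with itself, both components of every pair are equal, so the predicate effectively tests whether the underlying element is even: lookup(true) returns the two even pairs (2,2) and (8,8), and lookup(false) the five odd ones.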

Example source: com.davidbracewell/mango

@Override
public <U> SparkPairStream<T, U> zip(@NonNull MStream<U> other) {
 if (other instanceof SparkStream) {
   // Both streams are Spark-backed: zip the underlying RDDs directly.
   return new SparkPairStream<>(rdd.zip(Cast.<SparkStream<U>>as(other).rdd));
 }
 // Otherwise collect the other stream and re-parallelize it with the same
 // number of partitions as this RDD, which zip() requires.
 JavaSparkContext jsc = new JavaSparkContext(rdd.context());
 return new SparkPairStream<>(rdd.zip(jsc.parallelize(other.collect(), rdd.partitions().size())));
}
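
A caveat about the fallback branch: parallelizing the collected elements into the same number of partitions guarantees equal partition counts, but not equal element counts per partition, so zip() can still fail for some inputs. A more defensive variant (a sketch, not part of the mango library) aligns the two sides by explicit position instead:

// Hypothetical helper: pair two arbitrary JavaRDDs by position without
// relying on matching partitioning; uses scala.Tuple2.
static <A, B> JavaPairRDD<A, B> zipByIndex(JavaRDD<A> left, JavaRDD<B> right) {
 JavaPairRDD<Long, A> indexedLeft =
   left.zipWithIndex().mapToPair(t -> new Tuple2<>(t._2(), t._1()));
 JavaPairRDD<Long, B> indexedRight =
   right.zipWithIndex().mapToPair(t -> new Tuple2<>(t._2(), t._1()));
 // join shuffles both sides, so this costs more than a plain zip().
 return indexedLeft.join(indexedRight).values().mapToPair(t -> t);
}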

Example source: org.apache.spark/spark-mllib (the same fragment appears verbatim in the spark-mllib_2.10 and spark-mllib_2.11 artifacts). The excerpt is truncated in the original; the lines marked below are a plausible reconstruction of the cut-off tail, not verbatim source:

.map(org.apache.spark.mllib.linalg.Vector::asML); // tail of a statement elided in the original
JavaRDD<VectorPair> featuresExpected = dataRDD.zip(expected).map(pair -> {
 VectorPair featuresExpected1 = new VectorPair();
 featuresExpected1.setFeatures(pair._1());
 featuresExpected1.setExpected(pair._2()); // reconstructed
 return featuresExpected1;                 // reconstructed
});
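
In the MLlib test this fragment appears to come from, a typical next step is to turn the zipped bean RDD into a DataFrame for assertions, along the lines of the following (assuming a SparkSession named spark and that VectorPair is a plain Java bean with matching getters):

Dataset<Row> df = spark.createDataFrame(featuresExpected, VectorPair.class);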
