org.apache.spark.api.java.JavaSparkContext.binaryFiles()方法的使用及代码示例

x33g5p2x  于2022-01-21 转载在 其他  
字(5.6k)|赞(0)|评价(0)|浏览(167)

本文整理了Java中org.apache.spark.api.java.JavaSparkContext.binaryFiles()方法的一些代码示例,展示了JavaSparkContext.binaryFiles()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。JavaSparkContext.binaryFiles()方法的具体详情如下:
包路径:org.apache.spark.api.java.JavaSparkContext
类名称:JavaSparkContext
方法名:binaryFiles

JavaSparkContext.binaryFiles介绍

暂无

代码示例

代码示例来源:origin: org.apache.spark/spark-core

@Test
public void binaryFiles() throws Exception {
 // Reusing the wholeText files example: write one small binary file, then
 // read it back through sc.binaryFiles() and verify the bytes round-trip.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName, 3);
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

@Test
public void binaryFiles() throws Exception {
 // Reusing the wholeText files example: write one small binary file, then
 // read it back through sc.binaryFiles() and verify the bytes round-trip.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName, 3);
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

@Test
public void binaryFiles() throws Exception {
 // Reusing the wholeText files example: write one small binary file, then
 // read it back through sc.binaryFiles() and verify the bytes round-trip.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName, 3);
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.11

@Test
public void binaryFilesCaching() throws Exception {
 // Reusing the wholeText files example: write one small binary file, read it
 // via sc.binaryFiles().cache(), force materialization, then verify the
 // cached streams still yield the original bytes on a second read.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName).cache();
 readRDD.foreach(pair -> pair._2().toArray()); // force each file to be read and cached
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: org.apache.spark/spark-core_2.10

@Test
public void binaryFilesCaching() throws Exception {
 // Reusing the wholeText files example: write one small binary file, read it
 // via sc.binaryFiles().cache(), force materialization, then verify the
 // cached streams still yield the original bytes on a second read.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName).cache();
 readRDD.foreach(pair -> pair._2().toArray()); // force each file to be read and cached
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: org.apache.spark/spark-core

@Test
public void binaryFilesCaching() throws Exception {
 // Reusing the wholeText files example: write one small binary file, read it
 // via sc.binaryFiles().cache(), force materialization, then verify the
 // cached streams still yield the original bytes on a second read.
 byte[] content1 = "spark is easy to use.\n".getBytes(StandardCharsets.UTF_8);
 String tempDirName = tempDir.getAbsolutePath();
 File file1 = new File(tempDirName + "/part-00000");
 // try-with-resources guarantees the stream and channel are closed even if
 // write() throws; the original leaked both on failure.
 try (FileOutputStream fos1 = new FileOutputStream(file1);
    FileChannel channel1 = fos1.getChannel()) {
  channel1.write(ByteBuffer.wrap(content1));
 }
 JavaPairRDD<String, PortableDataStream> readRDD = sc.binaryFiles(tempDirName).cache();
 readRDD.foreach(pair -> pair._2().toArray()); // force each file to be read and cached
 List<Tuple2<String, PortableDataStream>> result = readRDD.collect();
 // Guard: the for-loop below would pass vacuously on an empty result.
 if (result.isEmpty()) {
  throw new AssertionError("expected at least one (path, stream) pair from binaryFiles");
 }
 for (Tuple2<String, PortableDataStream> res : result) {
  assertArrayEquals(content1, res._2().toArray());
 }
}

代码示例来源:origin: deeplearning4j/dl4j-examples

JavaRDD<DataSet> data = sc.binaryFiles(testDir + "/*").map(new LoadDataFunction());

代码示例来源:origin: org.datavec/datavec-spark

JavaPairRDD<String, PortableDataStream> first = sc.binaryFiles(path1);
JavaPairRDD<String, PortableDataStream> second = sc.binaryFiles(path2);

代码示例来源:origin: org.nd4j/canova-spark

JavaPairRDD<String,PortableDataStream> first = sc.binaryFiles(path1);
JavaPairRDD<String,PortableDataStream> second = sc.binaryFiles(path2);

代码示例来源:origin: org.datavec/datavec-spark_2.11

JavaPairRDD<String, PortableDataStream> first = sc.binaryFiles(path1);
JavaPairRDD<String, PortableDataStream> second = sc.binaryFiles(path2);

相关文章

微信公众号

最新文章

更多