Usage of the org.apache.spark.sql.DataFrameWriter.format() method, with code examples


This article collects Java code examples for the org.apache.spark.sql.DataFrameWriter.format() method, showing how it is used in practice. The examples are extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful reference material. The details of DataFrameWriter.format() are as follows:

Package: org.apache.spark.sql
Class: DataFrameWriter
Method: format

About DataFrameWriter.format

DataFrameWriter.format(String source) specifies the underlying output data source for a write. Built-in short names include "parquet" (the default), "json", "csv", "orc", and "text"; alternatively, a fully qualified data source class name can be given to select a third-party implementation, as several of the examples below do.
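As a quick orientation before the collected examples, here is a minimal, self-contained sketch. The local SparkSession and the /tmp output paths are illustrative assumptions, not taken from any of the projects below; the point is simply that swapping the name passed to format() is all it takes to change the on-disk representation.

import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;

public class FormatExample {
  public static void main(String[] args) {
    // Assumption: a local session purely for illustration.
    SparkSession spark = SparkSession.builder()
        .appName("FormatExample")
        .master("local[*]")
        .getOrCreate();

    Dataset<Row> df = spark.range(10).toDF("id");

    // Built-in short name: "parquet" is also the default format.
    df.write()
        .format("parquet")
        .mode(SaveMode.Overwrite)
        .save("/tmp/format-example/parquet");  // hypothetical path

    // The same data written as JSON by switching the format name.
    df.write()
        .format("json")
        .mode(SaveMode.Overwrite)
        .save("/tmp/format-example/json");     // hypothetical path

    spark.stop();
  }
}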

Code examples

Code example source: com.couchbase.client/spark-connector

private CouchbaseDataFrameWriter(DataFrameWriter<Row> dfw) {
  this.dfw = dfw;
  dfw.format(SOURCE);
}
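This wrapper pins the writer to the connector's own SOURCE format constant at construction time, so code using the Couchbase writer never needs to call format() itself.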

Code example source: org.apache.spark/spark-sql_2.10 (the same test also appears in spark-sql_2.11 and spark-sql)

@Test
public void testSaveModeAPI() {
 spark
   .range(10)
   .write()
   .format("org.apache.spark.sql.test")
   .mode(SaveMode.ErrorIfExists)
   .save();
}
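SaveMode.ErrorIfExists is the default save mode: the write throws if data already exists at the destination, leaving it untouched. The other modes are Append, Overwrite, and Ignore.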

Code example source: org.apache.spark/spark-sql_2.11 (the same test also appears in spark-sql_2.10 and spark-sql)

@Test
public void saveAndLoad() {
 Map<String, String> options = new HashMap<>();
 options.put("path", path.toString());
 df.write().mode(SaveMode.ErrorIfExists).format("json").options(options).save();
 Dataset<Row> loadedDF = spark.read().format("json").options(options).load();
 checkAnswer(loadedDF, df.collectAsList());
}
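Supplying the destination through the "path" option is equivalent to passing it to save(path) directly; the test then re-reads the JSON output with the same options map and checks the round trip.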

Code example source: org.apache.spark/spark-sql_2.10 (the same test also appears in spark-sql_2.11 and spark-sql)

@Test
public void saveAndLoadWithSchema() {
  Map<String, String> options = new HashMap<>();
  options.put("path", path.toString());
  df.write().format("json").mode(SaveMode.ErrorIfExists).options(options).save();

  List<StructField> fields = new ArrayList<>();
  fields.add(DataTypes.createStructField("b", DataTypes.StringType, true));
  StructType schema = DataTypes.createStructType(fields);
  Dataset<Row> loadedDF = spark.read().format("json").schema(schema).options(options).load();

  checkAnswer(loadedDF, spark.sql("SELECT b FROM jsonTable").collectAsList());
}
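Supplying an explicit schema to the reader skips JSON schema inference and projects only the requested field, which is what the final checkAnswer verifies against the SELECT b query.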

Code example source: org.apache.spark/spark-sql_2.10 (the same test also appears in spark-sql_2.11 and spark-sql)

@Test
public void testFormatAPI() {
 spark
   .read()
   .format("org.apache.spark.sql.test")
   .load()
   .write()
   .format("org.apache.spark.sql.test")
   .save();
}
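format() exists symmetrically on DataFrameReader and DataFrameWriter, so the same custom data source class name drives both sides of this round trip.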

Code example source: org.apache.spark/spark-sql_2.10 (the same test also appears in spark-sql_2.11 and spark-sql)

@Test
public void testOptionsAPI() {
 HashMap<String, String> map = new HashMap<String, String>();
 map.put("e", "1");
 spark
   .read()
   .option("a", "1")
   .option("b", 1)
   .option("c", 1.0)
   .option("d", true)
   .options(map)
   .text()
   .write()
   .option("a", "1")
   .option("b", 1)
   .option("c", 1.0)
   .option("d", true)
   .options(map)
   .format("org.apache.spark.sql.test")
   .save();
}
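In addition to option(String, String), both the reader and the writer provide convenience overloads for boolean, long, and double values, and options(Map) merges a whole map at once; all values are ultimately stored as strings in the data source options.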

Code example source: amidst/toolbox

public static void writeDataToFolder(DataSpark data, String path, SQLContext sqlContext, String formatFile) throws Exception {
  data.getDataFrame(sqlContext).write().mode(SaveMode.Overwrite).format(formatFile).save(path);
}
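A hypothetical call, assuming a DataSpark instance and an SQLContext are already in scope (the output path and format name here are illustrative, and the declared Exception still needs handling at the call site):

writeDataToFolder(data, "/tmp/output", sqlContext, "parquet");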

Code example source: org.apache.spark/spark-hive_2.10 (the same test also appears in spark-hive_2.11)

@Test
public void saveTableAndQueryIt() {
  Map<String, String> options = new HashMap<>();
  df.write()
    .format("org.apache.spark.sql.json")
    .mode(SaveMode.Append)
    .options(options)
    .saveAsTable("javaSavedTable");

  checkAnswer(
    sqlContext.sql("SELECT * FROM javaSavedTable"),
    df.collectAsList());
}
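saveAsTable registers the output as a metastore table that can then be queried by name; with SaveMode.Append, re-running the write adds rows to the existing table instead of failing.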

Code example source: org.apache.spark/spark-hive_2.10

@Test
public void saveExternalTableAndQueryIt() {
 Map<String, String> options = new HashMap<>();
 options.put("path", path.toString());
 df.write()
  .format("org.apache.spark.sql.json")
  .mode(SaveMode.Append)
  .options(options)
  .saveAsTable("javaSavedTable");
 checkAnswer(
  sqlContext.sql("SELECT * FROM javaSavedTable"),
  df.collectAsList());
 Dataset<Row> loadedDF =
  sqlContext.createExternalTable("externalTable", "org.apache.spark.sql.json", options);
 checkAnswer(loadedDF, df.collectAsList());
 checkAnswer(
  sqlContext.sql("SELECT * FROM externalTable"),
  df.collectAsList());
}
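Because a "path" option is set, javaSavedTable is written as an unmanaged (external) table at that location. SQLContext.createExternalTable, used here to register a second table over the same data, was deprecated in Spark 2.2 in favor of spark.catalog().createTable.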
