Usage of the org.apache.spark.api.java.JavaSparkContext.addJar() method, with code examples


This article collects Java code examples for the org.apache.spark.api.java.JavaSparkContext.addJar() method and shows how it is used in practice. The examples come mainly from GitHub, Stack Overflow, and Maven artifacts, extracted from selected projects, and should serve as a useful reference. Details of the JavaSparkContext.addJar() method are as follows:
Package: org.apache.spark.api.java
Class: JavaSparkContext
Method: addJar

About JavaSparkContext.addJar

Per the Spark API documentation, addJar(String path) adds a JAR dependency for all tasks that will be executed on this SparkContext in the future. The path can be a local file, a file in HDFS (or another Hadoop-supported filesystem), an HTTP/HTTPS/FTP URI, or local:/path for a file already present on every worker node.
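As a minimal sketch (the application name and jar path below are hypothetical placeholders), a typical call registers the jar right after the context is created, so classes in it are available to every task submitted afterwards:

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class AddJarSketch {
 public static void main(String[] args) {
  // Hypothetical app name and jar path, for illustration only.
  SparkConf conf = new SparkConf().setAppName("AddJarSketch");
  JavaSparkContext sc = new JavaSparkContext(conf);
  // Ship the jar to the cluster; tasks launched afterwards can load classes from it.
  sc.addJar("/path/to/extra-dependencies.jar");
  // ... build and run RDD jobs that rely on classes from the added jar ...
  sc.stop();
 }
}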

Code examples

Code example source: apache/hive

private void addJars(String addedJars) {
 for (String addedJar : CSV_SPLITTER.split(Strings.nullToEmpty(addedJars))) {
  if (!localJars.contains(addedJar)) {
   localJars.add(addedJar);
   sc.addJar(addedJar);
  }
 }
}

Code example source: apache/hive

@Override
public Serializable call(JobContext jc) throws Exception {
 jc.sc().addJar(path);
 // Following remote job may refer to classes in this jar, and the remote job would be executed
 // in a different thread, so we add this jar path to JobContext for further usage.
 jc.getAddedJars().put(path, System.currentTimeMillis());
 return null;
}

Code example source: apache/drill

private void addJars(String addedJars) {
 for (String addedJar : CSV_SPLITTER.split(Strings.nullToEmpty(addedJars))) {
  if (!localJars.contains(addedJar)) {
   localJars.add(addedJar);
   sc.addJar(addedJar);
  }
 }
}

Code example source: apache/hive

private LocalHiveSparkClient(SparkConf sparkConf, HiveConf hiveConf)
  throws FileNotFoundException, MalformedURLException {
 String regJar = null;
 // the registrator jar should already be in CP when not in test mode
 if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEST)) {
  String kryoReg = sparkConf.get("spark.kryo.registrator", "");
  if (SparkClientUtilities.HIVE_KRYO_REG_NAME.equals(kryoReg)) {
   regJar = SparkClientUtilities.findKryoRegistratorJar(hiveConf);
   SparkClientUtilities.addJarToContextLoader(new File(regJar));
  }
 }
 sc = new JavaSparkContext(sparkConf);
 if (regJar != null) {
  sc.addJar(regJar);
 }
 jobMetricsListener = new JobMetricsListener();
 sc.sc().addSparkListener(jobMetricsListener);
}

Code example source: apache/tinkerpop

@Override
protected void loadJar(final Configuration hadoopConfiguration, final File file, final Object... params) {
  final JavaSparkContext sparkContext = (JavaSparkContext) params[0];
  sparkContext.addJar(file.getAbsolutePath());
}

Code example source: ai.grakn/grakn-kb

@Override
protected void loadJar(final Configuration hadoopConfiguration, final File file, final Object... params) {
  final JavaSparkContext sparkContext = (JavaSparkContext) params[0];
  sparkContext.addJar(file.getAbsolutePath());
}

Code example source: org.apache.tinkerpop/spark-gremlin

@Override
protected void loadJar(final Configuration hadoopConfiguration, final File file, final Object... params) {
  final JavaSparkContext sparkContext = (JavaSparkContext) params[0];
  sparkContext.addJar(file.getAbsolutePath());
}

Code example source: stackoverflow.com

JavaSparkContext sc = new JavaSparkContext(conf);
sc.addJar("./target/SparkPOC-0.0.1-SNAPSHOT-jar-with-dependencies.jar");

Code example source: org.qcri.rheem/rheem-spark

private void registerJarIfNotNull(String path) {
  if (path != null) this.sparkContextReference.get().addJar(path);
}

Code example source: com.facebook.presto.hive/hive-apache

private void addJars(String addedJars) {
 for (String addedJar : CSV_SPLITTER.split(Strings.nullToEmpty(addedJars))) {
  if (!localJars.contains(addedJar)) {
   localJars.add(addedJar);
   sc.addJar(addedJar);
  }
 }
}

Code example source: org.spark-project.hive/spark-client

@Override
public Serializable call(JobContext jc) throws Exception {
 jc.sc().addJar(path);
 // Following remote job may refer to classes in this jar, and the remote job would be executed
 // in a different thread, so we add this jar path to JobContext for further usage.
 jc.getAddedJars().add(path);
 return null;
}

Code example source: com.github.hyukjinkwon/spark-client

@Override
public Serializable call(JobContext jc) throws Exception {
 jc.sc().addJar(path);
 // Following remote job may refer to classes in this jar, and the remote job would be executed
 // in a different thread, so we add this jar path to JobContext for further usage.
 jc.getAddedJars().add(path);
 return null;
}

Code example source: javachen/learning-hadoop

public static void main(String args[]) {
 if (args.length == 0) {
  System.out.println("JavaHBaseBulkDeleteExample  {master} {tableName} ");
 }
 String master = args[0];
 String tableName = args[1];
 JavaSparkContext jsc = new JavaSparkContext(master,
   "JavaHBaseBulkDeleteExample");
 jsc.addJar("SparkHBase.jar");
 List<byte[]> list = new ArrayList<byte[]>();
 list.add(Bytes.toBytes("1"));
 list.add(Bytes.toBytes("2"));
 list.add(Bytes.toBytes("3"));
 list.add(Bytes.toBytes("4"));
 list.add(Bytes.toBytes("5"));
 JavaRDD<byte[]> rdd = jsc.parallelize(list);
 Configuration conf = HBaseConfiguration.create();
 conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
 conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
 JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
 hbaseContext.bulkDelete(rdd, tableName, new DeleteFunction(), 4);
}

Code example source: javachen/learning-hadoop

public static void main(String args[]) {
 if (args.length == 0) {
  System.out
    .println("JavaHBaseBulkGetExample  {master} {tableName}");
 }
 String master = args[0];
 String tableName = args[1];
 JavaSparkContext jsc = new JavaSparkContext(master,
   "JavaHBaseBulkGetExample");
 jsc.addJar("SparkHBase.jar");
 List<byte[]> list = new ArrayList<byte[]>();
 list.add(Bytes.toBytes("1"));
 list.add(Bytes.toBytes("2"));
 list.add(Bytes.toBytes("3"));
 list.add(Bytes.toBytes("4"));
 list.add(Bytes.toBytes("5"));
 JavaRDD<byte[]> rdd = jsc.parallelize(list);
 Configuration conf = HBaseConfiguration.create();
 conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
 conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
 JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
 hbaseContext.bulkGet(tableName, 2, rdd, new GetFunction(),
   new ResultFunction());
}

Code example source: javachen/learning-hadoop

public static void main(String args[]) {
 if (args.length == 0) {
  System.out
    .println("JavaHBaseBulkIncrementExample  {master} {tableName} {columnFamily}");
 }
 String master = args[0];
 String tableName = args[1];
 String columnFamily = args[2];
 JavaSparkContext jsc = new JavaSparkContext(master,
   "JavaHBaseBulkIncrementExample");
 jsc.addJar("SparkHBase.jar");
 List<String> list = new ArrayList<String>();
 list.add("1," + columnFamily + ",counter,1");
 list.add("2," + columnFamily + ",counter,2");
 list.add("3," + columnFamily + ",counter,3");
 list.add("4," + columnFamily + ",counter,4");
 list.add("5," + columnFamily + ",counter,5");
 JavaRDD<String> rdd = jsc.parallelize(list);
 Configuration conf = HBaseConfiguration.create();
 conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
 conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
 JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
 hbaseContext.bulkIncrement(rdd, tableName, new IncrementFunction(), 4);
}

Code example source: javachen/learning-hadoop

public static void main(String args[]) {
 if (args.length == 0) {
  System.out
    .println("JavaHBaseBulkPutExample  {master} {tableName} {columnFamily}");
 }
 String master = args[0];
 String tableName = args[1];
 String columnFamily = args[2];
 JavaSparkContext jsc = new JavaSparkContext(master,
   "JavaHBaseBulkPutExample");
 jsc.addJar("SparkHBase.jar");
 List<String> list = new ArrayList<String>();
 list.add("1," + columnFamily + ",a,1");
 list.add("2," + columnFamily + ",a,2");
 list.add("3," + columnFamily + ",a,3");
 list.add("4," + columnFamily + ",a,4");
 list.add("5," + columnFamily + ",a,5");
 JavaRDD<String> rdd = jsc.parallelize(list);
 Configuration conf = HBaseConfiguration.create();
 conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
 conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));
 JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);
 hbaseContext.bulkPut(rdd, tableName, new PutFunction(), true);
}

Code example source: javachen/learning-hadoop

public static void main(String args[]) {
  if (args.length == 0) {
   System.out
     .println("JavaHBaseDistributedScan  {master} {tableName}");
  }

  String master = args[0];
  String tableName = args[1];

  JavaSparkContext jsc = new JavaSparkContext(master,
    "JavaHBaseDistributedScan");
  jsc.addJar("SparkHBase.jar");

  Configuration conf = HBaseConfiguration.create();
  conf.addResource(new Path("/etc/hbase/conf/core-site.xml"));
  conf.addResource(new Path("/etc/hbase/conf/hbase-site.xml"));

  JavaHBaseContext hbaseContext = new JavaHBaseContext(jsc, conf);

  Scan scan = new Scan();
  scan.setCaching(100);
  
  JavaRDD<Tuple2<byte[], List<Tuple3<byte[], byte[], byte[]>>>> javaRdd = hbaseContext.hbaseRDD(tableName, scan);
  
  List<Tuple2<byte[], List<Tuple3<byte[], byte[], byte[]>>>> results = javaRdd.collect();
  
  results.size();
 }

Code example source: javachen/learning-hadoop

public static void setUp() {
 jsc = new JavaSparkContext("local", "JavaHBaseContextSuite");
 jsc.addJar("SparkHBase.jar");
 
 tempDir = Files.createTempDir();
 tempDir.deleteOnExit();
 htu = HBaseTestingUtility.createLocalHTU();
 try {
  System.out.println("cleaning up test dir");
  htu.cleanupTestDir();
  System.out.println("starting minicluster");
  htu.startMiniZKCluster();
  htu.startMiniHBaseCluster(1, 1);
  System.out.println(" - minicluster started");
  try {
   htu.deleteTable(Bytes.toBytes(tableName));
  } catch (Exception e) {
   System.out.println(" - no table " + tableName + " found");
  }
  System.out.println(" - creating table " + tableName);
  htu.createTable(Bytes.toBytes(tableName), Bytes.toBytes(columnFamily));
  System.out.println(" - created table");
 } catch (Exception e1) {
  throw new RuntimeException(e1);
 }
}

Code example source: javachen/learning-hadoop

jsc.addJar("SparkHBase.jar");

Code example source: com.cloudera.livy/livy-rsc

protected void addJarOrPyFile(String path) throws Exception {
 File localCopyDir = new File(jc.getLocalTmpDir(), "__livy__");
 File localCopy = copyFileToLocal(localCopyDir, path, jc.sc().sc());
 addLocalFileToClassLoader(localCopy);
 jc.sc().addJar(path);
}
