Usage of the org.apache.spark.rdd.RDD.name() Method, with Code Examples


This article collects Java code examples of the org.apache.spark.rdd.RDD.name method and shows how RDD.name is used in practice. The examples are drawn from selected open-source projects hosted on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method:
Package: org.apache.spark.rdd
Class: RDD
Method: name

About RDD.name

RDD.name() returns the friendly name assigned to an RDD, or null if no name has been set. A name is assigned with setName(String); it appears in the Spark web UI's Storage tab and in toDebugString() output, which makes it useful for identifying cached RDDs, as the examples below show.
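
The behavior is easy to demonstrate. Below is a minimal, self-contained sketch (the application and RDD names are illustrative) showing that name() returns null until setName(...) assigns a label:

import java.util.Arrays;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class RddNameExample {
  public static void main(String[] args) {
    SparkConf conf = new SparkConf().setAppName("rdd-name-demo").setMaster("local[*]");
    try (JavaSparkContext sc = new JavaSparkContext(conf)) {
      JavaRDD<Integer> numbers = sc.parallelize(Arrays.asList(1, 2, 3));
      System.out.println(numbers.rdd().name()); // null: no name assigned yet

      numbers.setName("numbers-rdd");           // the label shown in the web UI
      System.out.println(numbers.rdd().name()); // "numbers-rdd"
    }
  }
}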

Code Examples

Code example source: apache/hive

// Excerpt from a Hive test that walks a SparkPlan's RDD lineage and checks the
// name assigned to each RDD in the DAG. In the original test, `rdds` appears to
// be re-assigned from the previous RDD's dependency list between each of the
// following blocks; those lines are omitted from this excerpt.
RDD<Tuple2<HiveKey, BytesWritable>> reducerRdd = sparkPlan.generateGraph().rdd();
Assert.assertTrue(reducerRdd.name().contains("Reducer 2"));
Assert.assertTrue(reducerRdd instanceof MapPartitionsRDD);
Assert.assertTrue(reducerRdd.creationSite().shortForm().contains("Reducer 2"));

RDD shuffledRdd = rdds.get(0).rdd();
Assert.assertTrue(shuffledRdd.name().contains("Reducer 2"));
Assert.assertTrue(shuffledRdd.name().contains("SORT"));
Assert.assertTrue(shuffledRdd instanceof ShuffledRDD);
Assert.assertTrue(shuffledRdd.creationSite().shortForm().contains("Reducer 2"));

RDD mapRdd = rdds.get(0).rdd();
Assert.assertTrue(mapRdd.name().contains("Map 1"));
Assert.assertTrue(mapRdd instanceof MapPartitionsRDD);
Assert.assertTrue(mapRdd.creationSite().shortForm().contains("Map 1"));

RDD hadoopRdd = rdds.get(0).rdd();
Assert.assertTrue(hadoopRdd.name().contains("Map 1"));
Assert.assertTrue(hadoopRdd.name().contains("test"));
Assert.assertTrue(hadoopRdd instanceof HadoopRDD);
Assert.assertTrue(hadoopRdd.creationSite().shortForm().contains("Map 1"));

Code example source: apache/tinkerpop

@Override
public boolean rm(final String location) {
  final List<String> rdds = new ArrayList<>();
  // Build a regex from the location: guarantee a trailing wildcard, normalize
  // path separators to '/', escape literal dots, and turn '*' into '.*'.
  final String wildCardLocation = (location.endsWith("*") ? location : location + "*").replace('\\', '/').replace(".", "\\.").replace("*", ".*");
  for (final RDD<?> rdd : Spark.getRDDs()) {
    if (rdd.name().replace('\\', '/').matches(wildCardLocation))
      rdds.add(rdd.name());
  }
  // Unpersist every matching RDD and report whether anything was removed.
  rdds.forEach(Spark::removeRDD);
  return rdds.size() > 0;
}
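
For reference, the location-to-regex normalization used by rm() (and by ls() below) behaves as in the following sketch; the helper name toRddNamePattern is hypothetical, introduced only to isolate the logic:

// Hypothetical helper that isolates the pattern-building logic from rm()/ls().
static String toRddNamePattern(final String location) {
  return (location.endsWith("*") ? location : location + "*")
      .replace('\\', '/')   // normalize Windows path separators
      .replace(".", "\\.")  // escape dots so they match literally
      .replace("*", ".*");  // turn the '*' wildcard into a regex wildcard
}

// toRddNamePattern("graphs/tinker.kryo") yields "graphs/tinker\.kryo.*",
// so both "graphs/tinker.kryo" and "graphs/tinker.kryo-part" would match.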

Code example source: apache/tinkerpop

@Override
public List<String> ls(final String location) {
  final List<String> rdds = new ArrayList<>();
  // Same wildcard-to-regex normalization as in rm() above.
  final String wildCardLocation = (location.endsWith("*") ? location : location + "*").replace('\\', '/').replace(".", "\\.").replace("*", ".*");
  for (final RDD<?> rdd : Spark.getRDDs()) {
    if (rdd.name().replace('\\', '/').matches(wildCardLocation))
      // List each match together with its storage level description.
      rdds.add(rdd.name() + " [" + rdd.getStorageLevel().description() + "]");
  }
  return rdds;
}

Code example source: apache/tinkerpop

public static void refresh() {
  if (null == CONTEXT)
    throw new IllegalStateException("The Spark context has not been created.");
  if (CONTEXT.isStopped())
    recreateStopped();
  // Rebuild the name index from the RDDs the context still has persisted.
  final Set<String> keepNames = new HashSet<>();
  for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
    if (null != rdd.name()) {   // name() is null until setName() has been called
      keepNames.add(rdd.name());
      NAME_TO_RDD.put(rdd.name(), rdd);
    }
  }
  // remove all stale names in the NAME_TO_RDD map
  NAME_TO_RDD.keySet().stream().filter(key -> !keepNames.contains(key)).collect(Collectors.toList()).forEach(NAME_TO_RDD::remove);
}

Code example source: apache/tinkerpop

@Override
public boolean cp(final String sourceLocation, final String targetLocation) {
  // Collect the names of all persisted RDDs under the source prefix.
  final List<String> rdds = Spark.getRDDs().stream().filter(r -> r.name().startsWith(sourceLocation)).map(RDD::name).collect(Collectors.toList());
  if (rdds.size() == 0)
    return false;
  for (final String rdd : rdds) {
    // Copy by piping the RDD through an identity filter, renaming the result,
    // and forcing materialization with cache().count().
    Spark.getRDD(rdd).toJavaRDD().filter(a -> true).setName(rdd.equals(sourceLocation) ? targetLocation : rdd.replace(sourceLocation, targetLocation)).cache().count();
    // TODO: this should use the original storage level
  }
  return true;
}
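
Note the copy strategy here: rather than cloning data directly, cp() derives a new RDD with an identity filter(a -> true), assigns it the target name via setName(), and forces it to materialize with cache().count(). As the TODO remark indicates, the copy is always cached at the default storage level rather than inheriting the storage level of the source RDD.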

Code example source: apache/lens

/**
 * Recreate the RDD. This works if the result object was saved: as long as the
 * metastore and the corresponding HDFS directory are available, the result
 * object should be able to recreate its RDD.
 *
 * @param sparkContext the Spark context
 * @return the RDD
 * @throws LensException the lens exception
 */
public RDD<List<Object>> recreateRDD(JavaSparkContext sparkContext) throws LensException {
  if (resultRDD == null) {
    try {
      // Rebuild the RDD from the saved temporary Hive table partition.
      JavaPairRDD<WritableComparable, HCatRecord> javaPairRDD = HiveTableRDD.createHiveTableRDD(sparkContext,
        HIVE_CONF, "default", tempTableName, TEMP_TABLE_PART_COL + "='" + TEMP_TABLE_PART_VAL + "'");
      resultRDD = javaPairRDD.map(new HCatRecordToObjectListMapper()).rdd();
      log.info("Created RDD {} for table {}", resultRDD.name(), tempTableName);
    } catch (IOException e) {
      throw new LensException("Error creating RDD for table " + tempTableName, e);
    }
  }
  return resultRDD;
}
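
One caveat worth noting: javaPairRDD.map(...).rdd() produces an RDD whose name() is null unless setName(...) is called on it, so the log statement above will typically print null for the RDD name.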
