scala.Option.get()方法的使用及代码示例

x33g5p2x  于2022-01-25 转载在 其他  
字(8.5k)|赞(0)|评价(0)|浏览(139)

本文整理了Java中scala.Option.get()方法的一些代码示例,展示了Option.get()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Option.get()方法的具体详情如下:
包路径:scala.Option
类名称:Option
方法名:get

Option.get介绍

暂无

代码示例

代码示例来源:origin: elastic/elasticsearch-hadoop

@Override
public String getProperty(String name) {
  // Look the key up verbatim first, then retry with the "spark." prefix.
  Option<String> direct = cfg.getOption(name);
  Option<String> resolved = direct.isDefined() ? direct : cfg.getOption("spark." + name);
  // Unwrap the Option, mapping None onto the Java null convention.
  return resolved.isDefined() ? resolved.get() : null;
}

代码示例来源:origin: apache/flink

/**
 * Update the persisted framework ID.
 * @param frameworkID the new ID or empty to remove the persisted ID.
 * @throws Exception on ZK failures, interruptions.
 */
@Override
public void setFrameworkID(Option<Protos.FrameworkID> frameworkID) throws Exception {
  synchronized (startStopLock) {
    verifyIsRunning();
    // An absent ID is written as a zero-length payload, i.e. removal.
    final byte[] payload;
    if (frameworkID.isDefined()) {
      payload = frameworkID.get().getValue().getBytes(ConfigConstants.DEFAULT_CHARSET);
    } else {
      payload = new byte[0];
    }
    frameworkIdInZooKeeper.setValue(payload);
  }
}

代码示例来源:origin: elastic/elasticsearch-hadoop

@Override
public String getProperty(String name) {
  // Probe the exact key, then its "spark."-prefixed variant, in order.
  for (String key : new String[] {name, "spark." + name}) {
    Option<String> candidate = cfg.getOption(key);
    if (candidate.isDefined()) {
      return candidate.get();
    }
  }
  // Neither form of the key is configured.
  return null;
}

代码示例来源:origin: twitter/distributedlog

private List<Future<DLSN>> asyncWriteBulk(List<LogRecord> records) {
  final int total = records.size();
  final ArrayList<Future<DLSN>> outstanding = new ArrayList<Future<DLSN>>(total);
  for (Iterator<LogRecord> it = records.iterator(); it.hasNext();) {
    LogRecord record = it.next();
    // The second argument flags whether this is the final record of the batch.
    Future<DLSN> pending = asyncWrite(record, !it.hasNext());
    outstanding.add(pending);
    // Stop submitting as soon as an already-completed write reports failure.
    Option<Try<DLSN>> completed = pending.poll();
    if (completed.isDefined() && completed.get().isThrow()) {
      break;
    }
  }
  // Records never submitted (after an early abort) get cancelled futures.
  if (total > outstanding.size()) {
    appendCancelledFutures(outstanding, total - outstanding.size());
  }
  return outstanding;
}

代码示例来源:origin: apache/hive

public static String findKryoRegistratorJar(HiveConf conf) throws FileNotFoundException {
 if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_IN_TEST)) {
  // Test mode: resolve the registrator jar out of the local maven repository.
  String repo = System.getProperty("maven.local.repository");
  String version = System.getProperty("hive.version");
  String jarName = HIVE_KRYO_REG_JAR_NAME + "-" + version + ".jar";
  String jar = Joiner.on(File.separator).join(new String[]{repo, "org", "apache", "hive",
    HIVE_KRYO_REG_JAR_NAME, version, jarName});
  if (!new File(jar).exists()) {
   throw new FileNotFoundException(jar + " doesn't exist.");
  }
  return jar;
 }
 // Normal mode: look for the jar next to the jar carrying SparkClientUtilities.
 Option<String> selfJar = SparkContext.jarOfClass(SparkClientUtilities.class);
 if (!selfJar.isDefined()) {
  throw new FileNotFoundException("Cannot find the path to hive-exec.jar");
 }
 File containing = new File(selfJar.get());
 File[] candidates = containing.getParentFile().listFiles((dir, name) ->
   name.startsWith(HIVE_KRYO_REG_JAR_NAME));
 if (candidates != null && candidates.length > 0) {
  return candidates[0].getAbsolutePath();
 }
 throw new FileNotFoundException("Cannot find the " + HIVE_KRYO_REG_JAR_NAME +
   " jar under " + containing.getParent());
}

代码示例来源:origin: apache/hive

@Override
public String getWebUIURL() {
 try {
  // Read the Option ONCE: the original called uiWebUrl() twice, so isDefined()
  // and get() operated on two different snapshots — get() could throw if the
  // URL became undefined between the two calls.
  Option<String> url = sparkContext.sc().uiWebUrl();
  return url.isDefined() ? SparkUtilities.reverseDNSLookupURL(url.get()) : "UNDEFINED";
 } catch (Exception e) {
  LOG.warn("Failed to get web UI URL.", e);
 }
 // Only reachable via the catch path.
 return "UNKNOWN";
}

代码示例来源:origin: twosigma/beakerx

private String jobLink(int jobId) {
 // Snapshot the Option once; the original evaluated uiWebUrl() separately for
 // the isDefined() check and the get(), a check-then-act on two reads.
 Option<String> url = getSparkSession().sparkContext().uiWebUrl();
 // Empty string when the Spark UI is not running.
 return url.isDefined() ? url.get() + "/jobs/job/?id=" + jobId : "";
}

代码示例来源:origin: twosigma/beakerx

private String stageLink(int stageId) {
 // Snapshot the Option once; the original evaluated uiWebUrl() separately for
 // the isDefined() check and the get(), a check-then-act on two reads.
 Option<String> url = getSparkSession().sparkContext().uiWebUrl();
 // Empty string when the Spark UI is not running.
 return url.isDefined() ? url.get() + "/stages/stage/?id=" + stageId + "&attempt=0" : "";
}

代码示例来源:origin: apache/hive

// NOTE(review): non-compilable snippet fragment (no closing brace in the
// excerpt). jarOfClass() is evaluated twice here; the isDefined()/get() pair
// should operate on one cached Option instead of two separate lookups.
if (SparkContext.jarOfClass(this.getClass()).isDefined()) {
 jar = SparkContext.jarOfClass(this.getClass()).get();

代码示例来源:origin: apache/flink

// Fragment: unwrap the optional port, using -1 as the "not configured" sentinel.
int actorSystemPort = portOption.isDefined() ? (int) portOption.get() : -1;

代码示例来源:origin: apache/hive

@Override
 public String call(JobContext jc) throws Exception {
  // Read the Option once so isDefined() and get() see the same snapshot; the
  // original invoked uiWebUrl() twice, racing the check against the read.
  Option<String> url = jc.sc().sc().uiWebUrl();
  return url.isDefined() ? SparkUtilities.reverseDNSLookupURL(url.get()) : "UNDEFINED";
 }
}

代码示例来源:origin: apache/flink

@Override
public boolean stopWorker(RegisteredMesosWorkerNode workerNode) {
  LOG.info("Stopping worker {}.", workerNode.getResourceID());
  try {
    if (workersInLaunch.containsKey(workerNode.getResourceID())) {
      // Transition the worker from "in launch" to "being returned": persist the
      // Released state first, then notify the task monitor.
      MesosWorkerStore.Worker released =
        workersInLaunch.remove(workerNode.getResourceID()).releaseWorker();
      workerStore.putWorker(released);
      workersBeingReturned.put(extractResourceID(released.taskID()), released);
      taskMonitor.tell(new TaskMonitor.TaskGoalStateUpdated(extractGoalState(released)), selfActor);
      if (released.hostname().isDefined()) {
        // tell the launch coordinator that the task is being unassigned from the host, for planning purposes
        launchCoordinator.tell(
          new LaunchCoordinator.Unassign(released.taskID(), released.hostname().get()), selfActor);
      }
    } else if (workersBeingReturned.containsKey(workerNode.getResourceID())) {
      LOG.info("Ignoring request to stop worker {} because it is already being stopped.", workerNode.getResourceID());
    } else {
      LOG.warn("Unrecognized worker {}.", workerNode.getResourceID());
    }
  } catch (Exception e) {
    onFatalError(new ResourceManagerException("Unable to release a worker.", e));
  }
  return true;
}

代码示例来源:origin: apache/flink

/**
 * Create the Mesos scheduler driver based on this configuration.
 * @param scheduler the scheduler to use.
 * @param implicitAcknowledgements whether to configure the driver for implicit acknowledgements.
 * @return a scheduler driver.
 */
public SchedulerDriver createDriver(Scheduler scheduler, boolean implicitAcknowledgements) {
  // Use the authenticated constructor only when a credential is configured.
  if (this.credential().isDefined()) {
    return new MesosSchedulerDriver(scheduler, frameworkInfo.build(), this.masterUrl(),
      implicitAcknowledgements, this.credential().get().build());
  }
  return new MesosSchedulerDriver(scheduler, frameworkInfo.build(), this.masterUrl(),
    implicitAcknowledgements);
}

代码示例来源:origin: twitter/distributedlog

// NOTE(review): non-compilable snippet fragment. It surfaces an interrupt
// recorded on the promise as the reader's last exception; note that
// isInterrupted() is evaluated twice (once for the check, once for the cause).
if (nextRequest.getPromise().isInterrupted().isDefined()) {
  setLastException(new DLInterruptedException("Interrupted on reading " + bkLedgerManager.getFullyQualifiedName() + " : ",
      nextRequest.getPromise().isInterrupted().get()));

代码示例来源:origin: apache/flink

// NOTE(review): heavily truncated excerpt from a larger builder chain —
// receivers and parentheses are incomplete as shown and this will not compile
// stand-alone. Shown only to illustrate Option.isDefined()/get() usage when
// assembling a Mesos launch command and container info.
if (taskManagerHostnameOption.isDefined()) {
    .matcher(taskManagerHostnameOption.get())
    .replaceAll(Matcher.quoteReplacement(taskID.getValue()));
if (params.bootstrapCommand().isDefined()) {
  launchCommand.append(params.bootstrapCommand().get()).append(" && ");
switch (params.containerType()) {
  case MESOS:
    if (params.containerImageName().isDefined()) {
      containerInfo
        .setMesos(Protos.ContainerInfo.MesosInfo.newBuilder()
            .setType(Protos.Image.Type.DOCKER)
            .setDocker(Protos.Image.Docker.newBuilder()
              .setName(params.containerImageName().get()))));
        .addAllParameters(params.dockerParameters())
        .setNetwork(Protos.ContainerInfo.DockerInfo.Network.HOST)
        .setImage(params.containerImageName().get())
        .setForcePullImage(params.dockerForcePullImage()));
    break;

代码示例来源:origin: com.typesafe.play/play_2.10

/**
 * Wrap a Scala Option, handling None as null.
 */
public static <T> T orNull(scala.Option<T> opt) {
  // None (isDefined == false) maps onto the Java null convention.
  return opt.isDefined() ? opt.get() : null;
}

代码示例来源:origin: com.typesafe.play/play_2.10

/**
 * Wrap a Scala Option, handling None by returning a defaultValue
 */
public static <T> T orElse(scala.Option<T> opt, T defaultValue) {
  // Prefer the wrapped value; otherwise hand back the caller's default.
  return opt.isDefined() ? opt.get() : defaultValue;
}

代码示例来源:origin: com.typesafe.play/play_2.12

/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
  // Guard clause: bail out with null for None.
  if (!opt.isDefined()) {
    return null;
  }
  return opt.get();
}

代码示例来源:origin: com.typesafe.play/play

/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
  // Collapse the present/absent cases into a single conditional expression.
  return opt.isDefined() ? opt.get() : null;
}

代码示例来源:origin: com.typesafe.play/play_2.11

/**
 * Wraps a Scala Option, handling None as null.
 *
 * @param opt the scala option.
 * @param <T> the type in the Option.
 * @return the value of the option, or null if opt.isDefined is false.
 */
public static <T> T orNull(scala.Option<T> opt) {
  // Return null early for None, otherwise unwrap.
  if (!opt.isDefined()) {
    return null;
  }
  return opt.get();
}

相关文章