Usage and Code Examples of the scala.collection.Seq.apply() Method


This article collects code examples of the scala.collection.Seq.apply() method as called from Java, showing how Seq.apply() is used in practice. The examples are extracted from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as useful references. Details of Seq.apply() are as follows:
Package: scala.collection
Class: Seq
Method: apply

About Seq.apply

apply(idx) is positional lookup: Seq[A] extends PartialFunction[Int, A], and apply returns the element at the given zero-based index, throwing IndexOutOfBoundsException when the index is out of range. In Scala source, seq(i) is syntactic sugar for seq.apply(i); Java has no such sugar, which is why the explicit apply call appears throughout the examples below.
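
A minimal, self-contained sketch of calling apply from Java, assuming Scala 2.11/2.12 on the classpath (where scala.collection.JavaConversions, also used in the play-transport example below, provides asScalaBuffer):

import java.util.Arrays;
import scala.collection.JavaConversions;
import scala.collection.Seq;

public class SeqApplyDemo {
 public static void main(String[] args) {
  // Wrap a Java list as a Scala Seq (mutable.Buffer is a subtype of Seq).
  Seq<String> seq = JavaConversions.asScalaBuffer(Arrays.asList("a", "b", "c"));

  // apply(i) is positional lookup; Scala's seq(1) desugars to seq.apply(1).
  System.out.println(seq.apply(1)); // prints "b"
  System.out.println(seq.size());   // prints 3
 }
}

Note that apply is constant-time only on indexed sequences such as Vector or a wrapped array; on a linear List each call walks the prefix, so index-based loops like several of those below can cost O(n^2) in the worst case.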

Code Examples

Code example source: apache/incubator-pinot

public static void stopServer(KafkaServerStartable serverStartable) {
 serverStartable.shutdown();
 FileUtils.deleteQuietly(new File(serverStartable.serverConfig().logDirs().apply(0)));
}

Code example source: linkedin/kafka-monitor

// Excerpt: format one partition's replica list by appending each broker id to a StringBuilder (bldr).
scala.collection.Seq<Object> replicas = partitionsToBeReassigned.apply(partition);
for (int replicaIndex = 0; replicaIndex < replicas.size(); replicaIndex++) {
 Object replica = replicas.apply(replicaIndex);
 bldr.append(replica).append(",");
}

Code example source: apache/hive

@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
 int jobId = jobStart.jobId();
 int size = jobStart.stageIds().size();
 int[] intStageIds = new int[size];
 for (int i = 0; i < size; i++) {
  Integer stageId = (Integer) jobStart.stageIds().apply(i);
  intStageIds[i] = stageId;
  stageIdToJobId.put(stageId, jobId);
 }
 jobIdToStageId.put(jobId, intStageIds);
}

Code example source: apache/drill

@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
 int jobId = jobStart.jobId();
 int size = jobStart.stageIds().size();
 int[] intStageIds = new int[size];
 for (int i = 0; i < size; i++) {
  Integer stageId = (Integer) jobStart.stageIds().apply(i);
  intStageIds[i] = stageId;
  stageIdToJobId.put(stageId, jobId);
 }
 jobIdToStageId.put(jobId, intStageIds);
}

Code example source: apache/hive

@Override
public void onJobStart(SparkListenerJobStart jobStart) {
 synchronized (stageToJobId) {
  for (int i = 0; i < jobStart.stageIds().length(); i++) {
   stageToJobId.put((Integer) jobStart.stageIds().apply(i), jobStart.jobId());
  }
 }
}

Code example source: confluentinc/ksql

/**
 * This broker's `metadata.broker.list` value.  Example: `127.0.0.1:9092`.
 *
 * <p>You can use this to tell Kafka producers and consumers how to connect to this instance.
 *
 * <p>This version returns the port of the first listener.
 * @return the broker list
 */
String brokerList() {
 final ListenerName listenerName = kafka.config().advertisedListeners().apply(0).listenerName();
 return kafka.config().hostName() + ":" + kafka.boundPort(listenerName);
}
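
As the Javadoc above says, the returned string plugs straight into Kafka client configuration as bootstrap.servers. A minimal sketch of that wiring using the standard Kafka producer API (the class name, topic, and message are made up for illustration):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

class BrokerListUsage {
 // `brokerList` would be the value returned by brokerList() above, e.g. "127.0.0.1:9092".
 static void produceOne(String brokerList) {
  Properties props = new Properties();
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
  // try-with-resources closes the producer, flushing pending records first.
  try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
   producer.send(new ProducerRecord<>("test-topic", "hello"));
  }
 }
}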

Code example source: linkedin/kafka-monitor

private static List<PartitionInfo> getPartitionInfo(KafkaZkClient zkClient, String topic) {
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> partitionAssignments =
   zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 List<PartitionInfo> partitionInfoList = new ArrayList<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = partitionAssignments.iterator();
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  Integer partition = (Integer) scalaTuple._1();
  scala.Option<Object> leaderOption = zkClient.getLeaderForPartition(new TopicPartition(topic, partition));
  Node leader = leaderOption.isEmpty() ?  null : new Node((Integer) leaderOption.get(), "", -1);
  Node[] replicas = new Node[scalaTuple._2().size()];
  for (int i = 0; i < replicas.length; i++) {
   Integer brokerId = (Integer) scalaTuple._2().apply(i);
   replicas[i] = new Node(brokerId, "", -1);
  }
  partitionInfoList.add(new PartitionInfo(topic, partition, leader, replicas, null));
 }
 return partitionInfoList;
}

Code example source: uber/chaperone

public static void stopServer(KafkaServerStartable serverStartable) {
 serverStartable.shutdown();
 FileUtils.deleteQuietly(new File(serverStartable.serverConfig().logDirs().apply(0)));
}

Code example source: twitter/GraphJet

@Override
public Long next() {
 return (Long) seq.apply(index++);
}

Code example source: com.github.gitssie/play-transport

public static Map<String, String> urlDecode(String body, String encode) throws IOException {
 Map<String, String> postData = Maps.newHashMap();
 scala.collection.immutable.Map<String, Seq<String>> formData = FormUrlEncodedParser.parse(body, encode);
 Map<String, Seq<String>> map = JavaConversions.mapAsJavaMap(formData);
 for (Map.Entry<String, Seq<String>> entry : map.entrySet()) {
  // apply(0) keeps the first value for each form field.
  postData.put(entry.getKey(), entry.getValue().apply(0));
 }
 return postData;
}

Code example source: kframework/k

private EnumSet<Fixity> getFixity(ProductionReference t) {
  Production p = t.production();
  EnumSet<Fixity> set = EnumSet.noneOf(Fixity.class);
  if (t instanceof Constant) {
    return set;
  }
  if (p.items().apply(0) instanceof NonTerminal)
    set.add(Fixity.BARE_LEFT);
  if (p.items().apply(p.items().size() - 1) instanceof NonTerminal)
    set.add(Fixity.BARE_RIGHT);
  return set;
}

Code example source: org.apache.pig/pig

@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
  int jobId = jobStart.jobId();
  int size = jobStart.stageIds().size();
  int[] intStageIds = new int[size];
  for (int i = 0; i < size; i++) {
    Integer stageId = (Integer) jobStart.stageIds().apply(i);
    intStageIds[i] = stageId;
    stageIdToJobId.put(stageId, jobId);
  }
  jobIdToStageId.put(jobId, intStageIds);
}

Code example source: com.facebook.presto.hive/hive-apache

@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
 int jobId = jobStart.jobId();
 int size = jobStart.stageIds().size();
 int[] intStageIds = new int[size];
 for (int i = 0; i < size; i++) {
  Integer stageId = (Integer) jobStart.stageIds().apply(i);
  intStageIds[i] = stageId;
  stageIdToJobId.put(stageId, jobId);
 }
 jobIdToStageId.put(jobId, intStageIds);
}

Code example source: com.github.hyukjinkwon/spark-client

@Override
public void onJobStart(SparkListenerJobStart jobStart) {
 synchronized (stageToJobId) {
  for (int i = 0; i < jobStart.stageIds().length(); i++) {
   stageToJobId.put((Integer) jobStart.stageIds().apply(i), jobStart.jobId());
  }
 }
}

Code example source: kframework/k

boolean hasTerminalAtIdx(Production p, int position) {
  if (position < 0 || position >= p.items().size()) {
    return false;
  }
  return p.items().apply(position) instanceof TerminalLike;
}

Code example source: org.apache.spark/spark-sql_2.11

// Excerpt from a Dataset round-trip test: compare Java bean fields with the decoded Scala collections.
Assert.assertEquals(bean.getB().length, result.length());
for (int i = 0; i < result.length(); i++) {
 Assert.assertEquals(bean.getB()[i], result.apply(i));
}
Assert.assertEquals(bean.getD().size(), d.length());
for (int i = 0; i < d.length(); i++) {
 Assert.assertEquals(bean.getD().get(i), d.apply(i));
}

Code example source: org.apache.spark/spark-sql_2.10

Assert.assertEquals(bean.getB().length, result.length());
for (int i = 0; i < result.length(); i++) {
 Assert.assertEquals(bean.getB()[i], result.apply(i));
}
Assert.assertEquals(bean.getD().size(), d.length());
for (int i = 0; i < d.length(); i++) {
 Assert.assertEquals(bean.getD().get(i), d.apply(i));
}

Code example source: org.apache.spark/spark-sql

Assert.assertEquals(bean.getB().length, result.length());
for (int i = 0; i < result.length(); i++) {
 Assert.assertEquals(bean.getB()[i], result.apply(i));
}
Assert.assertEquals(bean.getD().size(), d.length());
for (int i = 0; i < d.length(); i++) {
 Assert.assertEquals(bean.getD().get(i), d.apply(i));
}

Code example source: kframework/k

public Either<java.util.Set<ParseFailedException>, Term> apply(TermCons tc) {
    if (tc.production().items().apply(tc.production().items().size() - 1) instanceof NonTerminal) {
      String msg = parent.production().klabel().get() + " is not allowed to be an immediate child of cast." +
          "    Use parentheses: (x):Sort to set the proper scope of the operations.";
      KException kex = new KException(KException.ExceptionType.ERROR, KException.KExceptionGroup.CRITICAL, msg, tc.source().get(), tc.location().get());
      return Left.apply(Sets.newHashSet(new PriorityException(kex)));
    }
    return Right.apply(tc);
}

Code example source: ch.epfl.gsn/gsn-core

public static WebInput webInput(WebInputCommand wi) {
  WebInput w = new WebInput();
  DataField[] par = new DataField[wi.params().size()];
  for (int i = 0; i < par.length; i++) {
    par[i] = dataField(wi.params().apply(i));
  }
  w.setParameters(par);
  w.setName(wi.name());
  return w;
}
