scala.collection.mutable.Buffer.toSeq()方法的使用及代码示例

x33g5p2x  于2022-01-16 转载在 其他  
字(10.7k)|赞(0)|评价(0)|浏览(114)

本文整理了Java中scala.collection.mutable.Buffer.toSeq()方法的一些代码示例,展示了Buffer.toSeq()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Buffer.toSeq()方法的具体详情如下:
包路径:scala.collection.mutable.Buffer
类名称:Buffer
方法名:toSeq

Buffer.toSeq介绍

toSeq() 将该 Buffer 作为 Seq 返回。在 Java 中常见的用法是:先通过 JavaConversions.asScalaBuffer 或 JavaConverters.asScalaBufferConverter 把 java.util.List 转换为 Buffer,再调用 toSeq() 得到 Seq,以便传给需要 Seq 参数的 Scala API(下面的示例均为这一模式)。

代码示例

代码示例来源:origin: Graylog2/graylog2-server

/**
 * Appends a batch of messages to the Kafka-backed journal as one message set.
 *
 * @param messages    messages to persist; an empty list is a no-op
 * @param payloadSize total payload size in bytes (used for debug logging only)
 * @return the journal offset of the last message written, or -1 if the batch was empty
 */
private long flushMessages(List<Message> messages, long payloadSize) {
  if (messages.isEmpty()) {
    LOG.debug("No messages to flush, not trying to write an empty message set.");
    return -1L;
  }
  // Bridge the Java list to the Scala Seq expected by the message-set constructor.
  final ByteBufferMessageSet messageSet =
      new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages).toSeq());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Trying to write ByteBufferMessageSet with size of {} bytes to journal", messageSet.sizeInBytes());
  }
  final LogAppendInfo appendResult = kafkaLog.append(messageSet, true);
  final long lastOffset = appendResult.lastOffset();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Wrote {} messages to journal: {} bytes (payload {} bytes), log position {} to {}",
        messages.size(), messageSet.sizeInBytes(), payloadSize, appendResult.firstOffset(), lastOffset);
  }
  // Record the batch size in the write-throughput meter.
  writtenMessages.mark(messages.size());
  return lastOffset;
}

代码示例来源:origin: com.typesafe.play/play_2.11

/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
/**
 * Translates the first of the given message keys that is defined.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys candidate message keys, tried in order
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
public String get(play.api.i18n.Lang lang, List<String> keys, Object... args) {
  final Seq<Object> scalaArgs = convertArgsToScalaBuffer(args);
  final Buffer<String> scalaKeys = scala.collection.JavaConverters.asScalaBufferConverter(keys).asScala();
  return messages.apply(scalaKeys.toSeq(), scalaArgs, lang);
}

代码示例来源:origin: com.typesafe.play/play_2.10

/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
/**
 * Translates the first defined message among the given keys.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys, tried in order
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
public String get(play.api.i18n.Lang lang, List<String> keys, Object... args) {
  // Convert both Java-side inputs to their Scala counterparts before delegating.
  final Buffer<String> scalaKeys = scala.collection.JavaConverters.asScalaBufferConverter(keys).asScala();
  final Buffer<Object> scalaArgs = convertArgsToScalaBuffer(args);
  return messages.apply(scalaKeys.toSeq(), scalaArgs, lang);
}

代码示例来源:origin: com.typesafe.play/play_2.12

/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
/**
 * Resolves and formats the first defined message for the given keys.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
public String get(play.api.i18n.Lang lang, List<String> keys, Object... args) {
  final Seq<Object> formattedArgs = convertArgsToScalaBuffer(args);
  final Buffer<String> keyBuffer = scala.collection.JavaConverters.asScalaBufferConverter(keys).asScala();
  return messages.apply(keyBuffer.toSeq(), formattedArgs, lang);
}

代码示例来源:origin: com.typesafe.play/play

/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
public String get(play.api.i18n.Lang lang, List<String> keys, Object... args) {
  // Keys become a Scala Buffer (viewed as a Seq), args a Scala Seq.
  final Buffer<String> keyBuffer =
      scala.collection.JavaConverters.asScalaBufferConverter(keys).asScala();
  final Seq<Object> argSeq = convertArgsToScalaBuffer(args);
  return messages.apply(keyBuffer.toSeq(), argSeq, lang);
}

代码示例来源:origin: pinterest/doctorkafka

/**
 * Returns the partition-to-replica assignment for {@code topic}, caching the
 * result in {@code topicPartitionAssignments} after the first ZooKeeper read.
 *
 * @param zkUtils ZooKeeper helper used to read the assignment
 * @param topic   the topic whose replica assignment is wanted
 * @return map from partition id to the seq of replica broker ids
 */
private scala.collection.Map<Object, Seq<Object>> getReplicaAssignmentForTopic(
  ZkUtils zkUtils, String topic) {
 // Single map lookup instead of containsKey() followed by get().
 // Assumes the cache never stores null values — it is only populated below
 // with the value read from ZooKeeper (TODO confirm zk never yields null here).
 scala.collection.Map<Object, Seq<Object>> cached = topicPartitionAssignments.get(topic);
 if (cached != null) {
  return cached;
 }
 // The ZkUtils API is batch-oriented, so wrap the single topic in a list.
 Seq<String> topicsSeq = scala.collection.JavaConverters
   .asScalaBuffer(java.util.Collections.singletonList(topic)).toSeq();
 scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments =
   zkUtils.getPartitionAssignmentForTopics(topicsSeq);
 scala.collection.Map<Object, Seq<Object>> partitionAssignment = assignments.get(topic).get();
 topicPartitionAssignments.put(topic, partitionAssignment);
 return partitionAssignment;
}

代码示例来源:origin: com.github.forwardloop/glicko2s

/**
 * Recalculates a Glicko-2 rating from a base rating and a list of results.
 *
 * Bridges the Java result list to the underlying Scala API by converting it
 * to a Scala Seq.
 *
 * @param baseRating the rating to update
 * @param results    result tuples — presumably (opponent rating, outcome); confirm against the glicko2s API
 * @return the new rating computed by the Scala implementation
 */
public static Glicko2 calculateNewRating(Glicko2 baseRating, List<Tuple2<Glicko2, EloResult>> results){
    return baseRating.calculateNewRating( JavaConversions.asScalaBuffer(results).toSeq() );
  }
}

代码示例来源:origin: open-korean-text/elasticsearch-analysis-openkoreantext

/**
 * Registers the given words as nouns in the OpenKoreanText dictionary.
 *
 * Converts the Java list to a Scala Seq and delegates to
 * {@code OpenKoreanTextProcessor.addNounsToDictionary}.
 *
 * @param words the user-dictionary words to add
 */
public static void addUserDictionary(List<String> words) {
    OpenKoreanTextProcessor.addNounsToDictionary(JavaConverters.asScalaBuffer(words).toSeq());
}

代码示例来源:origin: open-korean-text/elasticsearch-analysis-openkoreantext

/**
 * Drops tokens that are considered redundant.
 *
 * A token is removed when its part-of-speech name is in {@code redundantTypes}
 * or its surface text is in {@code redundantTerms}; all other tokens are kept
 * in their original order.
 *
 * @param tokens the token sequence to filter
 * @return a new sequence containing only the non-redundant tokens
 */
@Override
protected Seq<KoreanToken> perform(Seq<KoreanToken> tokens) {
  final List<KoreanToken> kept = new ArrayList<>();
  for (KoreanToken token : JavaConverters.seqAsJavaList(tokens)) {
    // Same short-circuit order as before: POS name first, then surface text.
    final boolean redundant = redundantTypes.contains(token.pos().toString())
        || redundantTerms.contains(token.text());
    if (!redundant) {
      kept.add(token);
    }
  }
  return JavaConverters.asScalaBuffer(kept).toSeq();
}
}

代码示例来源:origin: com.twitter/util-core_2.10

/**
 * Creates a new `Spool` of given `elems`.
 */
/**
 * Creates a new `Spool` of given `elems`.
 *
 * The collection is first copied into a fresh ArrayList, so later changes to
 * `elems` do not affect the resulting spool; the copy is then viewed as a
 * Scala Seq and handed to the spool builder.
 */
public static <T> Spool<T> newSpool(Collection<T> elems) {
  final ArrayList<T> copy = new ArrayList<T>(elems);
  final Seq<T> scalaSeq = JavaConversions.asScalaBuffer(copy).toSeq();
  return new Spool.ToSpool<T>(scalaSeq).toSpool();
}

代码示例来源:origin: org.graylog2/graylog2-server

/**
 * Writes the given messages to the Kafka-backed journal as a single
 * ByteBufferMessageSet and returns the offset of the last entry written.
 *
 * @param messages    messages to persist; an empty list is a no-op
 * @param payloadSize total payload size in bytes (only used for debug logging)
 * @return the last write offset, or -1 when there was nothing to flush
 */
private long flushMessages(List<Message> messages, long payloadSize) {
  if (messages.isEmpty()) {
    LOG.debug("No messages to flush, not trying to write an empty message set.");
    return -1L;
  }
  // Bridge the Java list to the Scala Seq expected by the Kafka API.
  final ByteBufferMessageSet messageSet = new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages).toSeq());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Trying to write ByteBufferMessageSet with size of {} bytes to journal", messageSet.sizeInBytes());
  }
  // NOTE(review): the boolean presumably asks the log to assign offsets — confirm against the Kafka Log API.
  final LogAppendInfo appendInfo = kafkaLog.append(messageSet, true);
  long lastWriteOffset = appendInfo.lastOffset();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Wrote {} messages to journal: {} bytes (payload {} bytes), log position {} to {}",
        messages.size(), messageSet.sizeInBytes(), payloadSize, appendInfo.firstOffset(), lastWriteOffset);
  }
  // Record the batch size in the write meter.
  writtenMessages.mark(messages.size());
  return lastWriteOffset;
}

代码示例来源:origin: shunfei/DCMonitor

/**
 * Returns consumer partition info for every partition of {@code topic}.
 *
 * @param group consumer group name, passed through to {@code getPartitionInfo}
 * @param topic topic to look up
 * @return info for each partition that yielded one; empty list if the topic is unknown
 */
public List<PartitionInfo> getPartitionInfos(String group, String topic) {
  Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
  scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
  Option<Seq<Object>> partitions = pidMap.get(topic);
  // FIX: Option.get() throws NoSuchElementException on an empty Option, so the
  // original `partitions.get() == null` check could never take the empty-list
  // branch for an unknown topic. Test the Option itself before unwrapping.
  if (partitions.isEmpty() || partitions.get() == null) {
    return Collections.emptyList();
  }
  List<PartitionInfo> infos = Lists.newArrayList();
  for (Object o : JavaConversions.asJavaList(partitions.get())) {
    PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
    if (info != null) {
      infos.add(info);
    }
  }
  return infos;
}

代码示例来源:origin: jetoile/hadoop-unit

/**
 * Builds the embedded KafkaServer instance from the configured properties,
 * with the thread-name prefix "kafka-mini-cluster" and no extra reporters.
 *
 * NOTE(review): Collections.EMPTY_LIST is a raw type, so the resulting raw
 * Seq is passed unchecked to the KafkaServer constructor. Replacing it with
 * the generic Collections.emptyList() would require spelling out the element
 * type explicitly, because the target type does not propagate through the
 * asScalaBuffer(...).toSeq() chain.
 */
private void build() {
    KafkaConfig kf = new KafkaConfig(kafkaConfig);
    Option<String> threadPrefixName = Option.apply("kafka-mini-cluster");
    kafkaServer = new KafkaServer(kf, Time.SYSTEM, threadPrefixName, JavaConversions.asScalaBuffer(Collections.EMPTY_LIST).toSeq());
}

代码示例来源:origin: au.com.dius/pact-jvm-consumer_2.11

/**
 * Terminates the DSL and builds a pact fragment to represent the interactions.
 *
 * The accumulated interactions are converted to a Scala Seq before being
 * handed to the PactFragment constructor.
 *
 * @return the assembled pact fragment
 * @deprecated Use toPact instead
 */
public PactFragment toFragment() {
  addInteraction();
  return new PactFragment(
      request.consumer,
      request.provider,
   JavaConversions$.MODULE$.asScalaBuffer(consumerPactBuilder.getInteractions()).toSeq());
}

代码示例来源:origin: fr.jetoile.hadoop/hadoop-unit-confluent

/**
 * Builds the embedded KafkaServer from the configured properties, using the
 * thread-name prefix "kafka-mini-cluster" and an empty reporter list.
 *
 * NOTE(review): Collections.EMPTY_LIST is a raw type relying on an unchecked
 * conversion into the Seq parameter; switching to Collections.emptyList()
 * would need an explicit element type, since target typing does not flow
 * through the asScalaBuffer(...).toSeq() chain.
 */
private void build() {
    KafkaConfig kf = new KafkaConfig(kafkaConfig);
    Option<String> threadPrefixName = Option.apply("kafka-mini-cluster");
    kafkaServer = new KafkaServer(kf, Time.SYSTEM, threadPrefixName, JavaConversions.asScalaBuffer(Collections.EMPTY_LIST).toSeq());
}

代码示例来源:origin: au.com.dius/pact-jvm-consumer

/**
 * Terminates the DSL and builds a pact fragment to represent the interactions.
 *
 * Registers the pending interaction, then converts the builder's interaction
 * list to a Scala Seq for the PactFragment constructor.
 *
 * @return the assembled pact fragment
 * @deprecated Use toPact instead
 */
public PactFragment toFragment() {
  addInteraction();
  return new PactFragment(
      request.consumer,
      request.provider,
   JavaConversions$.MODULE$.asScalaBuffer(consumerPactBuilder.getInteractions()).toSeq());
}

代码示例来源:origin: Netflix/iceberg

/**
 * Builds an UnsafeProjection that maps rows read with {@code readSchema}
 * into the column order of {@code finalSchema}.
 *
 * @param finalSchema the schema the projected rows should follow
 * @param readSchema  the schema the underlying rows were read with
 * @return a projection from read-schema rows to final-schema rows
 */
private static UnsafeProjection projection(Schema finalSchema, Schema readSchema) {
  StructType struct = convert(readSchema);
  List<AttributeReference> refs = seqAsJavaListConverter(struct.toAttributes()).asJava();

  // Output attributes: one per field of the read schema.
  int numFields = struct.fields().length;
  List<Attribute> attrs = Lists.newArrayListWithExpectedSize(numFields);
  for (AttributeReference ref : refs) {
    attrs.add(ref.toAttribute());
  }

  // Projection expressions: for each final-schema column, reference the
  // attribute at that column's position in the read schema.
  List<org.apache.spark.sql.catalyst.expressions.Expression> exprs =
      Lists.newArrayListWithExpectedSize(numFields);
  for (Types.NestedField field : finalSchema.columns()) {
    exprs.add(refs.get(struct.fieldIndex(field.name())));
  }

  return UnsafeProjection.create(
      asScalaBufferConverter(exprs).asScala().toSeq(),
      asScalaBufferConverter(attrs).asScala().toSeq());
}

代码示例来源:origin: gnuhpc/Kafka-zk-restapi

/**
 * Generates a partition reassignment plan for the given brokers and topics.
 *
 * @param reassignWrapper carries the target broker list and the topics JSON
 * @return two JSON strings: the current partition replica assignment first,
 *         then the proposed partition reassignment
 */
public List<String> generateReassignPartition(ReassignWrapper reassignWrapper) {
  Seq brokerSeq =
      JavaConverters.asScalaBufferConverter(reassignWrapper.getBrokers()).asScala().toSeq();
  // Tuple of (proposed partition reassignment, current partition replica assignment).
  Tuple2 assignments =
      ReassignPartitionsCommand.generateAssignment(
          zkUtils, brokerSeq, reassignWrapper.generateReassignJsonString(), false);
  List<String> result = new ArrayList<>();
  // Preserve the original ordering: current assignment (_2) before proposed (_1).
  result.add(
      zkUtils.formatAsReassignmentJson(
          (scala.collection.Map<TopicAndPartition, Seq<Object>>) assignments._2()));
  result.add(
      zkUtils.formatAsReassignmentJson(
          (scala.collection.Map<TopicAndPartition, Seq<Object>>) assignments._1()));
  return result;
}

代码示例来源:origin: open-korean-text/elasticsearch-analysis-openkoreantext

/**
 * Replaces each token with its stemmed form when a stem is available.
 *
 * Tokens whose {@code stem()} is empty are passed through unchanged; the
 * order and count of tokens are preserved.
 *
 * @param tokens the tokens to process
 * @return a sequence of the same length with stemmable tokens stemmed
 */
@Override
protected Seq<KoreanToken> perform(Seq<KoreanToken> tokens) {
  final KoreanToken[] result = new KoreanToken[tokens.length()];
  int index = 0;
  for (Iterator<KoreanToken> it = tokens.iterator(); it.hasNext(); ) {
    final KoreanToken token = it.next();
    result[index++] = token.stem().nonEmpty() ? stem(token) : token;
  }
  return JavaConverters.asScalaBuffer(Arrays.asList(result)).toSeq();
}

代码示例来源:origin: open-korean-text/elasticsearch-analysis-openkoreantext

/**
 * Converts extracted phrases into tokens sorted by their start offset.
 *
 * Each phrase becomes a KoreanToken carrying the phrase's text, POS, offset
 * and length, with a None stem and a false final flag; the tokens are then
 * ordered by offset.
 *
 * @param phrases the phrases to convert
 * @return offset-sorted tokens, one per input phrase
 */
private Seq<KoreanToken> convertPhrasesToTokens(Seq<KoreanPhrase> phrases) {
  final KoreanToken[] tokens = new KoreanToken[phrases.length()];
  int index = 0;
  final Iterator<KoreanPhrase> phraseIterator = phrases.iterator();
  while (phraseIterator.hasNext()) {
    final KoreanPhrase phrase = phraseIterator.next();
    tokens[index++] = new KoreanToken(phrase.text(), phrase.pos(), phrase.offset(), phrase.length(),
        scala.Option.apply(null), false);
  }

  // Equivalent to the original hand-written comparator: order by offset only.
  Arrays.sort(tokens, (a, b) -> Integer.compare(a.offset(), b.offset()));

  return JavaConverters.asScalaBuffer(Arrays.asList(tokens)).toSeq();
}
}

相关文章

微信公众号

最新文章

更多