Usage of the scala.collection.mutable.Buffer class, with code examples


This article collects Java code examples that use the scala.collection.mutable.Buffer class and shows how Buffer is typically used from Java. The snippets come from selected open-source projects on GitHub, Stack Overflow, Maven, and similar platforms, and should serve as a useful reference. Details of the Buffer class:
Package path: scala.collection.mutable.Buffer
Class name: Buffer

About Buffer

scala.collection.mutable.Buffer is Scala's mutable sequence abstraction: a Seq whose elements can be appended, prepended, inserted, and removed in place. In Java interop code it most often appears as the result of wrapping a java.util.List via JavaConversions.asScalaBuffer or JavaConverters, after which toList or toSeq yields the Seq that a Scala API expects.
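
A minimal round-trip sketch of that pattern, assuming Scala 2.12 on the classpath (where scala.collection.JavaConverters exposes asScalaBuffer directly); the class name is illustrative:

import java.util.Arrays;
import java.util.List;

import scala.collection.JavaConverters;
import scala.collection.mutable.Buffer;

public class BufferInteropSketch {
    public static void main(String[] args) {
        List<String> javaList = Arrays.asList("a", "b", "c");

        // Wrap the Java list as a scala.collection.mutable.Buffer.
        // This is a live view over the Java list, not a copy.
        Buffer<String> buffer = JavaConverters.asScalaBuffer(javaList);

        System.out.println(buffer.length()); // 3
        System.out.println(buffer.head());   // a

        // toList() copies into an immutable scala List, which is the
        // Seq most Scala APIs expect.
        scala.collection.immutable.List<String> scalaList = buffer.toList();
        System.out.println(scalaList.mkString(", ")); // a, b, c
    }
}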

Code examples

Code example source: origin: twitter/distributedlog

private static Seq<String> gaugeName(String name) {
  return scala.collection.JavaConversions.asScalaBuffer(Arrays.asList(name)).toList();
}
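
Note that scala.collection.JavaConversions, used above, was deprecated in Scala 2.12 and removed in 2.13. A sketch of the same helper written against the JavaConverters decorator API instead (imports as in the original):

private static Seq<String> gaugeName(String name) {
    // Decorator style: wrap, then explicitly convert with asScala().
    return scala.collection.JavaConverters
        .asScalaBufferConverter(Arrays.asList(name))
        .asScala()
        .toList();
}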

Code example source: origin: Graylog2/graylog2-server

private long flushMessages(List<Message> messages, long payloadSize) {
  if (messages.isEmpty()) {
    LOG.debug("No messages to flush, not trying to write an empty message set.");
    return -1L;
  }
  final ByteBufferMessageSet messageSet = new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages).toSeq());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Trying to write ByteBufferMessageSet with size of {} bytes to journal", messageSet.sizeInBytes());
  }
  final LogAppendInfo appendInfo = kafkaLog.append(messageSet, true);
  long lastWriteOffset = appendInfo.lastOffset();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Wrote {} messages to journal: {} bytes (payload {} bytes), log position {} to {}",
        messages.size(), messageSet.sizeInBytes(), payloadSize, appendInfo.firstOffset(), lastWriteOffset);
  }
  writtenMessages.mark(messages.size());
  return lastWriteOffset;
}

Code example source: origin: com.cerner.beadledom.avro/beadledom-avro-swagger

@Override
public Option<Model> read(Class<?> cls, Map<String, String> typeMap) {
 Schema schema = getSchema(cls);
 if (schema == null) {
  return Option.empty();
 }
 LinkedHashMap<String, ModelProperty> properties = new LinkedHashMap<>();
 for (Schema.Field field : schema.getFields()) {
  ModelProperty property = parseField(field);
  if (property == null) {
   LOGGER.debug(
     "Omitted field {} of schema {} from swagger docs", field.name(), schema.getName());
  } else {
   properties.update(getFieldName(field), property);
  }
 }
 return Option.apply(
   new Model(
     toName(cls),
     toName(cls),
     cls.getName(),
     properties,
     toDescriptionOpt(cls),
     Option.<String>empty(),
     Option.<String>empty(),
     JavaConversions.asScalaBuffer(Collections.<String>emptyList()).toList()));
}
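
A side note on the Scala map API used above: properties.update(k, v) is the Java spelling of Scala's properties(k) = v assignment sugar; mutable Scala maps also expose put, which additionally returns the previous binding as an Option. A minimal sketch (names illustrative):

scala.collection.mutable.LinkedHashMap<String, String> m =
    new scala.collection.mutable.LinkedHashMap<>();
m.update("k", "v1");                              // same as m(k) = v in Scala
scala.Option<String> previous = m.put("k", "v2"); // returns the old binding
System.out.println(previous);                     // Some(v1)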

Code example source: origin: jetoile/hadoop-unit

private void build() {
  KafkaConfig kf = new KafkaConfig(kafkaConfig);
  Option<String> threadPrefixName = Option.apply("kafka-mini-cluster");
  kafkaServer = new KafkaServer(kf, Time.SYSTEM, threadPrefixName, JavaConversions.asScalaBuffer(Collections.EMPTY_LIST).toSeq());
}

Code example source: origin: shunfei/DCMonitor

public List<PartitionInfo> getPartitionInfos(String group, String topic) {
 Seq<String> singleTopic = JavaConversions.asScalaBuffer(Collections.singletonList(topic)).toSeq();
 scala.collection.Map<String, Seq<Object>> pidMap = ZkUtils.getPartitionsForTopics(zkClient, singleTopic);
 Option<Seq<Object>> partitions = pidMap.get(topic);
 if (partitions.isEmpty()) {
  return Collections.emptyList();
 }
 List<PartitionInfo> infos = Lists.newArrayList();
 for (Object o : JavaConversions.asJavaList(partitions.get())) {
  PartitionInfo info = getPartitionInfo(group, topic, Int.unbox(o));
  if (info != null) {
   infos.add(info);
  }
 }
 return infos;
}
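
The loop above also shows the reverse direction, Scala Seq back to java.util.List, via JavaConversions.asJavaList. Under JavaConverters (Scala 2.12) the equivalent call is seqAsJavaList; a hypothetical rewrite of the loop header:

for (Object o : scala.collection.JavaConverters.seqAsJavaList(partitions.get())) {
  // ... loop body unchanged ...
}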

Code example source: origin: pinterest/secor

public void start() {
    Duration[] defaultLatchIntervals = {Duration.apply(1, TimeUnit.MINUTES)};
    @SuppressWarnings("deprecation")
    AdminServiceFactory adminServiceFactory = new AdminServiceFactory(
      this.mPort,
      20,
      List$.MODULE$.<StatsFactory>empty(),
      Option.<String>empty(),
      List$.MODULE$.<Regex>empty(),
      Map$.MODULE$.<String, CustomHttpHandler>empty(),
      JavaConversions
        .asScalaBuffer(Arrays.asList(defaultLatchIntervals)).toList()
    );
    RuntimeEnvironment runtimeEnvironment = new RuntimeEnvironment(this);
    adminServiceFactory.apply(runtimeEnvironment);
    try {
      Properties properties = new Properties();
      properties.load(this.getClass().getResource("build.properties").openStream());
      String buildRevision = properties.getProperty("build_revision", "unknown");
      LOG.info("build.properties build_revision: {}",
           properties.getProperty("build_revision", "unknown"));
      StatsUtil.setLabel("secor.build_revision", buildRevision);
    } catch (Throwable t) {
      LOG.error("Failed to load properties from build.properties", t);
    }
  }
}
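
The List$.MODULE$ and Map$.MODULE$ expressions above are how Java code reaches a Scala companion object. A minimal sketch of the same pattern, obtaining empty immutable Scala collections from Java:

// Empty immutable Scala collections via companion-object singletons:
scala.collection.immutable.List<String> nil =
    scala.collection.immutable.List$.MODULE$.empty();
scala.collection.immutable.Map<String, Integer> noEntries =
    scala.collection.immutable.Map$.MODULE$.<String, Integer>empty();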

Code example source: origin: Microsoft/Availability-Monitor-for-Kafka

// Excerpt only: the pm and tm assignments below are reconstructed from
// context; part, item, pml and ret are declared in surrounding code that
// the source truncates.
kafka.api.PartitionMetadata pm = new kafka.api.PartitionMetadata(
    part.partitionId(),
    Option.apply(part.leader()),
    JavaConversions.asScalaBuffer(part.replicas()).toList(),
    JavaConversions.asScalaBuffer(part.isr()).toList(),
    part.errorCode());
pml.add(pm);
kafka.api.TopicMetadata tm = new kafka.api.TopicMetadata(
    item.topic(),
    JavaConversions.asScalaBuffer(pml).toList(),
    item.errorCode());
ret.add(new kafka.javaapi.TopicMetadata(tm));

Code example source: origin: pinterest/doctorkafka

private scala.collection.Map<Object, Seq<Object>> getReplicaAssignmentForTopic(
  ZkUtils zkUtils, String topic) {
 if (topicPartitionAssignments.containsKey(topic)) {
  return topicPartitionAssignments.get(topic);
 }
 List<String> topics = new ArrayList<>();
 topics.add(topic);
 Seq<String> topicsSeq = scala.collection.JavaConverters.asScalaBuffer(topics).toSeq();
 scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments;
 assignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);
 scala.collection.Map<Object, Seq<Object>> partitionAssignment = assignments.get(topic).get();
 topicPartitionAssignments.put(topic, partitionAssignment);
 return partitionAssignment;
}

Code example source: origin: cdapio/cdap

// Excerpt only: the original method converts its Java result back to a
// Scala collection before returning, in two separate branches.
return JavaConversions.asScalaBuffer(result).toSeq();
// ... or, in the Map-returning branch:
return JavaConversions.mapAsScalaMap(result);

Code example source: origin: uk.gov.dstl.baleen/baleen-odin

private DirectedGraph<String> getDependencies(uk.gov.dstl.baleen.types.language.Sentence key) {

  List<WordToken> tokens = ImmutableList.copyOf(indexWords.get(key));
  Set<Object> roots = new HashSet<>();

  List<Edge<String>> edges =
    indexDependency
      .get(key)
      .stream()
      .peek(
        d -> {
         if (MaltParser.ROOT.equals(d.getDependencyType())) {
          roots.add(tokens.indexOf(d.getGovernor()));
         }
        })
      .map(
        d -> {
         int source = tokens.indexOf(d.getGovernor());
         int destination = tokens.indexOf(d.getDependent());
         return new Edge<>(source, destination, d.getDependencyType().toLowerCase());
        })
      .collect(toList());

  return new DirectedGraph<>(
    JavaConversions.asScalaBuffer(edges).toList(), JavaConversions.asScalaSet(roots).toSet());
 }
}

Code example source: origin: twosigma/beakerx

@SuppressWarnings("unchecked")
@Override
public Object deserialize(JsonNode n, ObjectMapper mapper) {
 org.apache.commons.lang3.tuple.Pair<String, Object> deserializeObject = TableDisplayDeSerializer.getDeserializeObject(parent, n, mapper);
 String subtype = deserializeObject.getLeft();
 if (subtype != null && subtype.equals(TableDisplay.DICTIONARY_SUBTYPE)) {
  return JavaConverters.mapAsScalaMapConverter((Map<String, Object>) deserializeObject.getRight()).asScala().toMap(Predef.<Tuple2<String, Object>>conforms());
 } else if (subtype != null && subtype.equals(TableDisplay.LIST_OF_MAPS_SUBTYPE)) {
  List<Map<String, Object>> rows = (List<Map<String, Object>>) deserializeObject.getRight();
  List<Object> oo = new ArrayList<Object>();
  for (Map<String, Object> row : rows) {
   oo.add(JavaConverters.mapAsScalaMapConverter(row).asScala().toMap(Predef.<Tuple2<String, Object>>conforms()));
  }
  return scala.collection.JavaConversions.collectionAsScalaIterable(oo);
 } else if (subtype != null && subtype.equals(TableDisplay.MATRIX_SUBTYPE)) {
  List<List<?>> matrix = (List<List<?>>) deserializeObject.getRight();
  ArrayList<Object> ll = new ArrayList<Object>();
  for (List<?> ob : matrix) {
   ll.add(scala.collection.JavaConversions.asScalaBuffer(ob).toList());
  }
  return scala.collection.JavaConversions.asScalaBuffer(ll).toList();
 }
 return deserializeObject.getRight();
}

Code example source: origin: apache/incubator-pinot

@Override
 public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];
  for (int i = 0; i < topicMetadataArray.length; i++) {
   String topic = topics.get(i);
   if (!topic.equals(topicName)) {
    topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
   } else {
    PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
    for (int j = 0; j < partitionCount; j++) {
     java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
     List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
     partitionMetadataArray[j] =
       new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
         emptyScalaList, Errors.NONE.code());
    }
    Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
    topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
   }
  }
  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);
  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
 }
}

Code example source: origin: open-korean-text/elasticsearch-analysis-openkoreantext

private Seq<KoreanToken> convertPhrasesToTokens(Seq<KoreanPhrase> phrases) {
    KoreanToken[] tokens = new KoreanToken[phrases.length()];

    Iterator<KoreanPhrase> iterator = phrases.iterator();
    int i = 0;
    while (iterator.hasNext()) {
      KoreanPhrase phrase = iterator.next();
      tokens[i++] = new KoreanToken(phrase.text(), phrase.pos(), phrase.offset(), phrase.length(), scala.Option.apply(null), false);
    }

    Arrays.sort(tokens, (o1, o2) -> {
      if (o1.offset() == o2.offset())
        return 0;
      return o1.offset() < o2.offset() ? -1 : 1;
    });

    return JavaConverters.asScalaBuffer(Arrays.asList(tokens)).toSeq();
  }
}

Code example source: origin: com.typesafe.play/play_2.10

/**
 * Converts a Java List to Scala Seq.
 */
public static <T> scala.collection.Seq<T> toSeq(java.util.List<T> list) {
  return scala.collection.JavaConverters.asScalaBufferConverter(list).asScala().toList();
}
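
A hypothetical call site for the helper above (assuming java.util.Arrays is imported):

scala.collection.Seq<String> names = toSeq(Arrays.asList("alice", "bob"));
System.out.println(names.mkString(", ")); // alice, bob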

Code example source: origin: com.couchbase.client/spark-connector

public static <T> Seq<T> listToSeq(List<T> source) {
    return scala.collection.JavaConversions.asScalaBuffer(source).seq();
  }
}
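
Unlike the Play helper above, which copies into an immutable List, asScalaBuffer wraps the Java list and .seq() returns that same wrapper, so the resulting Seq is a live, mutable view. A quick check of that behavior (names are illustrative):

List<String> source = new ArrayList<>(Arrays.asList("a"));
Seq<String> view = listToSeq(source);
source.add("b");
System.out.println(view.length()); // 2, the Seq tracks the Java list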

Code example source: origin: com.typesafe.play/play_2.12

/**
 * Translates the first defined message.
 *
 * Uses `java.text.MessageFormat` internally to format the message.
 *
 * @param lang the message lang
 * @param keys the messages keys
 * @param args the message arguments
 * @return the formatted message or a default rendering if the key wasn't defined
 */
public String get(play.api.i18n.Lang lang, List<String> keys, Object... args) {
  Buffer<String> keyArgs = scala.collection.JavaConverters.asScalaBufferConverter(keys).asScala();
  Seq<Object> scalaArgs = convertArgsToScalaBuffer(args);
  return messages.apply(keyArgs.toSeq(), scalaArgs, lang);
}
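
A hypothetical call, assuming a lang obtained elsewhere (for example from the current request) and keys defined in the message bundle:

String title = get(lang, Arrays.asList("home.title", "fallback.title"), "World");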

Code example source: origin: open-korean-text/elasticsearch-analysis-openkoreantext

@Override
protected Seq<KoreanToken> perform(Seq<KoreanToken> tokens) {
  KoreanToken[] performed = new KoreanToken[tokens.length()];
  int i = 0;
  Iterator<KoreanToken> tokenIterator = tokens.iterator();
  while (tokenIterator.hasNext()) {
    KoreanToken token = tokenIterator.next();
    performed[i++] = token.stem().nonEmpty() ? stem(token) : token;
  }
  return JavaConverters.asScalaBuffer(Arrays.asList(performed)).toSeq();
}

Code example source: origin: cloudera-labs/envelope

// Excerpt only: the converted Scala value is assigned in the branch that
// matches the field's type.
val = JavaConversions.mapAsScalaMap(convertedMap);
// ... or, for list-valued fields:
val = JavaConverters.asScalaBufferConverter(childValues).asScala().toSeq();

Code example source: origin: pinterest/doctorkafka

private scala.collection.Map<TopicAndPartition, Seq<Object>> getAssignmentPlan(
  Map<TopicPartition, Integer[]> replicasMap) {
 scala.collection.mutable.HashMap<TopicAndPartition, Seq<Object>> result =
   new scala.collection.mutable.HashMap<>();
 for (Map.Entry<TopicPartition, Integer[]> entry : replicasMap.entrySet()) {
  TopicPartition tp = entry.getKey();
  TopicAndPartition tap = new TopicAndPartition(tp.topic(), tp.partition());
  List<Object> objs = Arrays.asList(entry.getValue()).stream()
    .map(val -> (Object) val).collect(Collectors.toList());
  Seq<Object> replicas = JavaConverters.asScalaBuffer(objs).seq();
  result.put(tap, replicas);
 }
 assert replicasMap.size() == result.size();
 LOG.debug("replicaMap.size = {}, result.size = {}", replicasMap.size(), result.size());
 return result;
}
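
The (Object) val mapping above is needed because a Scala Seq of boxed values appears as Seq<Object> from Java, so the Integers must be upcast before conversion. A hypothetical shorthand for the same boxing step:

Seq<Object> replicas = JavaConverters.asScalaBuffer(
    Arrays.<Object>asList(entry.getValue())).seq();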

Code example source: origin: com.cerner.beadledom.avro/beadledom-avro-swagger

// Truncated excerpt from a switch over Avro schema types: these lines are
// the trailing arguments of ModelProperty constructor calls whose opening
// lines are missing from the source.
   0,
   true,
   Option.<String>empty(),
   new AllowableListValues(
     JavaConversions.asScalaBuffer(schema.getEnumSymbols()).toList(),
     "LIST"),
   Option.<ModelRef>empty()
 );
case ARRAY:
   elementsProperty.description(),
   elementsProperty.allowableValues(),
   Option.apply(modelRef(elementsProperty))
 );
case BOOLEAN:
