Usage of scala.collection.Map.size() with code examples


This article collects code examples of the scala.collection.Map.size() method as called from Java, showing how Map.size() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Map.size() method are as follows:
Package path: scala.collection.Map
Class name: Map
Method name: size

About Map.size

Returns the number of key/value pairs in the map. In the examples below the maps are per-topic partition assignments or Spark executor status tables, so size() yields a partition count or an executor count.
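
Below is a minimal, self-contained sketch of calling size() on a scala.collection.Map from Java. It assumes Scala 2.11/2.12 on the classpath, where the scala.collection.JavaConversions helpers used throughout the examples below are available; the class name MapSizeDemo is illustrative only.

import java.util.HashMap;

public class MapSizeDemo {
 public static void main(String[] args) {
  java.util.Map<String, Integer> javaMap = new HashMap<>();
  javaMap.put("a", 1);
  javaMap.put("b", 2);
  // Wrap the Java map as a scala.collection.Map (a live view, not a copy)
  // and ask for the number of key/value pairs it contains.
  scala.collection.Map<String, Integer> scalaMap =
    scala.collection.JavaConversions.mapAsScalaMap(javaMap);
  System.out.println(scalaMap.size()); // prints 2
 }
}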

Code examples

Code example source: linkedin/cruise-control

/**
  * Check whether there are ongoing partition reassignments and wait for the reassignments to finish.
  *
  * @param zkUtils the ZkUtils class used to check ongoing partition reassignments.
  * @return True if there are no ongoing partition reassignments when this method returns; false if reassignments are still in progress after exhausting all retries.
  */
 public static boolean ensureNoPartitionUnderPartitionReassignment(ZkUtils zkUtils) {
  int attempt = 0;
  while (zkUtils.getPartitionsBeingReassigned().size() > 0) {
   try {
    sleep(1000 << attempt);
   } catch (InterruptedException e) {
    // Let it go.
   }
   if (++attempt == 10) {
    return false;
   }
  }
  return true;
 }
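
This helper polls ZooKeeper until the collection returned by getPartitionsBeingReassigned() is empty, doubling the wait between checks (1 s, 2 s, 4 s, ...) and giving up after 10 attempts, i.e. after roughly 17 minutes of accumulated sleep.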

Code example source: apache/hive

@Override
public Integer call(JobContext jc) throws Exception {
 // minus 1 here otherwise driver is also counted as an executor
 int count = jc.sc().sc().getExecutorMemoryStatus().size() - 1;
 return Integer.valueOf(count);
}
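
The minus 1 accounts for the driver: getExecutorMemoryStatus() returns a scala.collection.Map with one entry per block manager, and the driver appears in it alongside the executors, so size() counts it too. The next two examples return the raw size() without that adjustment.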

Code example source: apache/hive

@Override
public int getExecutorCount() {
 return sc.sc().getExecutorMemoryStatus().size();
}

Code example source: apache/drill

@Override
public int getExecutorCount() {
 return sc.sc().getExecutorMemoryStatus().size();
}

Code example source: linkedin/kafka-monitor

StringBuilder bldr = new StringBuilder();
bldr.append("{\"version\":1,\"partitions\":[\n");
for (int partition = 0; partition < partitionsToBeReassigned.size(); partition++) {
 bldr.append("  {\"topic\":\"").append(topic).append("\",\"partition\":").append(partition).append(",\"replicas\":[");
 scala.collection.Seq<Object> replicas = partitionsToBeReassigned.apply(partition);
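
Calling apply(partition) is the Java spelling of Scala's partitionsToBeReassigned(partition) indexing syntax, and size() supplies the partition count that bounds the loop while the JSON reassignment plan is assembled.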

Code example source: linkedin/kafka-monitor

void maybeAddPartitions(int minPartitionNum) {
 KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), ZK_SESSION_TIMEOUT_MS,
   ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener");
 AdminZkClient adminZkClient = new AdminZkClient(zkClient);
 try {
  scala.collection.Map<Object, scala.collection.Seq<Object>> existingAssignment = getPartitionAssignment(zkClient, _topic);
  int partitionNum = existingAssignment.size();
  if (partitionNum < minPartitionNum) {
   LOG.info("MultiClusterTopicManagementService will increase partition of the topic {} "
     + "in cluster {} from {} to {}.", _topic, _zkConnect, partitionNum, minPartitionNum);
   scala.Option<scala.collection.Map<java.lang.Object, scala.collection.Seq<java.lang.Object>>> replicaAssignment = scala.Option.apply(null);
   scala.Option<Seq<Object>> brokerList = scala.Option.apply(null);
   adminZkClient.addPartitions(_topic, existingAssignment, adminZkClient.getBrokerMetadatas(RackAwareMode.Disabled$.MODULE$, brokerList), minPartitionNum, replicaAssignment, false);
  }
 } finally {
  zkClient.close();
 }
}
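
Two details are worth noting here: existingAssignment.size() is the topic's current partition count, because the assignment map holds one entry per partition, and scala.Option.apply(null) is the usual way to construct a None from Java, used for the optional replica-assignment and broker-list arguments.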

Code example source: uber/chaperone

@Override
public void handleChildChange(String parentPath, List<String> currentChilds)
  throws Exception {
 if (!tryToRefreshCache()) {
  synchronized (_lock) {
   Set<String> newAddedTopics = new HashSet<String>(currentChilds);
   Set<String> currentServingTopics = getAllTopics();
   newAddedTopics.removeAll(currentServingTopics);
   for (String existedTopic : currentServingTopics) {
    if (!currentChilds.contains(existedTopic)) {
     _topicPartitionInfoMap.remove(existedTopic);
    }
   }
   scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
     _zkUtils.getPartitionAssignmentForTopics(
         JavaConversions.asScalaBuffer(ImmutableList.copyOf(newAddedTopics)));
   for (String topic : newAddedTopics) {
    try {
     scala.collection.Map<Object, Seq<Object>> partitionsMap =
       partitionAssignmentForTopics.get(topic).get();
     TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
     _topicPartitionInfoMap.put(topic, tp);
    } catch (Exception e) {
     LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
    }
   }
   _kafkaTopicsCounter.inc(_topicPartitionInfoMap.size() - _kafkaTopicsCounter.getCount());
  }
 }
}
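
On each ZooKeeper child-change event the listener diffs the listed topics against those already served, evicts deleted topics, and records each new topic's partition count, taken as the size() of that topic's partition-to-replicas assignment map.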

Code example source: apache/crunch

@Override
protected int getMapSize(Object map) {
 if (map instanceof scala.collection.Map) {
  return ((scala.collection.Map) map).size();
 }
 return super.getMapSize(map);
}
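
Since scala.collection.Map does not implement java.util.Map, a generic map-size helper must detect it separately; the raw-typed instanceof check and cast above do exactly that before deferring to the superclass.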

Code example source: org.wso2.carbon.analytics/org.wso2.carbon.analytics.spark.core

private int currentActiveExecutors() {
  try {
    scala.collection.Map<String, Tuple2<Object, Object>> executors = this.sqlCtx
        .sparkContext().getExecutorMemoryStatus();
    return executors.size();
  } catch (Throwable e) {
    log.error("Error occurred while checking current Spark active executors.", e);
  }
  return 0;
}
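
The executor-count pattern used here and in the preceding examples can be exercised locally. The sketch below uses illustrative names and assumes a Spark 1.x/2.x dependency; in local mode the returned map holds a single entry for the driver.

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;

public class ExecutorCountDemo {
 public static void main(String[] args) {
  SparkConf conf = new SparkConf().setAppName("executor-count-demo").setMaster("local[2]");
  JavaSparkContext jsc = new JavaSparkContext(conf);
  // getExecutorMemoryStatus() returns a scala.collection.Map keyed by
  // block-manager address; size() is the number of entries it tracks.
  int entries = jsc.sc().getExecutorMemoryStatus().size();
  System.out.println("Executor memory status entries: " + entries); // 1 in local mode
  jsc.stop();
 }
}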

Code example source: com.facebook.presto.hive/hive-apache

@Override
public int getExecutorCount() {
 return sc.sc().getExecutorMemoryStatus().size();
}

Code example source: com.github.hyukjinkwon/spark-client

@Override
public Integer call(JobContext jc) throws Exception {
 // minus 1 here otherwise driver is also counted as an executor
 int count = jc.sc().sc().getExecutorMemoryStatus().size() - 1;
 return Integer.valueOf(count);
}

Code example source: org.spark-project.hive/spark-client

@Override
public Integer call(JobContext jc) throws Exception {
 // minus 1 here otherwise driver is also counted as an executor
 int count = jc.sc().sc().getExecutorMemoryStatus().size() - 1;
 return Integer.valueOf(count);
}

Code example source: uber/uReplicator

public void tryUpdateTopic(String topic) {
 scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
   _zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
 if (partitionAssignmentForTopics.get(topic).isEmpty()
   || partitionAssignmentForTopics.get(topic).get().size() == 0) {
  LOGGER.info("try to update for topic {} but found no topic partition for it", topic);
  return;
 }
 synchronized (_lock) {
  LOGGER.info("starting to refresh for update topic {}", topic);
  try {
   _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
     partitionAssignmentForTopics.get(topic).get().size()));
  } catch (Exception e) {
   LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
  }
  LOGGER.info("finished refreshing for updating topic {}", topic);
 }
}
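
Note the guard before the update: get(topic) on the Scala map returns a scala.Option, and calling get() on an empty Option throws, so the code checks isEmpty() on the option and size() == 0 on the unwrapped assignment before touching the cache.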

Code example source: uber/uReplicator

scala.collection.Map<Object, Seq<Object>> partitionsMap =
  partitionAssignmentForTopics.get(topic).get();
TopicPartition tp = new TopicPartition(topic, partitionsMap.size());
_topicPartitionInfoMap.put(topic, tp);

Code example source: uber/uReplicator

private void tryAddTopic(String topic) {
 scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignmentForTopics =
   _zkUtils.getPartitionAssignmentForTopics(JavaConversions.asScalaBuffer(ImmutableList.of(topic)));
 if (partitionAssignmentForTopics.get(topic).isEmpty()
   || partitionAssignmentForTopics.get(topic).get().size() == 0) {
  LOGGER.info("try to refresh for topic {} but found no topic partition for it", topic);
  return;
 }
 synchronized (_lock) {
  LOGGER.info("starting to refresh for adding topic {}", topic);
  if (!getAllTopics().contains(topic)) {
   try {
    _topicPartitionInfoMap.put(topic, new TopicPartition(topic,
      partitionAssignmentForTopics.get(topic).get().size()));
   } catch (Exception e) {
    LOGGER.warn("Failed to get topicPartition info for {} from kafka zk: {}", topic, e);
   }
  }
  LOGGER.info("finished refreshing for adding topic {}", topic);
 }
}

Code example source: com.hotels.road/road-kafka-store

@SuppressWarnings({ "rawtypes", "unchecked" })
 private static void verifyTopic(ZkUtils zkUtils, String topic) {
  Set topics = new HashSet();
  topics.add(topic);

  // check # partition and the replication factor
  scala.collection.mutable.Map partitionAssignmentForTopics = zkUtils
    .getPartitionAssignmentForTopics(JavaConversions.asScalaSet(topics).toSeq());
  scala.collection.Map partitionAssignment = (scala.collection.Map) partitionAssignmentForTopics.get(topic).get();

  if (partitionAssignment.size() != 1) {
   throw new RuntimeException(String.format("The schema topic %s should have only 1 partition.", topic));
  }

  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
   throw new RuntimeException(String.format("The retention policy of the schema topic %s must be compact.", topic));
  }
 }
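
verifyTopic enforces the two invariants a compacted single-partition store topic needs: the per-topic assignment map must contain exactly one entry (one partition), and the topic's cleanup.policy fetched through AdminUtils must be compact.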

Code example source: com.hurence.logisland/logisland-agent

private void verifySchemaTopic() {
  Set<String> topics = new HashSet<String>();
  topics.add(topic);
  // check # partition and the replication factor
  scala.collection.Map partitionAssignment = zkUtils.getPartitionAssignmentForTopics(
      JavaConversions.asScalaSet(topics).toSeq())
      .get(topic).get();
  if (partitionAssignment.size() != 1) {
    log.warn("The schema topic " + topic + " should have only 1 partition.");
  }
  if (((Seq) partitionAssignment.get(0).get()).size() < desiredReplicationFactor) {
    log.warn("The replication factor of the schema topic " + topic + " is less than the " +
        "desired one of " + desiredReplicationFactor + ". If this is a production " +
        "environment, it's crucial to add more brokers and increase the replication " +
        "factor of the topic.");
  }
  // check the retention policy
  Properties prop = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String retentionPolicy = prop.getProperty(LogConfig.CleanupPolicyProp());
  if (retentionPolicy == null || "compact".compareTo(retentionPolicy) != 0) {
    log.warn("The retention policy of the schema topic " + topic + " may be incorrect. " +
        "Please configure it with compact.");
  }
}
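
This variant performs the same checks but only logs warnings instead of throwing, and it additionally compares the replica list of partition 0, read via partitionAssignment.get(0).get(), against the desired replication factor.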

Code example source: pinterest/doctorkafka

public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zookeeper = commandLine.getOptionValue(ZOOKEEPER);

  ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
  Seq<String> topicsSeq = zkUtils.getAllTopics();
  List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
    partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  Map<String, Integer> replicationFactors = new HashMap<>();
  Map<String, Integer> partitionCounts = new HashMap<>();

  topics.stream().forEach(topic -> {
   int partitionCount = partitionAssignments.get(topic).get().size();
   int factor = partitionAssignments.get(topic).get().head()._2().size();
   partitionCounts.put(topic, partitionCount);
   replicationFactors.put(topic, factor);
  });

  List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
    zookeeper, SecurityProtocol.PLAINTEXT, null, topics, partitionAssignments, replicationFactors, partitionCounts);

  for (PartitionInfo partitionInfo : urps) {
   LOG.info("under-replicated : {}", partitionInfo);
  }
 }
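
For every topic, the per-topic assignment map yields the partition count through size(), while the replication factor is read off the first entry's replica list with head()._2().size(); both maps are then passed to the under-replicated-partition lookup.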
