kafka.cluster.Broker.id()方法的使用及代码示例

x33g5p2x  于2022-01-16 转载在 其他  
字(10.7k)|赞(0)|评价(0)|浏览(252)

本文整理了Java中kafka.cluster.Broker.id()方法的一些代码示例,展示了Broker.id()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Broker.id()方法的具体详情如下:
包路径:kafka.cluster.Broker
类名称:Broker
方法名:id

Broker.id介绍

暂无

代码示例

代码示例来源:origin: linkedin/kafka-monitor

/**
 * Determines whether any of the given brokers is not the preferred leader
 * (i.e. the first-listed replica) of at least one partition.
 *
 * @param partitionInfoList partitions whose preferred leaders are inspected
 * @param brokers brokers expected to each be a preferred leader somewhere
 * @return true if at least one broker is nobody's preferred leader
 */
static boolean someBrokerNotPreferredLeader(List<PartitionInfo> partitionInfoList, Collection<Broker> brokers) {
 Set<Integer> remaining = new HashSet<>(brokers.size());
 for (Broker candidate : brokers) {
  remaining.add(candidate.id());
 }
 // Discard every broker that is the first-listed replica of some partition.
 for (PartitionInfo info : partitionInfoList) {
  remaining.remove(info.replicas()[0].id());
 }
 return !remaining.isEmpty();
}

代码示例来源:origin: linkedin/kafka-monitor

/**
 * Determines whether any of the given brokers is not the currently elected
 * leader of at least one partition.
 *
 * @param partitionInfoList partitions whose elected leaders are inspected
 * @param brokers brokers expected to each lead some partition
 * @return true if at least one broker leads no partition
 */
static boolean someBrokerNotElectedLeader(List<PartitionInfo> partitionInfoList, Collection<Broker> brokers) {
 Set<Integer> remaining = new HashSet<>(brokers.size());
 for (Broker candidate : brokers) {
  remaining.add(candidate.id());
 }
 // Discard every broker that currently leads some partition; leaderless
 // partitions (leader() == null) contribute nothing.
 for (PartitionInfo info : partitionInfoList) {
  if (info.leader() != null) {
   remaining.remove(info.leader().id());
  }
 }
 return !remaining.isEmpty();
}

代码示例来源:origin: apache/flink

/**
 * Converts a Kafka {@code Broker} into the equivalent client-side {@code Node}.
 *
 * @param broker broker instance to convert
 * @return Node carrying the broker's id, host and port
 */
private static Node brokerToNode(Broker broker) {
  final int brokerId = broker.id();
  final String host = broker.host();
  final int port = broker.port();
  return new Node(brokerId, host, port);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the list of {@code KafkaPartition}s described by the given topic metadata.
 *
 * <p>If any partition has null metadata or no elected leader, the whole topic is
 * skipped: an error is logged and an empty list is returned.
 *
 * @param topicMetadata metadata for one topic, including per-partition metadata
 * @return partitions of the topic, or an empty list if the metadata is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed typo in the log message: "metatada" -> "metadata".
   log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
     + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the list of {@code KafkaPartition}s described by the given topic metadata.
 *
 * <p>If any partition has null metadata or no elected leader, the whole topic is
 * skipped: an error is logged and an empty list is returned.
 *
 * @param topicMetadata metadata for one topic, including per-partition metadata
 * @return partitions of the topic, or an empty list if the metadata is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed typo in the log message: "metatada" -> "metadata".
   LOG.error(
     "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Refreshes the leader (id/host/port) cached on the given partition by asking
 * each known broker for topic metadata, stopping at the first broker that answers.
 *
 * <p>Fix: the original dereferenced {@code partitionMetadata.leader()} without a
 * null check; a partition with no elected leader would throw an NPE (other code
 * in this file checks for a null leader). The leader is now only applied when
 * present.
 *
 * @param partition partition whose leader information should be refreshed in place
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : this.brokers) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(), partitionMetadata
        .leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

代码示例来源:origin: linkedin/kafka-monitor

/**
 * Recomputes and applies a replica assignment for the given topic across the
 * given brokers, then submits the reassignment to ZooKeeper.
 *
 * <p>NOTE(review): this method drives Kafka's Scala admin API directly from Java,
 * hence the explicit Scala collection plumbing ($plus$eq, immutable map rebuild).
 *
 * @param zkClient ZooKeeper client used to read the current assignment and submit the new one
 * @param brokers brokers to spread replicas across (id and rack are used)
 * @param topic topic whose partitions are reassigned
 * @param partitionCount number of partitions to assign
 * @param replicationFactor replicas per partition
 */
private static void reassignPartitions(KafkaZkClient zkClient, Collection<Broker> brokers, String topic, int partitionCount, int replicationFactor) {
 // Collect (id, rack) metadata for every broker into a Scala buffer for AdminUtils.
 scala.collection.mutable.ArrayBuffer<BrokerMetadata> brokersMetadata = new scala.collection.mutable.ArrayBuffer<>(brokers.size());
 for (Broker broker : brokers) {
  brokersMetadata.$plus$eq(new BrokerMetadata(broker.id(), broker.rack()));
 }
 // Let Kafka compute a (rack-aware) replica placement; both start indexes are 0.
 scala.collection.Map<Object, Seq<Object>> assignedReplicas =
   AdminUtils.assignReplicasToBrokers(brokersMetadata, partitionCount, replicationFactor, 0, 0);
 // Rebuild the result as an immutable Map<TopicPartition, replicas> because the
 // zkClient API requires that exact Scala type.
 scala.collection.immutable.Map<TopicPartition, Seq<Object>> newAssignment = new scala.collection.immutable.HashMap<>();
 scala.collection.Iterator<scala.Tuple2<Object, scala.collection.Seq<Object>>> it = assignedReplicas.iterator();
 while (it.hasNext()) {
  scala.Tuple2<Object, scala.collection.Seq<Object>> scalaTuple = it.next();
  TopicPartition tp = new TopicPartition(topic, (Integer) scalaTuple._1);
  newAssignment = newAssignment.$plus(new scala.Tuple2<>(tp, scalaTuple._2));
 }
 // Fetch the current assignment purely for logging the before/after diff.
 scala.collection.immutable.Set<String> topicList = new scala.collection.immutable.Set.Set1<>(topic);
 scala.collection.Map<Object, scala.collection.Seq<Object>> currentAssignment = zkClient.getPartitionAssignmentForTopics(topicList).apply(topic);
 String currentAssignmentJson = formatAsReassignmentJson(topic, currentAssignment);
 String newAssignmentJson = formatAsReassignmentJson(topic, assignedReplicas);
 LOG.info("Reassign partitions for topic " + topic);
 LOG.info("Current partition replica assignment " + currentAssignmentJson);
 LOG.info("New partition replica assignment " + newAssignmentJson);
 // Submitting the reassignment node in ZK triggers the actual data movement.
 zkClient.createPartitionReassignment(newAssignment);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Refreshes the leader (id/host/port) cached on the given partition by asking
 * each configured broker for topic metadata, stopping at the first broker that answers.
 *
 * <p>Fix: the original dereferenced {@code partitionMetadata.leader()} without a
 * null check; a partition with no elected leader would throw an NPE. The leader
 * is now only applied when present.
 *
 * @param partition partition whose leader information should be refreshed in place
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : KafkaWrapper.this.getBrokers()) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
        partitionMetadata.leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

代码示例来源:origin: SiftScience/kafka-assigner

// Extracts the numeric broker id. NOTE(review): this is a method of an anonymous
// class whose declaration starts outside this excerpt — presumably a
// Function<Broker, Integer> used to map brokers to their ids; confirm at the call site.
@Override
  public Integer apply(Broker broker) {
    return broker.id();
  }
}));

代码示例来源:origin: linkedin/camus

partitionMetadata.leader().id());
if (offsetRequestInfo.containsKey(leader)) {
 ArrayList<TopicAndPartition> topicAndPartitions = offsetRequestInfo.get(leader);

代码示例来源:origin: pinterest/doctorkafka

/**
 * Returns the brokers registered in ZooKeeper for which no stats are known,
 * i.e. brokers missing from {@code kafkaCluster}'s view.
 *
 * @return brokers present in ZooKeeper but absent from the stats cluster view
 */
public List<Broker> getNoStatsBrokers() {
 Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
 List<Broker> brokers = scala.collection.JavaConverters.seqAsJavaList(brokerSeq);
 List<Broker> noStatsBrokers = new ArrayList<>();
 // Plain loop instead of stream().forEach mutating an external list — same
 // behavior, clearer intent.
 for (Broker broker : brokers) {
  if (kafkaCluster.getBroker(broker.id()) == null) {
   noStatsBrokers.add(broker);
  }
 }
 return noStatsBrokers;
}

代码示例来源:origin: HomeAdvisor/Kafdrop

/**
 * Builds a {@code TopicPartitionVO} view model from raw Kafka partition metadata.
 * The leader (if elected) is added first as a leading, in-sync replica; every
 * listed replica is then added with its in-sync status taken from the ISR.
 *
 * @param topic topic name, used to look up the in-sync replica set
 * @param pmd partition metadata as reported by Kafka
 * @return populated view model for this partition
 */
private TopicPartitionVO parsePartitionMetadata(String topic, PartitionMetadata pmd)
{
 final TopicPartitionVO partition = new TopicPartitionVO(pmd.partitionId());
 if (pmd.leader() != null)
 {
   partition.addReplica(new TopicPartitionVO.PartitionReplica(pmd.leader().id(), true, true));
 }
 final List<Integer> inSyncIds = getIsr(topic, pmd);
 // Fold map+forEach into a single forEach over the replica list.
 pmd.replicas().stream()
   .forEach(replica -> partition.addReplica(
     new TopicPartitionVO.PartitionReplica(replica.id(), inSyncIds.contains(replica.id()), false)));
 return partition;
}

代码示例来源:origin: SiftScience/kafka-assigner

/**
 * Maps each broker id to its configured rack. Brokers without a rack are
 * omitted; when rack awareness is disabled the map is left empty.
 *
 * @param zkUtils ZooKeeper helper used to enumerate the cluster's brokers
 * @return broker id -> rack name (possibly empty)
 */
private Map<Integer, String> getRackAssignment(ZkUtils zkUtils) {
    // Broker enumeration happens unconditionally, exactly as before.
    List<Broker> allBrokers = JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster());
    Map<Integer, String> assignment = Maps.newHashMap();
    if (disableRackAwareness) {
      return assignment;
    }
    for (Broker broker : allBrokers) {
      scala.Option<String> maybeRack = broker.rack();
      if (maybeRack.isDefined()) {
        assignment.put(broker.id(), maybeRack.get());
      }
    }
    return assignment;
}

代码示例来源:origin: shunfei/DCMonitor

// Adapts a Kafka Broker into the monitor's BrokerInfo value object, copying
// host, port and id. NOTE(review): method of an anonymous class whose
// declaration starts outside this excerpt — presumably a Function<Broker, BrokerInfo>.
@Override
 public BrokerInfo apply(Broker input) {
  BrokerInfo info = new BrokerInfo();
  info.host = input.host();
  info.port = input.port();
  info.id = input.id();
  return info;
 }
}

代码示例来源:origin: org.apache.flink/flink-connector-kafka-0.8_2.11

/**
 * Builds a client-side {@code Node} that represents the given broker.
 *
 * @param broker broker instance
 * @return Node with the broker's id, host and port
 */
private static Node brokerToNode(Broker broker) {
  // Straight field-by-field conversion; no validation is performed.
  return new Node(
      broker.id(),
      broker.host(),
      broker.port());
}

代码示例来源:origin: org.apache.flink/flink-connector-kafka-0.8

/**
 * Turn a broker instance into a node instance.
 *
 * <p>Copies the broker's id, host and port into a Kafka client {@code Node};
 * no other broker state is carried over.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
  return new Node(broker.id(), broker.host(), broker.port());
}

代码示例来源:origin: org.apache.flink/flink-connector-kafka-0.8_2.10

/**
 * Turn a broker instance into a node instance.
 *
 * <p>Copies the broker's id, host and port into a Kafka client {@code Node};
 * no other broker state is carried over.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
  return new Node(broker.id(), broker.host(), broker.port());
}

代码示例来源:origin: HomeAdvisor/Kafdrop

/**
 * Looks up the broker id of the offset-manager (group coordinator) for the
 * given consumer group via a blocking ConsumerMetadataRequest/Response exchange.
 *
 * <p>NOTE(review): the channel is assumed to be already connected; send/receive
 * ordering is significant, so the body is left untouched.
 *
 * @param channel connected blocking channel to a Kafka broker
 * @param groupId consumer group whose coordinator is requested
 * @return coordinator broker id, or null if the response carried an error code
 */
private Integer offsetManagerBroker(BlockingChannel channel, String groupId)
{
 final ConsumerMetadataRequest request =
   new ConsumerMetadataRequest(groupId, (short) 0, 0, clientId());
 LOG.debug("Sending consumer metadata request: {}", request);
 channel.send(request);
 // Blocking read of the single response to the request above.
 ConsumerMetadataResponse response =
   ConsumerMetadataResponse.readFrom(channel.receive().buffer());
 LOG.debug("Received consumer metadata response: {}", response);
 return (response.errorCode() == ErrorMapping.NoError()) ? response.coordinator().id() : null;
}

代码示例来源:origin: org.apache.gobblin/gobblin-kafka-08

/**
 * Refreshes the leader (id/host/port) cached on the given partition by asking
 * each configured broker for topic metadata, stopping at the first broker that answers.
 *
 * <p>Fix: the original dereferenced {@code partitionMetadata.leader()} without a
 * null check; a partition with no elected leader would throw an NPE. The leader
 * is now only applied when present.
 *
 * @param partition partition whose leader information should be refreshed in place
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : KafkaWrapper.this.getBrokers()) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
        partitionMetadata.leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

代码示例来源:origin: com.linkedin.gobblin/gobblin-kafka-08

/**
 * Refreshes the leader (id/host/port) cached on the given partition by asking
 * each configured broker for topic metadata, stopping at the first broker that answers.
 *
 * <p>Fix: the original dereferenced {@code partitionMetadata.leader()} without a
 * null check; a partition with no elected leader would throw an NPE. The leader
 * is now only applied when present.
 *
 * @param partition partition whose leader information should be refreshed in place
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : KafkaWrapper.this.getBrokers()) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
        partitionMetadata.leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

相关文章