kafka.javaapi.TopicMetadata.partitionsMetadata()方法的使用及代码示例

x33g5p2x  于2022-01-30 转载在 其他  
字(12.1k)|赞(0)|评价(0)|浏览(74)

本文整理了Java中kafka.javaapi.TopicMetadata.partitionsMetadata()方法的一些代码示例,展示了TopicMetadata.partitionsMetadata()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。TopicMetadata.partitionsMetadata()方法的具体详情如下:
包路径:kafka.javaapi.TopicMetadata
类名称:TopicMetadata
方法名:partitionsMetadata

TopicMetadata.partitionsMetadata介绍

暂无

代码示例

代码示例来源:origin: apache/incubator-pinot

try {
 _leader = null;
 List<PartitionMetadata> pMetaList = response.topicsMetadata().get(0).partitionsMetadata();
 for (PartitionMetadata pMeta : pMetaList) {
  if (pMeta.partitionId() == _partition) {

代码示例来源:origin: pinterest/secor

@Override
public int getNumPartitions(String topic) {
    // Ask a seed broker for the topic's metadata and count the partitions it
    // reports. The short-lived consumer is always closed, even on failure.
    SimpleConsumer metadataConsumer = null;
    try {
        metadataConsumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> singleTopic = new ArrayList<String>();
        singleTopic.add(topic);
        TopicMetadataResponse response =
            metadataConsumer.send(new TopicMetadataRequest(singleTopic));
        List<TopicMetadata> metadataList = response.topicsMetadata();
        // We asked about exactly one topic, so anything else is a protocol surprise.
        if (metadataList.size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
                metadataList.size());
        }
        return metadataList.get(0).partitionsMetadata().size();
    } finally {
        if (metadataConsumer != null) {
            metadataConsumer.close();
        }
    }
}

代码示例来源:origin: apache/flink

for (PartitionMetadata part : item.partitionsMetadata()) {
  Node leader = brokerToNode(part.leader());
  KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());

代码示例来源:origin: alibaba/jstorm

for (PartitionMetadata part : item.partitionsMetadata()) {
  if (part.partitionId() == partition) {
    returnMetaData = part;

代码示例来源:origin: apache/incubator-pinot

return topicMetadata.partitionsMetadata().size();
} else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {

代码示例来源:origin: apache/incubator-gobblin

/**
 * Converts the partition metadata of a topic into {@link KafkaPartition}s.
 *
 * <p>If any partition has null metadata or a null leader (e.g. a leader
 * election is in progress), the whole topic is skipped by returning an
 * empty list, since a partial partition set would be misleading.
 *
 * @param topicMetadata metadata for a single topic, as returned by the broker
 * @return one {@link KafkaPartition} per partition, or an empty list if the
 *         topic's metadata is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed log-message typo: "metatada" -> "metadata".
   log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
     + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Converts the partition metadata of a topic into {@link KafkaPartition}s.
 *
 * <p>If any partition has null metadata or a null leader (e.g. a leader
 * election is in progress), the whole topic is skipped by returning an
 * empty list, since a partial partition set would be misleading.
 *
 * @param topicMetadata metadata for a single topic, as returned by the broker
 * @return one {@link KafkaPartition} per partition, or an empty list if the
 *         topic's metadata is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed log-message typo: "metatada" -> "metadata".
   LOG.error(
     "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Refreshes the leader of the given partition by querying each known broker
 * in turn and stopping at the first one that returns topic metadata.
 *
 * @param partition the partition whose leader should be re-resolved; its
 *                  leader is updated in place when a current leader is found
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : this.brokers) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     // leader() can be null while a leader election is in progress;
     // previously this dereferenced it unconditionally and could NPE.
     // In that case we leave the partition's leader unchanged.
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
        partitionMetadata.leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Refreshes the leader of the given partition by querying each known broker
 * in turn and stopping at the first one that returns topic metadata.
 *
 * @param partition the partition whose leader should be re-resolved; its
 *                  leader is updated in place when a current leader is found
 */
private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : KafkaWrapper.this.getBrokers()) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     // leader() can be null while a leader election is in progress;
     // previously this dereferenced it unconditionally and could NPE.
     // In that case we leave the partition's leader unchanged.
     if (partitionMetadata.leader() != null) {
      partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
        partitionMetadata.leader().port());
     }
     break;
    }
   }
   break;
  }
 }
}

代码示例来源:origin: apache/incubator-druid

for (TopicMetadata item : metaData) {
 if (topic.equals(item.topic())) {
  for (PartitionMetadata part : item.partitionsMetadata()) {
   if (part.partitionId() == partitionId) {
    return part;

代码示例来源:origin: pinterest/secor

/**
 * Looks up the current leader broker of a topic partition via a metadata
 * request to the seed broker.
 *
 * @param topicPartition the topic/partition to resolve
 * @return the leader's host and port, or {@code null} if the partition was
 *         not found or currently has no leader (election in progress)
 */
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    // leader() is null during a leader election; previously
                    // this NPE'd in HostAndPort.fromParts. Report "no leader"
                    // instead so the caller can retry.
                    if (part.leader() == null) {
                        return null;
                    }
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}

代码示例来源:origin: prestodb/presto

for (PartitionMetadata part : metadata.partitionsMetadata()) {
  log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());

代码示例来源:origin: linkedin/camus

topicMetadata = topicMetadataList.get(0);
boolean partitionFound = false;
for (PartitionMetadata metadataPerPartition : topicMetadata.partitionsMetadata()) {
 if (metadataPerPartition.partitionId() == partitionMetadata.partitionId()) {
  partitionFound = true;

代码示例来源:origin: rakam-io/rakam

// Resolves an offset per topic by asking a randomly chosen node for metadata,
// then querying each partition's leader via findAllOffsets.
private Map<String, Long> getTopicOffsets(List<String> topics) {
    // Shuffle so metadata load spreads across configured nodes over time.
    ArrayList<HostAndPort> nodes = new ArrayList<>(config.getNodes());
    Collections.shuffle(nodes);

    SimpleConsumer simpleConsumer = consumerManager.getConsumer(nodes.get(0));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

    ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();

    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
      for (PartitionMetadata part : metadata.partitionsMetadata()) {
        LOGGER.debug(format("Adding Partition %s/%s", metadata.topic(), part.partitionId()));
        Broker leader = part.leader();
        if (leader == null) { // Leader election going on...
          // Partition is skipped entirely; the topic may end up absent from the result.
          LOGGER.warn(format("No leader for partition %s/%s found!", metadata.topic(), part.partitionId()));
        } else {
          HostAndPort leaderHost = HostAndPort.fromParts(leader.host(), leader.port());
          SimpleConsumer leaderConsumer = consumerManager.getConsumer(leaderHost);

          // [0] — first entry of whatever findAllOffsets returns; presumably the
          // latest offset, but that depends on findAllOffsets (not visible here).
          long offset = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId())[0];
          // NOTE(review): the key is the topic, but this runs once per partition.
          // A topic with more than one leader-assigned partition inserts duplicate
          // keys, and ImmutableMap.Builder.build() throws on duplicates — confirm
          // these topics are single-partition, or key by topic+partition instead.
          builder.put(metadata.topic(), offset);
        }
      }
    }

    return builder.build();
  }
}

代码示例来源:origin: uber/chaperone

// Refreshes partitionLeader with the current leader of every audited
// topic/partition, querying each broker in brokerList. Failures against one
// broker are logged and do not stop the scan of the remaining brokers.
private void updateLeaderMap() {
 for (String host : brokerList) {
  try {
   SimpleConsumer metadataConsumer = getSimpleConsumer(host);
   kafka.javaapi.TopicMetadataResponse response =
     metadataConsumer.send(new TopicMetadataRequest(auditTopics));
   for (TopicMetadata topicMeta : response.topicsMetadata()) {
    for (PartitionMetadata partitionMeta : topicMeta.partitionsMetadata()) {
     partitionLeader.put(
       new TopicAndPartition(topicMeta.topic(), partitionMeta.partitionId()),
       getHostPort(partitionMeta.leader()));
    }
   }
  } catch (Exception e) {
   logger.warn("Got exception to get metadata from broker=" + host, e);
  }
 }
}

代码示例来源:origin: linkedin/camus

// Builds an EasyMock TopicMetadataResponse describing a single healthy topic
// (TOPIC_1) with one partition (PARTITION_1_ID) led by a local broker.
// Every mock is registered in `mocks` so the test can replay/verify them.
private TopicMetadataResponse mockTopicMetaDataResponse() {
 // One partition, error-free, led by localhost:2121.
 PartitionMetadata partitionMeta = EasyMock.createMock(PartitionMetadata.class);
 mocks.add(partitionMeta);
 EasyMock.expect(partitionMeta.errorCode()).andReturn((short)0).anyTimes();
 Broker leaderBroker = new Broker(0, "localhost", 2121);
 EasyMock.expect(partitionMeta.leader()).andReturn(leaderBroker).anyTimes();
 EasyMock.expect(partitionMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();
 List<PartitionMetadata> partitionMetaList = new ArrayList<PartitionMetadata>();
 partitionMetaList.add(partitionMeta);
 // The topic wrapping that single partition.
 TopicMetadata topicMeta = EasyMock.createMock(TopicMetadata.class);
 mocks.add(topicMeta);
 EasyMock.expect(topicMeta.topic()).andReturn(TOPIC_1).anyTimes();
 EasyMock.expect(topicMeta.errorCode()).andReturn((short)0).anyTimes();
 EasyMock.expect(topicMeta.partitionsMetadata()).andReturn(partitionMetaList).anyTimes();
 List<TopicMetadata> topicMetaList = new ArrayList<TopicMetadata>();
 topicMetaList.add(topicMeta);
 // The response handing back that one topic.
 TopicMetadataResponse metadataResponse = EasyMock.createMock(TopicMetadataResponse.class);
 mocks.add(metadataResponse);
 EasyMock.expect(metadataResponse.topicsMetadata()).andReturn(topicMetaList).anyTimes();
 return metadataResponse;
}

代码示例来源:origin: linkedin/camus

/**
 * Re-fetches metadata for the current request's topic and reconnects
 * {@code simpleConsumer} to the partition's current leader. On a metadata
 * request failure the existing consumer is kept and the method returns.
 */
private void refreshTopicMetadata() {
 TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(kafkaRequest.getTopic()));
 TopicMetadataResponse response;
 try {
  response = simpleConsumer.send(request);
 } catch (Exception e) {
  // Log the full exception (with stack trace), not just its message.
  log.error("Exception caught when refreshing metadata for topic " + request.topics().get(0) + ": "
    + e.getMessage(), e);
  return;
 }
 TopicMetadata metadata = response.topicsMetadata().get(0);
 for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
  if (partitionMetadata.partitionId() == kafkaRequest.getPartition()) {
   // Close the stale consumer before replacing it; previously the old
   // connection was abandoned without close(), leaking the socket.
   simpleConsumer.close();
   simpleConsumer =
     new SimpleConsumer(partitionMetadata.leader().host(), partitionMetadata.leader().port(),
       CamusJob.getKafkaTimeoutValue(context), CamusJob.getKafkaBufferSize(context),
       CamusJob.getKafkaClientName(context));
   break;
  }
 }
}

代码示例来源:origin: linkedin/camus

/**
 * Test only refreshing the partitionMetadata when the error code is LeaderNotAvailable.
 * Verifies that refreshPartitionMetadataOnLeaderNotAvailable retries exactly
 * NUM_TRIES_PARTITION_METADATA times, as pinned by the EasyMock times() counts.
 * @throws Exception 
 */
@Test
public void testRefreshPartitioMetadataWithThreeRetries() throws Exception {
 JobContext dummyContext = null;
 //A partitionMetadata with errorCode LeaderNotAvailable
 PartitionMetadata partitionMetadata = createMock(PartitionMetadata.class);
 // errorCode/partitionId are consulted twice per retry, hence * 2.
 expect(partitionMetadata.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode()).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
 expect(partitionMetadata.partitionId()).andReturn(0).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
 replay(partitionMetadata);
 TopicMetadata mockedTopicMetadata = createMock(TopicMetadata.class);
 // Topic metadata is re-fetched once per retry.
 expect(mockedTopicMetadata.topic()).andReturn("testTopic").times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 expect(mockedTopicMetadata.partitionsMetadata()).andReturn(Collections.singletonList(partitionMetadata)).times(
   EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 replay(mockedTopicMetadata);
 // Partial mock: only getKafkaMetadata is stubbed; the retry logic under test
 // (refreshPartitionMetadataOnLeaderNotAvailable) runs for real.
 EtlInputFormat etlInputFormat =
   createMock(EtlInputFormat.class,
     EtlInputFormat.class.getMethod("getKafkaMetadata", new Class[] { JobContext.class, List.class }));
 EasyMock.expect(etlInputFormat.getKafkaMetadata(dummyContext, Collections.singletonList("testTopic"))).andReturn(
   Collections.singletonList(mockedTopicMetadata)).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 etlInputFormat.setLogger(Logger.getLogger(getClass()));
 replay(etlInputFormat);
 etlInputFormat.refreshPartitionMetadataOnLeaderNotAvailable(partitionMetadata, mockedTopicMetadata, dummyContext,
   EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 
 // verify() fails the test if any times() expectation above was not met exactly.
 verify(mockedTopicMetadata);
 verify(etlInputFormat);
}

代码示例来源:origin: linkedin/camus

expect(mockedTopicMetadata.partitionsMetadata()).andReturn(
  Collections.singletonList(mockedReturnedPartitionMetadata));
replay(mockedTopicMetadata);

代码示例来源:origin: linkedin/camus

+ ErrorMapping.exceptionFor(topicMetadata.errorCode()));
} else {
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {

相关文章

微信公众号

最新文章

更多