kafka.javaapi.TopicMetadata.topic()方法的使用及代码示例

x33g5p2x  于2022-01-30 转载在 其他  
字(13.1k)|赞(0)|评价(0)|浏览(153)

本文整理了Java中kafka.javaapi.TopicMetadata.topic()方法的一些代码示例,展示了TopicMetadata.topic()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。TopicMetadata.topic()方法的具体详情如下:
包路径:kafka.javaapi.TopicMetadata
类名称:TopicMetadata
方法名:topic

TopicMetadata.topic介绍

暂无

代码示例

代码示例来源:origin: apache/incubator-gobblin

/**
 * Fetches all topic metadata from the given broker, then keeps only the
 * topics that survive the blacklist/whitelist filter.
 *
 * @param broker broker address to query
 * @param blacklist regex patterns of topics to exclude
 * @param whitelist regex patterns of topics to include
 * @return the filtered metadata list, or {@code null} if the broker fetch failed
 */
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist,
  List<Pattern> whitelist) {
 List<TopicMetadata> allMetadata = fetchTopicMetadataFromBroker(broker);
 if (allMetadata == null) {
  return null;
 }
 List<TopicMetadata> survivors = Lists.newArrayList();
 for (TopicMetadata metadata : allMetadata) {
  boolean keep = DatasetFilterUtils.survived(metadata.topic(), blacklist, whitelist);
  if (keep) {
   survivors.add(metadata);
  }
 }
 return survivors;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns every topic from the filtered metadata list, each paired with its
 * partitions.
 */
@Override
public List<KafkaTopic> getTopics() {
 List<KafkaTopic> topics = Lists.newArrayList();
 for (TopicMetadata metadata : getFilteredMetadataList()) {
  topics.add(new KafkaTopic(metadata.topic(), getPartitionsForTopic(metadata)));
 }
 return topics;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns the topics that survive the given blacklist/whitelist filter,
 * each paired with its partitions.
 *
 * @param blacklist regex patterns of topics to exclude
 * @param whitelist regex patterns of topics to include
 */
@Override
public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
 List<KafkaTopic> topics = Lists.newArrayList();
 for (TopicMetadata metadata : getFilteredMetadataList(blacklist, whitelist)) {
  topics.add(new KafkaTopic(metadata.topic(), getPartitionsForTopic(metadata)));
 }
 return topics;
}

代码示例来源:origin: apache/flink

topics.add(item.topic());

代码示例来源:origin: apache/flink

if (!topics.contains(item.topic())) {
  LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
  KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
  KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
  partitions.add(pInfo);

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the {@link KafkaPartition} list for the given topic metadata.
 * If any partition has null metadata or a null leader, the whole topic is
 * rejected and an empty list is returned (a partial partition list would be
 * misleading to callers).
 *
 * @param topicMetadata metadata for a single topic
 * @return all partitions of the topic, or an empty list if any partition is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed log-message typo: "metatada" -> "metadata".
   log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata="
     + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Builds the {@link KafkaPartition} list for the given topic metadata.
 * If any partition has null metadata or a null leader, the whole topic is
 * rejected and an empty list is returned (a partial partition list would be
 * misleading to callers).
 *
 * @param topicMetadata metadata for a single topic
 * @return all partitions of the topic, or an empty list if any partition is unusable
 */
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   // Fixed log-message typo: "metatada" -> "metadata".
   LOG.error(
     "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

代码示例来源:origin: prestodb/presto

log.debug("Adding Partition %s/%s", metadata.topic(), part.partitionId());
  throw new PrestoException(GENERIC_INTERNAL_ERROR, format("Leader election in progress for Kafka topic '%s' partition %s", metadata.topic(), part.partitionId()));
long[] offsets = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId());
      metadata.topic(),
      kafkaTableHandle.getKeyDataFormat(),
      kafkaTableHandle.getMessageDataFormat(),

代码示例来源:origin: apache/incubator-druid

if (topic.equals(item.topic())) {
 for (PartitionMetadata part : item.partitionsMetadata()) {
  if (part.partitionId() == partitionId) {

代码示例来源:origin: linkedin/camus

/**
 * Keeps only the topics whose name matches the whitelist regex built from
 * {@code whiteListTopics}; discarded topics are logged at INFO.
 *
 * @param topicMetadataList candidate topic metadata
 * @param whiteListTopics topic names/patterns to keep
 * @return the metadata entries whose topic matches the whitelist
 */
public List<TopicMetadata> filterWhitelistTopics(List<TopicMetadata> topicMetadataList,
  HashSet<String> whiteListTopics) {
 ArrayList<TopicMetadata> filteredTopics = new ArrayList<TopicMetadata>();
 // Compile the whitelist regex once up front; Pattern.matches() would
 // recompile the same pattern on every loop iteration.
 Pattern whiteListPattern = Pattern.compile(createTopicRegEx(whiteListTopics));
 for (TopicMetadata topicMetadata : topicMetadataList) {
  if (whiteListPattern.matcher(topicMetadata.topic()).matches()) {
   filteredTopics.add(topicMetadata);
  } else {
   log.info("Discarding topic : " + topicMetadata.topic());
  }
 }
 return filteredTopics;
}

代码示例来源:origin: rakam-io/rakam

/**
 * Resolves the first offset of every partition of the given topics.
 * Metadata is requested from one randomly chosen node; each partition's
 * offsets are then fetched from that partition's leader. Partitions whose
 * leader is unknown (leader election in progress) are skipped with a warning.
 *
 * @param topics topic names to look up
 * @return map from topic name to the first offset found for its partitions
 */
private Map<String, Long> getTopicOffsets(List<String> topics) {
    ArrayList<HostAndPort> shuffledNodes = new ArrayList<>(config.getNodes());
    Collections.shuffle(shuffledNodes);

    SimpleConsumer metadataConsumer = consumerManager.getConsumer(shuffledNodes.get(0));
    TopicMetadataResponse metadataResponse = metadataConsumer.send(new TopicMetadataRequest(topics));

    ImmutableMap.Builder<String, Long> offsetsByTopic = ImmutableMap.builder();

    for (TopicMetadata metadata : metadataResponse.topicsMetadata()) {
      for (PartitionMetadata partition : metadata.partitionsMetadata()) {
        LOGGER.debug(format("Adding Partition %s/%s", metadata.topic(), partition.partitionId()));
        Broker leader = partition.leader();
        if (leader == null) { // Leader election going on...
          LOGGER.warn(format("No leader for partition %s/%s found!", metadata.topic(), partition.partitionId()));
          continue;
        }
        SimpleConsumer leaderConsumer =
            consumerManager.getConsumer(HostAndPort.fromParts(leader.host(), leader.port()));
        long firstOffset = findAllOffsets(leaderConsumer, metadata.topic(), partition.partitionId())[0];
        offsetsByTopic.put(metadata.topic(), firstOffset);
      }
    }

    return offsetsByTopic.build();
  }
}

代码示例来源:origin: linkedin/camus

log.info("Retry to referesh the topicMetadata on LeaderNotAvailable...");
List<TopicMetadata> topicMetadataList =
  this.getKafkaMetadata(context, Collections.singletonList(topicMetadata.topic()));
if (topicMetadataList == null || topicMetadataList.size() == 0) {
 log.warn("The topicMetadataList for topic " + topicMetadata.topic() + " is empty.");
} else {
 topicMetadata = topicMetadataList.get(0);

代码示例来源:origin: linkedin/camus

/**
 * Builds a mocked {@link TopicMetadataResponse} describing a single topic
 * (TOPIC_1) with one healthy partition (PARTITION_1_ID) led by a broker on
 * localhost:2121. All created mocks are registered in {@code mocks} so the
 * test can verify/reset them later.
 */
private TopicMetadataResponse mockTopicMetaDataResponse() {
 PartitionMetadata partitionMeta = EasyMock.createMock(PartitionMetadata.class);
 mocks.add(partitionMeta);
 EasyMock.expect(partitionMeta.errorCode()).andReturn((short) 0).anyTimes();
 EasyMock.expect(partitionMeta.leader()).andReturn(new Broker(0, "localhost", 2121)).anyTimes();
 EasyMock.expect(partitionMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();
 List<PartitionMetadata> partitionMetas = new ArrayList<PartitionMetadata>();
 partitionMetas.add(partitionMeta);

 TopicMetadata topicMeta = EasyMock.createMock(TopicMetadata.class);
 mocks.add(topicMeta);
 EasyMock.expect(topicMeta.topic()).andReturn(TOPIC_1).anyTimes();
 EasyMock.expect(topicMeta.errorCode()).andReturn((short) 0).anyTimes();
 EasyMock.expect(topicMeta.partitionsMetadata()).andReturn(partitionMetas).anyTimes();
 List<TopicMetadata> topicMetas = new ArrayList<TopicMetadata>();
 topicMetas.add(topicMeta);

 TopicMetadataResponse response = EasyMock.createMock(TopicMetadataResponse.class);
 mocks.add(response);
 EasyMock.expect(response.topicsMetadata()).andReturn(topicMetas).anyTimes();
 return response;
}

代码示例来源:origin: uber/chaperone

/**
 * Refreshes the partition-to-leader map by asking every known broker for
 * metadata about the audit topics. A failure against one broker is logged
 * and does not stop the remaining brokers from being queried.
 */
private void updateLeaderMap() {
 for (String brokerAddress : brokerList) {
  try {
   SimpleConsumer metadataConsumer = getSimpleConsumer(brokerAddress);
   kafka.javaapi.TopicMetadataResponse response =
     metadataConsumer.send(new TopicMetadataRequest(auditTopics));
   for (TopicMetadata topicMeta : response.topicsMetadata()) {
    for (PartitionMetadata partitionMeta : topicMeta.partitionsMetadata()) {
     partitionLeader.put(new TopicAndPartition(topicMeta.topic(), partitionMeta.partitionId()),
       getHostPort(partitionMeta.leader()));
    }
   }
  } catch (Exception e) {
   logger.warn("Got exception to get metadata from broker=" + brokerAddress, e);
  }
 }
}

代码示例来源:origin: linkedin/camus

/**
 * Verifies that partition metadata is refreshed only while the error code is
 * LeaderNotAvailable, and that the refresh is attempted exactly
 * {@code EtlInputFormat.NUM_TRIES_PARTITION_METADATA} times.
 * @throws Exception
 */
@Test
public void testRefreshPartitioMetadataWithThreeRetries() throws Exception {
 JobContext dummyContext = null;
 //A partitionMetadata with errorCode LeaderNotAvailable
 // (errorCode/partitionId are each read twice per retry, hence the *2 counts)
 PartitionMetadata partitionMetadata = createMock(PartitionMetadata.class);
 expect(partitionMetadata.errorCode()).andReturn(ErrorMapping.LeaderNotAvailableCode()).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
 expect(partitionMetadata.partitionId()).andReturn(0).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA * 2);
 replay(partitionMetadata);
 // Topic metadata that always resolves to the single failing partition above.
 TopicMetadata mockedTopicMetadata = createMock(TopicMetadata.class);
 expect(mockedTopicMetadata.topic()).andReturn("testTopic").times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 expect(mockedTopicMetadata.partitionsMetadata()).andReturn(Collections.singletonList(partitionMetadata)).times(
   EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 replay(mockedTopicMetadata);
 // Partial mock: only getKafkaMetadata is stubbed; the real retry logic runs.
 EtlInputFormat etlInputFormat =
   createMock(EtlInputFormat.class,
     EtlInputFormat.class.getMethod("getKafkaMetadata", new Class[] { JobContext.class, List.class }));
 EasyMock.expect(etlInputFormat.getKafkaMetadata(dummyContext, Collections.singletonList("testTopic"))).andReturn(
   Collections.singletonList(mockedTopicMetadata)).times(EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 etlInputFormat.setLogger(Logger.getLogger(getClass()));
 replay(etlInputFormat);
 etlInputFormat.refreshPartitionMetadataOnLeaderNotAvailable(partitionMetadata, mockedTopicMetadata, dummyContext,
   EtlInputFormat.NUM_TRIES_PARTITION_METADATA);
 
 // The .times(...) expectations above are what actually assert the retry count.
 verify(mockedTopicMetadata);
 verify(etlInputFormat);
}

代码示例来源:origin: linkedin/camus

if (Pattern.matches(regex, topicMetadata.topic())) {
 log.info("Discarding topic (blacklisted): " + topicMetadata.topic());
} else if (!createMessageDecoder(context, topicMetadata.topic())) {
 log.info("Discarding topic (Decoder generation failed) : " + topicMetadata.topic());
} else if (topicMetadata.errorCode() != ErrorMapping.NoError()) {
 log.info("Skipping the creation of ETL request for Whole Topic : " + topicMetadata.topic() + " Exception : "
   + ErrorMapping.exceptionFor(topicMetadata.errorCode()));
} else {
   log.info("Skipping the creation of ETL request for Topic : " + topicMetadata.topic()
     + " and Partition : " + partitionMetadata.partitionId() + " Exception : "
     + ErrorMapping.exceptionFor(partitionMetadata.errorCode()));
   if (partitionMetadata.errorCode() != ErrorMapping.NoError()) {
    log.warn("Receiving non-fatal error code, Continuing the creation of ETL request for Topic : "
      + topicMetadata.topic() + " and Partition : " + partitionMetadata.partitionId() + " Exception : "
      + ErrorMapping.exceptionFor(partitionMetadata.errorCode()));
   if (offsetRequestInfo.containsKey(leader)) {
    ArrayList<TopicAndPartition> topicAndPartitions = offsetRequestInfo.get(leader);
    topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId()));
    offsetRequestInfo.put(leader, topicAndPartitions);
   } else {
    ArrayList<TopicAndPartition> topicAndPartitions = new ArrayList<TopicAndPartition>();
    topicAndPartitions.add(new TopicAndPartition(topicMetadata.topic(), partitionMetadata.partitionId()));
    offsetRequestInfo.put(leader, topicAndPartitions);

代码示例来源:origin: linkedin/camus

expect(mockedTopicMetadata.topic()).andReturn("testTopic");
expect(mockedTopicMetadata.partitionsMetadata()).andReturn(
  Collections.singletonList(mockedReturnedPartitionMetadata));

代码示例来源:origin: Microsoft/Availability-Monitor-for-Kafka

@Override
  public int compare(TopicMetadata t1, TopicMetadata t2) {
    // Order topic metadata alphabetically by topic name, ignoring case.
    return t1.topic().compareToIgnoreCase(t2.topic());
  }
}

代码示例来源:origin: com.linkedin.camus/camus-etl-kafka

/**
 * Keeps only the topics whose name matches the whitelist regex built from
 * {@code whiteListTopics}; discarded topics are logged at INFO.
 *
 * @param topicMetadataList candidate topic metadata
 * @param whiteListTopics topic names/patterns to keep
 * @return the metadata entries whose topic matches the whitelist
 */
public List<TopicMetadata> filterWhitelistTopics(List<TopicMetadata> topicMetadataList,
  HashSet<String> whiteListTopics) {
 ArrayList<TopicMetadata> filteredTopics = new ArrayList<TopicMetadata>();
 // Compile the whitelist regex once up front; Pattern.matches() would
 // recompile the same pattern on every loop iteration.
 Pattern whiteListPattern = Pattern.compile(createTopicRegEx(whiteListTopics));
 for (TopicMetadata topicMetadata : topicMetadataList) {
  if (whiteListPattern.matcher(topicMetadata.topic()).matches()) {
   filteredTopics.add(topicMetadata);
  } else {
   log.info("Discarding topic : " + topicMetadata.topic());
  }
 }
 return filteredTopics;
}

代码示例来源:origin: Microsoft/Availability-Monitor-for-Kafka

@Override
  public String toString() {
    // Renders "<ClassName> Object {", the topic name, the partition id, and a
    // closing brace, one item per platform line separator.
    String newLine = System.getProperty("line.separator");
    StringBuilder sb = new StringBuilder();
    sb.append(this.getClass().getName()).append(" Object {").append(newLine);
    sb.append(" TopicName: ").append(m_TopicMetadata.topic()).append(newLine);
    sb.append(" PartitionId: ").append(m_PartitionMetadata.partitionId()).append(newLine);
    sb.append("}");
    return sb.toString();
  }
}

相关文章

微信公众号

最新文章

更多