This article compiles a number of code examples for the Java class kafka.javaapi.TopicMetadata and shows how the TopicMetadata class is used in practice. The examples are extracted from selected open-source projects found on GitHub, Stack Overflow, Maven and similar platforms, so they should serve as useful references. Details of the TopicMetadata class:
Package path: kafka.javaapi.TopicMetadata
Class name: TopicMetadata
Class description: not available
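Before diving into the project excerpts below, here is a minimal, self-contained sketch of the typical usage pattern: fetch the metadata for a topic with the legacy SimpleConsumer API and walk its partitions. The broker address localhost:9092, the topic name my-topic and the client id metadata-example are placeholder assumptions, not values taken from any of the projects below.
import java.util.Collections;

import kafka.common.ErrorMapping;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class TopicMetadataExample {
  public static void main(String[] args) {
    // Placeholder broker host/port, client id and topic name -- adjust for your cluster.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "metadata-example");
    try {
      TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList("my-topic"));
      TopicMetadataResponse response = consumer.send(request);
      for (TopicMetadata topicMetadata : response.topicsMetadata()) {
        if (topicMetadata.errorCode() != ErrorMapping.NoError()) {
          // The broker reported a per-topic error; surface it and move on.
          System.err.println("Metadata error for topic " + topicMetadata.topic() + ": "
              + ErrorMapping.exceptionFor(topicMetadata.errorCode()).getMessage());
          continue;
        }
        for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
          // leader() can be null while a leader election is in progress, so guard before dereferencing.
          if (partitionMetadata.leader() != null) {
            System.out.println(topicMetadata.topic() + "-" + partitionMetadata.partitionId()
                + " leader: " + partitionMetadata.leader().host() + ":" + partitionMetadata.leader().port());
          }
        }
      }
    } finally {
      consumer.close();
    }
  }
}
The same request/response pattern appears throughout the excerpts below; the projects mostly differ in how they choose brokers to query and how they react to error codes such as LeaderNotAvailable.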
Code example origin: apache/incubator-gobblin
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
  List<KafkaPartition> partitions = Lists.newArrayList();
  for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (null == partitionMetadata) {
      log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
      return Collections.emptyList();
    }
    if (null == partitionMetadata.leader()) {
      log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada="
          + partitionMetadata);
      return Collections.emptyList();
    }
    partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
        .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
        .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
  }
  return partitions;
}
Code example origin: apache/flink
for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
  if (item.errorCode() != ErrorMapping.NoError()) {
    // The excerpt elides the warning logged here; its remaining arguments were:
    //   seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());
    continue; // do not add topics whose metadata carries an error
  }
  topics.add(item.topic());
}
Code example origin: apache/incubator-pinot
try {
  topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic)));
} catch (Exception e) {
  _currentState.handleConsumerException(e);
}
// ... (the excerpt elides how topicMetadata is obtained from topicMetadataResponse)
final short errorCode = topicMetadata.errorCode();
if (errorCode == Errors.NONE.code()) { // assumed success check; the original condition is elided in the excerpt
  return topicMetadata.partitionsMetadata().size();
} else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
  // ... (retry handling elided in the excerpt)
}
Code example origin: apache/apex-malhar
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }
  partitionNum = tm.partitionsMetadata().size();
  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);
  for (PartitionMetadata pm : tm.partitionsMetadata()) {
    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
      Message m = messageAndOffset.message();
      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
Code example origin: linkedin/camus
private TopicMetadataResponse mockTopicMetaDataResponse() {
  PartitionMetadata pMeta = EasyMock.createMock(PartitionMetadata.class);
  mocks.add(pMeta);
  EasyMock.expect(pMeta.errorCode()).andReturn((short)0).anyTimes();
  Broker broker = new Broker(0, "localhost", 2121);
  EasyMock.expect(pMeta.leader()).andReturn(broker).anyTimes();
  EasyMock.expect(pMeta.partitionId()).andReturn(PARTITION_1_ID).anyTimes();
  List<PartitionMetadata> partitionMetadatas = new ArrayList<PartitionMetadata>();
  partitionMetadatas.add(pMeta);
  TopicMetadata tMeta = EasyMock.createMock(TopicMetadata.class);
  mocks.add(tMeta);
  EasyMock.expect(tMeta.topic()).andReturn(TOPIC_1).anyTimes();
  EasyMock.expect(tMeta.errorCode()).andReturn((short)0).anyTimes();
  EasyMock.expect(tMeta.partitionsMetadata()).andReturn(partitionMetadatas).anyTimes();
  List<TopicMetadata> topicMetadatas = new ArrayList<TopicMetadata>();
  topicMetadatas.add(tMeta);
  TopicMetadataResponse metadataResponse = EasyMock.createMock(TopicMetadataResponse.class);
  mocks.add(metadataResponse);
  EasyMock.expect(metadataResponse.topicsMetadata()).andReturn(topicMetadatas).anyTimes();
  return metadataResponse;
}
Code example origin: org.apache.flink/flink-connector-kafka-0.8_2.10
SimpleConsumer consumer = null;
try {
  consumer = new SimpleConsumer(brokerUrl.getHost(), brokerUrl.getPort(), soTimeout, bufferSize, clientId);
  kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
  for (kafka.javaapi.TopicMetadata item : resp.topicsMetadata()) {
    if (item.errorCode() != ErrorMapping.NoError()) {
      // The excerpt elides the beginning of the warning logged here; its tail was:
      //   "for " + topics.toString() + ". Error: " + ErrorMapping.exceptionFor(item.errorCode()).getMessage());
      continue brokersLoop;
    }
    if (!topics.contains(item.topic())) {
      LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");
      continue brokersLoop;
    }
    for (PartitionMetadata part : item.partitionsMetadata()) {
      Node leader = brokerToNode(part.leader());
      KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
      KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
      partitions.add(pInfo);
    }
  }
} finally {
  if (consumer != null) {
    consumer.close();
  }
}
Code example origin: apache/incubator-gobblin
private void refreshTopicMetadata(KafkaPartition partition) {
  for (String broker : this.brokers) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
      TopicMetadata topicMetadata = topicMetadataList.get(0);
      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == partition.getId()) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(), partitionMetadata
              .leader().port());
          break;
        }
      }
      break;
    }
  }
}
Code example origin: apache/crunch
final SimpleConsumer consumer = getSimpleConsumer(broker);
try {
  topicMetadataResponse = consumer.send(topicMetadataRequest);
  break;
} catch (Exception err) {
  // The excerpt elides the start of the warning logged here; its remaining arguments were:
  //   Arrays.toString(topics), endpoint.host()), err);
} finally {
  consumer.close();
}
// ... later, iterating the TopicMetadata objects (metadata) returned by the response:
for (PartitionMetadata partition : metadata.partitionsMetadata()) {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<>();
  BrokerEndPoint brokerEndPoint = partition.leader();
  if (brokerEndPoint == null) {
    throw new CrunchRuntimeException("Unable to find leader for topic:" + metadata.topic()
        + " partition:" + partition.partitionId());
  }
  // ... (the excerpt elides how `leader` is resolved and how requestInfo is registered in brokerRequests)
  requestInfo = brokerRequests.get(leader);
  requestInfo.put(new TopicAndPartition(metadata.topic(), partition.partitionId()), new PartitionOffsetRequestInfo(
      time, 1));
}
Code example origin: linkedin/camus
public PartitionMetadata refreshPartitionMetadataOnLeaderNotAvailable(PartitionMetadata partitionMetadata,
    TopicMetadata topicMetadata, JobContext context, int numTries) throws InterruptedException {
  int tryCounter = 0;
  while (tryCounter < numTries && partitionMetadata.errorCode() == ErrorMapping.LeaderNotAvailableCode()) {
    log.info("Retry to referesh the topicMetadata on LeaderNotAvailable...");
    List<TopicMetadata> topicMetadataList =
        this.getKafkaMetadata(context, Collections.singletonList(topicMetadata.topic()));
    if (topicMetadataList == null || topicMetadataList.size() == 0) {
      log.warn("The topicMetadataList for topic " + topicMetadata.topic() + " is empty.");
    } else {
      topicMetadata = topicMetadataList.get(0);
      boolean partitionFound = false;
      for (PartitionMetadata metadataPerPartition : topicMetadata.partitionsMetadata()) {
        if (metadataPerPartition.partitionId() == partitionMetadata.partitionId()) {
          partitionFound = true;
          if (metadataPerPartition.errorCode() != ErrorMapping.LeaderNotAvailableCode()) {
            // ... (rest of the method elided in the excerpt)
Code example origin: pinterest/secor
@Override
public int getNumPartitions(String topic) {
  SimpleConsumer consumer = null;
  try {
    consumer = createConsumer(
        mConfig.getKafkaSeedBrokerHost(),
        mConfig.getKafkaSeedBrokerPort(),
        "partitionLookup");
    List<String> topics = new ArrayList<String>();
    topics.add(topic);
    TopicMetadataRequest request = new TopicMetadataRequest(topics);
    TopicMetadataResponse response = consumer.send(request);
    if (response.topicsMetadata().size() != 1) {
      throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
          response.topicsMetadata().size());
    }
    TopicMetadata topicMetadata = response.topicsMetadata().get(0);
    return topicMetadata.partitionsMetadata().size();
  } finally {
    if (consumer != null) {
      consumer.close();
    }
  }
}
Code example origin: Microsoft/Availability-Monitor-for-Kafka
if (Pattern.matches(regex, item.topic())) {
  m_logger.debug("Discarding topic (blacklisted): " + item.topic());
  continue;
}
for (PartitionMetadata part : item.partitionsMetadata()) {
  if (!exploredTopicPartition.contains(new TopicPartition(item.topic(), part.partitionId()))) {
    kafka.api.PartitionMetadata pm = new kafka.api.PartitionMetadata( // assumed declaration; the constructor line is elided in the excerpt
        part.partitionId(),
        Option.apply(part.leader()),
        JavaConversions.asScalaBuffer(part.replicas()).toList(),
        JavaConversions.asScalaBuffer(part.isr()).toList(),
        part.errorCode());
    pml.add(pm);
    exploredTopicPartition.add(new TopicPartition(item.topic(), part.partitionId()));
  }
}
kafka.api.TopicMetadata tm = new kafka.api.TopicMetadata( // assumed declaration; the constructor line is elided in the excerpt
    item.topic(),
    JavaConversions.asScalaBuffer(pml).toList(),
    item.errorCode());
ret.add(new kafka.javaapi.TopicMetadata(tm));
Code example origin: Microsoft/Availability-Monitor-for-Kafka
for (kafka.javaapi.TopicMetadata item : data) {
  m_logger.info("Topic: " + item.topic());
  for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) {
    for (kafka.cluster.Broker replica : part.replicas()) { /* ... per-replica logging elided in the excerpt ... */ }
    for (kafka.cluster.Broker replica : part.isr()) { /* ... per-ISR logging elided in the excerpt ... */ }
    if (part.leader() != null) {
      if (part.leader().host() != null) { /* ... leader host logging elided in the excerpt ... */ }
    }
  }
}
Code example origin: Stratio/Decision
@Override
public Integer getNumPartitionsForTopic(String topic){
  TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
  TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
  for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
    if (topic.equals(topicMetadata.topic())) {
      int partitionSize = topicMetadata.partitionsMetadata().size();
      logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
      return partitionSize;
    }
  }
  logger.warn("Metadata info not found!. TOPIC {}", topic);
  return null;
}
Code example origin: Microsoft/Availability-Monitor-for-Kafka
rString.append(sep).append(topic.topic() + "-->" + client);
numPartitionsConsumers += topic.partitionsMetadata().size();
// ...
m_logger.info("Reading from Topic: {};", item.topic());
final SlidingWindowReservoir topicLatency = new SlidingWindowReservoir(item.partitionsMetadata().size());
Histogram histogramConsumerTopicLatency = new Histogram(topicLatency);
MetricNameEncoded consumerTopicLatency = metricNameFactory.createWithTopic("Consumer.Latency", item.topic());
if (!metrics.getNames().contains(new Gson().toJson(consumerTopicLatency))) {
  // ...
}
if (appProperties.sendConsumerTopicLatency) {
  // ...
}
for (kafka.javaapi.PartitionMetadata part : item.partitionsMetadata()) {
  m_logger.debug("Reading from Topic: {}; Partition: {};", item.topic(), part.partitionId());
  Future<Long> future = newFixedThreadPool.submit(new JobManager(consumerPartitionTimeoutInSeconds, TimeUnit.SECONDS, consumerPartitionJob, "Consumer-" + item.topic() + "-P#" + part.partitionId()));
}
// ...
CommonUtils.shutdownAndAwaitTermination(newFixedThreadPool, item.topic());
// ...
m_logger.error("Error Reading from Topic: {}; Partition: {}; Exception: {}", item.topic(), key, e);
MetricNameEncoded consumerPartitionLatency = metricNameFactory.createWithPartition("Consumer.Latency", item.topic() + "##" + key);
Histogram histogramConsumerPartitionLatency = new Histogram(new SlidingWindowReservoir(1));
if (!metrics.getNames().contains(new Gson().toJson(consumerPartitionLatency))) {
  // ...
}
histogramConsumerLatency.update(elapsedTime);
if (appProperties.sendConsumerPartitionAvailability) {
  MetricNameEncoded consumerPartitionAvailability = metricNameFactory.createWithPartition("Consumer.Availability", item.topic() + "##" + key);
  if (!metrics.getNames().contains(new Gson().toJson(consumerPartitionAvailability))) {
    // ...
  }
}
Code example origin: apache/incubator-gobblin
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist,
    List<Pattern> whitelist) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker);
  if (topicMetadataList == null) {
    return null;
  }
  List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();
  for (TopicMetadata topicMetadata : topicMetadataList) {
    if (DatasetFilterUtils.survived(topicMetadata.topic(), blacklist, whitelist)) {
      filteredTopicMetadataList.add(topicMetadata);
    }
  }
  return filteredTopicMetadataList;
}
Code example origin: Microsoft/Availability-Monitor-for-Kafka
@Override
public String toString() {
  StringBuilder result = new StringBuilder();
  String NEW_LINE = System.getProperty("line.separator");
  result.append(this.getClass().getName() + " Object {" + NEW_LINE);
  result.append(" TopicName: " + m_TopicMetadata.topic() + NEW_LINE);
  result.append(" PartitionId: " + m_PartitionMetadata.partitionId() + NEW_LINE);
  result.append("}");
  return result.toString();
}
}
Code example origin: HomeAdvisor/Kafdrop
private TopicVO processTopicMetadata(TopicMetadata tmd)
{
  TopicVO topic = new TopicVO(tmd.topic());
  topic.setConfig(
      Optional.ofNullable(topicConfigPathCache.getCurrentData(ZkUtils.TopicConfigPath() + "/" + topic.getName()))
          .map(this::readTopicConfig)
          .orElse(Collections.emptyMap()));
  topic.setPartitions(
      tmd.partitionsMetadata().stream()
          .map((pmd) -> parsePartitionMetadata(tmd.topic(), pmd))
          .collect(Collectors.toMap(TopicPartitionVO::getId, p -> p))
  );
  return topic;
}
Code example origin: org.apache.apex/malhar-contrib
/**
 * @param brokerList brokers in same cluster
 * @param topic
 * @return Get the partition metadata list for the specific topic via the brokerList <br>
 *         null if topic is not found
 */
public static List<PartitionMetadata> getPartitionsForTopic(Set<String> brokerList, String topic)
{
  TopicMetadata tmd = getTopicMetadata(brokerList, topic);
  if (tmd == null) {
    return null;
  }
  return tmd.partitionsMetadata();
}
Code example origin: HomeAdvisor/Kafdrop
private Map<String, TopicVO> getTopicMetadata(BlockingChannel channel, String... topics)
{
  final TopicMetadataRequest request =
      new TopicMetadataRequest((short) 0, 0, clientId(), Arrays.asList(topics));
  LOG.debug("Sending topic metadata request: {}", request);
  channel.send(request);
  final kafka.api.TopicMetadataResponse underlyingResponse =
      kafka.api.TopicMetadataResponse.readFrom(channel.receive().buffer());
  LOG.debug("Received topic metadata response: {}", underlyingResponse);
  TopicMetadataResponse response = new TopicMetadataResponse(underlyingResponse);
  return response.topicsMetadata().stream()
      .filter(tmd -> tmd.errorCode() == ErrorMapping.NoError())
      .map(this::processTopicMetadata)
      .collect(Collectors.toMap(TopicVO::getName, t -> t));
}
Code example origin: org.apache.apex/malhar-contrib
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());
  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }
  partitionNum = tm.partitionsMetadata().size();
  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);
  for (PartitionMetadata pm : tm.partitionsMetadata()) {
    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);
    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
      Message m = messageAndOffset.message();
      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
The content above is collected from the web; if it infringes any rights, please contact the author for removal.