Usage and code examples of the kafka.cluster.Broker.port() method


This article collects a number of Java code examples for the kafka.cluster.Broker.port() method and shows how Broker.port() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, so they serve as useful references. Details of the Broker.port() method:
Package: kafka.cluster.Broker
Class: Broker
Method: port

About Broker.port

No description is provided in the source. Judging from the examples below, Broker.port() returns the port number on which the broker listens; it is typically read together with Broker.host() to build the address used to connect to that broker.
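As a minimal, self-contained sketch of that pattern (not taken from any of the projects below, and assuming the legacy Kafka 0.8 SimpleConsumer API; the broker address "broker1:9092", topic "events", and client id "metadata-client" are placeholders): fetch the topic metadata, locate each partition's leader, and read its host() and port().

import java.util.Collections;

import kafka.cluster.Broker;
import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class BrokerPortExample {
  public static void main(String[] args) {
    // Bootstrap consumer used only to fetch topic metadata (host/port are placeholders).
    SimpleConsumer metadataConsumer =
        new SimpleConsumer("broker1", 9092, 10000, 64 * 1024, "metadata-client");
    try {
      TopicMetadataRequest request =
          new TopicMetadataRequest(Collections.singletonList("events"));
      TopicMetadataResponse response = metadataConsumer.send(request);

      for (TopicMetadata topicMetadata : response.topicsMetadata()) {
        for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
          Broker leader = partitionMetadata.leader();
          if (leader == null) {
            continue; // leader election may be in progress
          }
          // Broker.host() and Broker.port() identify the leader to connect to,
          // e.g. when creating a new SimpleConsumer for that partition.
          System.out.printf("topic=%s partition=%d leader=%s:%d%n",
              topicMetadata.topic(), partitionMetadata.partitionId(),
              leader.host(), leader.port());
        }
      }
    } finally {
      metadataConsumer.close();
    }
  }
}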

Code examples

Code example source: apache/incubator-druid

private boolean isValidNewLeader(Broker broker)
{
 // broker is considered valid new leader if it is not the same as old leaderBroker
 return !(leaderBroker.host().equalsIgnoreCase(broker.host()) && leaderBroker.port() == broker.port());
}

Code example source: alibaba/jstorm

public void commitState() {
  try {
    long lastOffset = 0;
    if (pendingOffsets.isEmpty() || pendingOffsets.size() <= 0) {
      lastOffset = emittingOffset;
    } else {
      lastOffset = pendingOffsets.first();
    }
    if (lastOffset != lastCommittedOffset) {
      Map<Object, Object> data = new HashMap<Object, Object>();
      data.put("topology", stormConf.get(Config.TOPOLOGY_NAME));
      data.put("offset", lastOffset);
      data.put("partition", partition);
      data.put("broker", ImmutableMap.of("host", consumer.getLeaderBroker().host(), "port", consumer.getLeaderBroker().port()));
      data.put("topic", config.topic);
      zkState.writeJSON(zkPath(), data);
      lastCommittedOffset = lastOffset;
    }
  } catch (Exception e) {
    LOG.error(e.getMessage(), e);
  }
}

Code example source: apache/incubator-druid

private void ensureConsumer(Broker leader) throws InterruptedException
{
 if (consumer == null) {
  while (leaderBroker == null) {
   leaderBroker = findNewLeader(leader);
  }
  log.info(
    "making SimpleConsumer[%s][%s], leader broker[%s:%s]",
    topic, partitionId, leaderBroker.host(), leaderBroker.port()
  );
  consumer = new SimpleConsumer(
    leaderBroker.host(), leaderBroker.port(), SO_TIMEOUT_MILLIS, BUFFER_SIZE, clientId
  );
 }
}

Code example source: apache/flink

/**
 * Turn a broker instance into a node instance.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
  return new Node(broker.id(), broker.host(), broker.port());
}

Code example source: alibaba/jstorm

private SimpleConsumer findLeaderConsumer(int partition) {
  try {
    if (consumer != null) {
      return consumer;
    }
    PartitionMetadata metadata = findLeader(partition);
    if (metadata == null) {
      leaderBroker = null;
      consumer = null;
      return null;
    }
    leaderBroker = metadata.leader();
    consumer = new SimpleConsumer(leaderBroker.host(), leaderBroker.port(), config.socketTimeoutMs, config.socketReceiveBufferBytes,
        config.clientId);
    return consumer;
  } catch (Exception e) {
    LOG.error(e.getMessage(), e);
  }
  return null;
}

Code example source: apache/incubator-druid

for (Broker replica : metadata.replicas()) {
 replicaBrokers.add(
   HostAndPort.fromParts(replica.host(), replica.port())
 );
}

Code example source: apache/incubator-gobblin

private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada="
     + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

Code example source: apache/incubator-gobblin

private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
 List<KafkaPartition> partitions = Lists.newArrayList();
 for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
  if (null == partitionMetadata) {
   LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
   return Collections.emptyList();
  }
  if (null == partitionMetadata.leader()) {
   LOG.error(
     "Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada=" + partitionMetadata);
   return Collections.emptyList();
  }
  partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
    .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
    .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
 }
 return partitions;
}

Code example source: apache/incubator-gobblin

private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : this.brokers) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(), partitionMetadata
       .leader().port());
     break;
    }
   }
   break;
  }
 }
}

Code example source: apache/incubator-gobblin

private void refreshTopicMetadata(KafkaPartition partition) {
 for (String broker : KafkaWrapper.this.getBrokers()) {
  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
  if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
   TopicMetadata topicMetadata = topicMetadataList.get(0);
   for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (partitionMetadata.partitionId() == partition.getId()) {
     partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
       partitionMetadata.leader().port());
     break;
    }
   }
   break;
  }
 }
}

Code example source: prestodb/presto

HostAddress partitionLeader = HostAddress.fromParts(leader.host(), leader.port());

Code example source: alibaba/jstorm

LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
    + partition + "] error:" + code);
}else {

Code example source: rakam-io/rakam

private Map<String, Long> getTopicOffsets(List<String> topics) {
    ArrayList<HostAndPort> nodes = new ArrayList<>(config.getNodes());
    Collections.shuffle(nodes);

    SimpleConsumer simpleConsumer = consumerManager.getConsumer(nodes.get(0));
    TopicMetadataRequest topicMetadataRequest = new TopicMetadataRequest(topics);
    TopicMetadataResponse topicMetadataResponse = simpleConsumer.send(topicMetadataRequest);

    ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();

    for (TopicMetadata metadata : topicMetadataResponse.topicsMetadata()) {
      for (PartitionMetadata part : metadata.partitionsMetadata()) {
        LOGGER.debug(format("Adding Partition %s/%s", metadata.topic(), part.partitionId()));
        Broker leader = part.leader();
        if (leader == null) { // Leader election going on...
          LOGGER.warn(format("No leader for partition %s/%s found!", metadata.topic(), part.partitionId()));
        } else {
          HostAndPort leaderHost = HostAndPort.fromParts(leader.host(), leader.port());
          SimpleConsumer leaderConsumer = consumerManager.getConsumer(leaderHost);

          long offset = findAllOffsets(leaderConsumer, metadata.topic(), part.partitionId())[0];
          builder.put(metadata.topic(), offset);
        }
      }
    }

    return builder.build();
  }
}

Code example source: linkedin/camus

private void refreshTopicMetadata() {
 TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList(kafkaRequest.getTopic()));
 TopicMetadataResponse response;
 try {
  response = simpleConsumer.send(request);
 } catch (Exception e) {
  log.error("Exception caught when refreshing metadata for topic " + request.topics().get(0) + ": "
    + e.getMessage());
  return;
 }
 TopicMetadata metadata = response.topicsMetadata().get(0);
 for (PartitionMetadata partitionMetadata : metadata.partitionsMetadata()) {
  if (partitionMetadata.partitionId() == kafkaRequest.getPartition()) {
   simpleConsumer =
     new SimpleConsumer(partitionMetadata.leader().host(), partitionMetadata.leader().port(),
       CamusJob.getKafkaTimeoutValue(context), CamusJob.getKafkaBufferSize(context),
       CamusJob.getKafkaClientName(context));
   break;
  }
 }
}

Code example source: apache/apex-malhar

private ConsumerThread(Broker broker, Set<KafkaPartition> kpl, SimpleKafkaConsumer consumer)
{
 this.broker = broker;
 this.clientName = consumer.getClientName(broker.host() + "_" + broker.port());
 this.consumer = consumer;
 this.kpS = Collections.newSetFromMap(new ConcurrentHashMap<KafkaPartition, Boolean>());
 this.kpS.addAll(kpl);
}

Code example source: org.apache.apex/malhar-contrib

private ConsumerThread(Broker broker, Set<KafkaPartition> kpl, SimpleKafkaConsumer consumer)
{
 this.broker = broker;
 this.clientName = consumer.getClientName(broker.host() + "_" + broker.port());
 this.consumer = consumer;
 this.kpS = Collections.newSetFromMap(new ConcurrentHashMap<KafkaPartition, Boolean>());
 this.kpS.addAll(kpl);
}

Code example source: shunfei/DCMonitor

@Override
 public BrokerInfo apply(Broker input) {
  BrokerInfo info = new BrokerInfo();
  info.host = input.host();
  info.port = input.port();
  info.id = input.id();
  return info;
 }
}

Code example source: org.apache.flink/flink-connector-kafka-0.8_2.11

/**
 * Turn a broker instance into a node instance.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
  return new Node(broker.id(), broker.host(), broker.port());
}

Code example source: org.apache.flink/flink-connector-kafka-0.8

/**
 * Turn a broker instance into a node instance.
 *
 * @param broker broker instance
 * @return Node representing the given broker
 */
private static Node brokerToNode(Broker broker) {
  return new Node(broker.id(), broker.host(), broker.port());
}

Code example source: com.ebay.jetstream/jetstream-messaging

private void init() {
  m_taken.set(true);
  this.m_leader = getLeader();
  this.m_consumer = new SimpleConsumer(m_leader.host(), m_leader.port(),
      m_config.getSocketTimeoutMs(),
      m_config.getSocketReceiveBufferBytes(), m_clientId);
  this.m_nextBatchSizeBytes = m_config.getBatchSizeBytes();
}
