org.apache.kafka.clients.consumer.Consumer.seekToBeginning()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(8.6k)|赞(0)|评价(0)|浏览(219)

本文整理了Java中org.apache.kafka.clients.consumer.Consumer.seekToBeginning()方法的一些代码示例,展示了Consumer.seekToBeginning()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Consumer.seekToBeginning()方法的具体详情如下:
包路径:org.apache.kafka.clients.consumer.Consumer
类名称:Consumer
方法名:seekToBeginning

Consumer.seekToBeginning介绍

暂无

代码示例

代码示例来源:origin: openzipkin/brave

/** Rewinds the given partitions to their earliest offsets by delegating to the wrapped consumer. */
@Override public void seekToBeginning(Collection<TopicPartition> partitions) {
 this.delegate.seekToBeginning(partitions);
}

代码示例来源:origin: apache/incubator-gobblin

/**
 * Returns the earliest offset currently available for the given partition.
 *
 * <p>Assigns the consumer to the partition, rewinds to the beginning, and reads back the
 * resulting position. NOTE(review): {@code assign} replaces any previous assignment — confirm
 * callers do not depend on an existing assignment surviving this call.
 *
 * @param partition the Kafka partition whose earliest offset is wanted
 * @return the earliest retained offset for the partition
 * @throws KafkaOffsetRetrievalFailureException declared by the interface contract
 */
@Override
public long getEarliestOffset(KafkaPartition partition) throws KafkaOffsetRetrievalFailureException {
 final TopicPartition tp = new TopicPartition(partition.getTopicName(), partition.getId());
 this.consumer.assign(Collections.singletonList(tp));
 this.consumer.seekToBeginning(tp);
 return this.consumer.position(tp);
}

代码示例来源:origin: confluentinc/ksql

/**
 * Replays the command topic from its beginning and returns every command with a non-null value.
 *
 * <p>Polls repeatedly with the supplied timeout; an empty batch is taken to mean the log has
 * been fully consumed.
 *
 * @param duration maximum time to block on each poll
 * @return the restored commands in log order
 */
public List<QueuedCommand> getRestoreCommands(final Duration duration) {
 final List<QueuedCommand> commands = Lists.newArrayList();
 // Rewind to offset 0 so the entire command history is replayed.
 commandConsumer.seekToBeginning(
   Collections.singletonList(commandTopicPartition));
 log.debug("Reading prior command records");
 ConsumerRecords<CommandId, Command> batch = commandConsumer.poll(duration);
 while (!batch.isEmpty()) {
  log.debug("Received {} records from poll", batch.count());
  for (final ConsumerRecord<CommandId, Command> record : batch) {
   // Records without a value carry nothing to restore; skip them.
   if (record.value() == null) {
    continue;
   }
   commands.add(
     new QueuedCommand(record.key(), record.value(), Optional.empty()));
  }
  batch = commandConsumer.poll(duration);
 }
 return commands;
}

代码示例来源:origin: apache/storm

if (firstPollOffsetStrategy == EARLIEST && isFirstPollSinceTopologyWasDeployed) {
  LOG.debug("First poll for topic partition [{}], seeking to partition beginning", tp);
  consumer.seekToBeginning(Collections.singleton(tp));
} else if (firstPollOffsetStrategy == LATEST && isFirstPollSinceTopologyWasDeployed) {
  LOG.debug("First poll for topic partition [{}], seeking to partition end", tp);
} else if (firstPollOffsetStrategy == UNCOMMITTED_EARLIEST) {
  LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition beginning", tp);
  consumer.seekToBeginning(Collections.singleton(tp));
} else if (firstPollOffsetStrategy == UNCOMMITTED_LATEST) {
  LOG.debug("First poll for topic partition [{}] with no last batch metadata, seeking to partition end", tp);

代码示例来源:origin: apache/hive

consumer.seekToBeginning(Collections.singleton(topicPartition));

代码示例来源:origin: apache/incubator-gobblin

/**
 * Positions the consumer from previously committed watermarks and marks this source started.
 *
 * <p>When no watermark was committed for this partition, the consumer is rewound to the
 * beginning; otherwise it seeks to one offset past the last committed low watermark so the
 * committed record is not re-read.
 *
 * @param watermarkStorage store of committed watermarks; must not be null
 * @throws IOException if watermark retrieval fails
 */
@Override
public void start(WatermarkStorage watermarkStorage)
  throws IOException {
 Preconditions.checkArgument(watermarkStorage != null, "Watermark Storage should not be null");
 final Map<String, CheckpointableWatermark> committed =
   watermarkStorage.getCommittedWatermarks(KafkaWatermark.class, Collections.singletonList(_partition.toString()));
 final KafkaWatermark watermark = (KafkaWatermark) committed.get(_partition.toString());
 if (watermark == null) {
  LOG.info("Offset is null - seeking to beginning of topic and partition for {} ", _partition.toString());
  _consumer.seekToBeginning(_partition);
 } else {
  // seek needs to go one past the last committed offset
  LOG.info("Offset found in consumer for partition {}. Seeking to one past what we found : {}",
    _partition.toString(), watermark.getLwm().getValue() + 1);
  _consumer.seek(_partition, watermark.getLwm().getValue() + 1);
 }
 _isStarted.set(true);
}

代码示例来源:origin: confluentinc/ksql

@Test
public void shouldGetRestoreCommandsCorrectly() {
 // Given: three successive polls — two non-empty batches followed by an empty
 // batch, which is the signal for getRestoreCommands() to stop polling.
 when(commandConsumer.poll(any(Duration.class)))
   .thenReturn(someConsumerRecords(
     new ConsumerRecord<>("topic", 0, 0, commandId1, command1),
     new ConsumerRecord<>("topic", 0, 0, commandId2, command2)))
   .thenReturn(someConsumerRecords(
     new ConsumerRecord<>("topic", 0, 0, commandId3, command3)))
   .thenReturn(new ConsumerRecords<>(Collections.emptyMap()));
 // When:
 final List<QueuedCommand> queuedCommandList = commandTopic
   .getRestoreCommands(Duration.ofMillis(1));
 // Then: the consumer was rewound to the start of the command topic partition...
 verify(commandConsumer).seekToBeginning(topicPartitionsCaptor.capture());
 assertThat(topicPartitionsCaptor.getValue(),
   equalTo(Collections.singletonList(new TopicPartition(COMMAND_TOPIC_NAME, 0))));
 // ...and every record from every non-empty batch came back, in log order.
 assertThat(queuedCommandList, equalTo(ImmutableList.of(
   new QueuedCommand(commandId1, command1, Optional.empty()),
   new QueuedCommand(commandId2, command2, Optional.empty()),
   new QueuedCommand(commandId3, command3, Optional.empty()))));
}

代码示例来源:origin: spring-projects/spring-kafka

.collect(Collectors.toList()));
consumer.seekToBeginning(records.partitions());

代码示例来源:origin: org.apache.kafka/kafka_2.12

resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
  client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
  client.seekToEnd(inputTopicPartitions);
  resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
  client.seekToBeginning(inputTopicPartitions);

代码示例来源:origin: org.apache.kafka/kafka_2.11

resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
  client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
  client.seekToEnd(inputTopicPartitions);
  resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
  client.seekToBeginning(inputTopicPartitions);

代码示例来源:origin: org.apache.kafka/kafka

resetOffsetsTo(client, inputTopicPartitions, options.valueOf(toOffsetOption));
} else if (options.has(toEarliestOption)) {
  client.seekToBeginning(inputTopicPartitions);
} else if (options.has(toLatestOption)) {
  client.seekToEnd(inputTopicPartitions);
  resetOffsetsFromResetPlan(client, inputTopicPartitions, topicPartitionsAndOffset);
} else {
  client.seekToBeginning(inputTopicPartitions);

代码示例来源:origin: spring-projects/spring-kafka

assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
ArgumentCaptor<Collection<TopicPartition>> captor = ArgumentCaptor.forClass(List.class);
verify(consumer).seekToBeginning(captor.capture());
assertThat(captor.getValue())
    .isEqualTo(new HashSet<>(Arrays.asList(new TopicPartition("foo", 0), new TopicPartition("foo", 4))));

代码示例来源:origin: org.streampipes/streampipes-connect

@Override
  public void onPartitionsAssigned(Collection<TopicPartition> collection) {
    // On every (re)assignment, rewind the newly assigned partitions to their
    // earliest offsets so consumption restarts from the beginning of the log.
    consumer.seekToBeginning(collection);
  }
});

代码示例来源:origin: streampipes/streampipes-ce

@Override
  public void onPartitionsAssigned(Collection<TopicPartition> collection) {
    // On every (re)assignment, rewind the newly assigned partitions to their
    // earliest offsets so consumption restarts from the beginning of the log.
    consumer.seekToBeginning(collection);
  }
});

代码示例来源:origin: spring-projects/spring-kafka

@SuppressWarnings("unchecked")
ArgumentCaptor<Collection<TopicPartition>> captor = ArgumentCaptor.forClass(Collection.class);
verify(consumer).seekToBeginning(captor.capture());
TopicPartition next = captor.getValue().iterator().next();
assertThat(next.topic()).isEqualTo(topic);

代码示例来源:origin: org.springframework.cloud/spring-cloud-stream-binder-kafka

@Override
  public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
    // Apply the configured offset reset only on the very first assignment;
    // compareAndSet flips the flag exactly once and is a no-op afterwards.
    if (!initialAssignment.compareAndSet(true, false)) {
      return;
    }
    if ("earliest".equals(resetTo)) {
      consumer.seekToBeginning(tps);
    }
    else if ("latest".equals(resetTo)) {
      consumer.seekToEnd(tps);
    }
  }
});

代码示例来源:origin: spring-cloud/spring-cloud-stream-binder-kafka

@Override
  public void onPartitionsAssigned(Consumer<?, ?> consumer, Collection<TopicPartition> tps) {
    // Apply the configured offset reset only on the very first assignment;
    // compareAndSet flips the flag exactly once and is a no-op afterwards.
    if (!initialAssignment.compareAndSet(true, false)) {
      return;
    }
    if ("earliest".equals(resetTo)) {
      consumer.seekToBeginning(tps);
    }
    else if ("latest".equals(resetTo)) {
      consumer.seekToEnd(tps);
    }
  }
});

代码示例来源:origin: apache/incubator-rya

/**
 * Opens an iterator over the change log starting from its very first entry.
 *
 * <p>Assigns the consumer to partition 0 of the topic and rewinds it to the earliest offset.
 * NOTE(review): only partition 0 is read — presumably the change log topic is single-partition;
 * confirm against the topic's configuration.
 *
 * @return an iterator over every change log entry from the beginning
 * @throws QueryChangeLogException declared by the interface contract
 */
@Override
public CloseableIteration<ChangeLogEntry<QueryChange>, QueryChangeLogException> readFromStart() throws QueryChangeLogException {
  final TopicPartition changeLogPartition = new TopicPartition(topic, 0);
  consumer.assign(Lists.newArrayList(changeLogPartition));
  consumer.seekToBeginning(Lists.newArrayList(changeLogPartition));
  return new QueryChangeLogEntryIter(consumer);
}

代码示例来源:origin: linkedin/li-apache-kafka-clients

/**
 * Rewinds the given partitions to their earliest offsets and resets per-partition state.
 *
 * @param partitions the partitions to rewind
 */
@Override
public void seekToBeginning(Collection<TopicPartition> partitions) {
 // Perform the actual seek on the underlying vanilla consumer first.
 _kafkaConsumer.seekToBeginning(partitions);
 for (TopicPartition partition : partitions) {
  // Discard any processor state tracked for the rewound partition.
  _consumerRecordsProcessor.clear(partition);
  // We set the high watermark to 0 if user is seeking to beginning.
  _consumerRecordsProcessor.setPartitionConsumerHighWaterMark(partition, 0L);
 }
}

代码示例来源:origin: org.apache.kafka/kafka-streams

// Reinitializes the global state stores backing the given changelog partitions,
// then re-assigns the global consumer and rewinds it so the stores are restored
// from the earliest available offset.
@Override
public void reinitializeStateStoresForPartitions(final Collection<TopicPartition> partitions,
                         final InternalProcessorContext processorContext) {
  // Let the base class tear down and recreate store state for these partitions.
  super.reinitializeStateStoresForPartitions(
    log,
    globalStores,
    topology.storeToChangelogTopic(),
    partitions,
    processorContext);
  // Restore replay starts from the beginning of each changelog partition.
  globalConsumer.assign(partitions);
  globalConsumer.seekToBeginning(partitions);
}

相关文章