Usage and code examples of the kafka.message.Message.&lt;init&gt;() method


This article collects a number of Java code examples for the kafka.message.Message.&lt;init&gt;() method, showing how Message.&lt;init&gt;() is used in practice. The examples are taken from selected projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of Message.&lt;init&gt;() are as follows:
Package path: kafka.message.Message
Class name: Message
Method name: &lt;init&gt;

About Message.&lt;init&gt;

Message is the message class of the legacy (pre-0.10) Kafka Scala client. Its constructors take the message payload as a byte[], optionally together with a key byte[] (further overloads also accept a compression codec). The resulting object exposes accessors such as payload() and checksum() (a CRC32 over the message), which several of the examples below rely on.
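
Before the harvested examples, here is a minimal, self-contained sketch of the two constructor forms they use. The class name MessageInitExample is illustrative, and it assumes a legacy Scala-client artifact (e.g. kafka_2.10 0.8.x) on the classpath:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import kafka.message.Message;

public class MessageInitExample {
  public static void main(String[] args) {
    byte[] payload = "hello kafka".getBytes(StandardCharsets.UTF_8);
    byte[] key = "key-1".getBytes(StandardCharsets.UTF_8);

    // Payload-only constructor
    Message withoutKey = new Message(payload);
    // Payload-plus-key constructor, as in the Graylog and camus examples below
    Message withKey = new Message(payload, key);

    // payload() returns a ByteBuffer view of the message body; copy the bytes out
    ByteBuffer buffer = withKey.payload();
    byte[] roundTrip = new byte[buffer.remaining()];
    buffer.get(roundTrip);
    System.out.println(new String(roundTrip, StandardCharsets.UTF_8)); // hello kafka

    // CRC32 checksum of the message, the value the camus validate() examples verify
    System.out.println(withoutKey.checksum());
  }
}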

Code examples

Code example source: origin: Graylog2/graylog2-server

final Message newMessage = new Message(messageBytes, idBytes);

Code example source: origin: apache/incubator-gobblin

public void pushToStream(String message) {
 // Pick the next stream in round-robin fashion and look up its offset counter and queue
 int streamNo = (int) this.nextStream.incrementAndGet() % this.queues.size();
 AtomicLong offset = this.offsets.get(streamNo);
 BlockingQueue<FetchedDataChunk> queue = this.queues.get(streamNo);
 AtomicLong thisOffset = new AtomicLong(offset.incrementAndGet());
 // Wrap the payload in a Message and build a single-message, uncompressed ByteBufferMessageSet
 List<Message> seq = Lists.newArrayList();
 seq.add(new Message(message.getBytes(Charsets.UTF_8)));
 ByteBufferMessageSet messageSet = new ByteBufferMessageSet(NoCompressionCodec$.MODULE$, offset, JavaConversions.asScalaBuffer(seq));
 // Enqueue the chunk as if it had just been fetched from a partition of "topic"
 FetchedDataChunk chunk = new FetchedDataChunk(messageSet,
   new PartitionTopicInfo("topic", streamNo, queue, thisOffset, thisOffset, new AtomicInteger(1), "clientId"),
   thisOffset.get());
 queue.add(chunk);
}

Code example source: origin: linkedin/camus

public void validate() throws IOException {
  // check the checksum of message.
  Message readMessage;
  if (key == null){
    readMessage = new Message(payload);
  } else {
    readMessage = new Message(payload, key);
  }
  if (checksum != readMessage.checksum()) {
    throw new ChecksumException("Invalid message checksum : " + readMessage.checksum() + ". Expected " + checksum,
        offset);
  }
}

Code example source: origin: linkedin/camus

private FetchResponse mockFetchResponse(List<MyMessage> myMessages) {
 FetchResponse fetchResponse = EasyMock.createMock(FetchResponse.class);
 EasyMock.expect(fetchResponse.hasError()).andReturn(false).times(1);
 List<Message> messages = new ArrayList<Message>();
 for (MyMessage myMessage:myMessages) {
  String payload = gson.toJson(myMessage);
  String msgKey = Integer.toString(PARTITION_1_ID);
  Message message = new Message(payload.getBytes(), msgKey.getBytes());
  messages.add(message);
 }
 ByteBufferMessageSet messageSet = new ByteBufferMessageSet(messages);
 EasyMock.expect(fetchResponse.messageSet(EasyMock.anyString(), EasyMock.anyInt())).andReturn(messageSet).times(1);
 mocks.add(fetchResponse);
 return fetchResponse;
}

Code example source: origin: jacklund/mqttKafkaBridge

@Override
public void messageArrived(String topic, MqttMessage message) throws Exception {
  byte[] payload = message.getPayload();
  ProducerData<String, Message> data = new ProducerData<String, Message>(topic, new Message(payload));
  kafkaProducer.send(data);
}

Code example source: origin: com.linkedin.camus/camus-etl-kafka

public void validate() throws IOException {
  // check the checksum of message.
  Message readMessage;
  if (key == null){
    readMessage = new Message(payload);
  } else {
    readMessage = new Message(payload, key);
  }
  if (checksum != readMessage.checksum()) {
    throw new ChecksumException("Invalid message checksum : " + readMessage.checksum() + ". Expected " + checksum,
        offset);
  }
}

Code example source: origin: org.apache.apex/malhar-contrib

public void run()
 {
  ConsumerIterator<byte[], byte[]> itr = stream.iterator();
  logger.debug("Thread {} starts consuming message...", Thread.currentThread().getName());
  while (itr.hasNext() && isAlive) {
   MessageAndMetadata<byte[], byte[]> mam = itr.next();
   try {
    kp.setPartitionId(mam.partition());
    putMessage(kp, new Message(mam.message()), mam.offset());
   } catch (InterruptedException e) {
    logger.error("Message Enqueue has been interrupted", e);
   }
  }
  logger.debug("Thread {} stops consuming message...", Thread.currentThread().getName());
 }
});

Code example source: origin: apache/apex-malhar

public void run()
 {
  ConsumerIterator<byte[], byte[]> itr = stream.iterator();
  logger.debug("Thread {} starts consuming message...", Thread.currentThread().getName());
  while (itr.hasNext() && isAlive) {
   MessageAndMetadata<byte[], byte[]> mam = itr.next();
   try {
    kp.setPartitionId(mam.partition());
    putMessage(kp, new Message(mam.message()), mam.offset());
   } catch (InterruptedException e) {
    logger.error("Message Enqueue has been interrupted", e);
   }
  }
  logger.debug("Thread {} stops consuming message...", Thread.currentThread().getName());
 }
});

Code example source: origin: org.graylog2/graylog2-server

final Message newMessage = new Message(messageBytes, idBytes);

Code example source: origin: org.graylog2/graylog2-shared

/**
 * Writes the list of entries to the journal.
 *
 * @param entries journal entries to be written
 * @return the last position written to in the journal
 */
@Override
public long write(List<Entry> entries) {
  try (Timer.Context ignored = writeTime.time()) {
    long payloadSize = 0L;
    final List<Message> messages = Lists.newArrayListWithCapacity(entries.size());
    for (final Entry entry : entries) {
      final byte[] messageBytes = entry.getMessageBytes();
      final byte[] idBytes = entry.getIdBytes();
      payloadSize += messageBytes.length;
      messages.add(new Message(messageBytes, idBytes));
      if (LOG.isTraceEnabled()) {
        LOG.trace("Message {} contains bytes {}", bytesToHex(idBytes), bytesToHex(messageBytes));
      }
    }
    final ByteBufferMessageSet messageSet = new ByteBufferMessageSet(JavaConversions.asScalaBuffer(messages));
    final Log.LogAppendInfo appendInfo = kafkaLog.append(messageSet, true);
    long lastWriteOffset = appendInfo.lastOffset();
    LOG.debug("Wrote {} messages to journal: {} bytes, log position {} to {}",
        entries.size(), payloadSize, appendInfo.firstOffset(), lastWriteOffset);
    writtenMessages.mark(entries.size());
    return lastWriteOffset;
  }
}

Code example source: origin: apache/apex-malhar

@Override
public void run()
{
 Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
 topicCountMap.put(topic, 1);
 Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
 KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
 ConsumerIterator<byte[], byte[]> it = stream.iterator();
 logger.debug("Inside consumer::run receiveCount= {}", receiveCount);
 while (it.hasNext() && isAlive) {
  Message msg = new Message(it.next().message());
  if (latch != null) {
   latch.countDown();
  }
  if (getMessage(msg).equals(KafkaOperatorTestBase.END_TUPLE)) {
   break;
  }
  holdingBuffer.add(msg);
  receiveCount++;
  logger.debug("Consuming {}, receiveCount= {}", getMessage(msg), receiveCount);
  try {
   Thread.sleep(50);
  } catch (InterruptedException e) {
   break;
  }
 }
 logger.debug("DONE consuming");
}

Code example source: origin: HiveKa/HiveKa

Message messageWithKey = new Message(bytes,keyBytes);
Message messageWithoutKey = new Message(bytes);
long checksum = key.getChecksum();
if (checksum != messageWithKey.checksum() && checksum != messageWithoutKey.checksum()) {
