Usage of the org.apache.kafka.common.utils.Utils.toArray() method, with code examples

x33g5p2x · reposted 2022-02-01

This article collects code examples of the Java method org.apache.kafka.common.utils.Utils.toArray() and shows how it is used in practice. The examples come from curated projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Utils.toArray() method:
Package: org.apache.kafka.common.utils
Class: Utils
Method: toArray

Introduction to Utils.toArray

Read the given byte buffer from its current position to its limit into a byte array.

Code examples

Code example source: origin: apache/kafka

/**
 * Read a byte array from its current position given the size in the buffer
 * @param buffer The buffer to read from
 * @param size The number of bytes to read into the array
 */
public static byte[] toArray(ByteBuffer buffer, int size) {
  return toArray(buffer, 0, size);
}

Code example source: origin: apache/kafka

/**
 * Convert a ByteBuffer to a nullable array.
 * @param buffer The buffer to convert
 * @return The resulting array or null if the buffer is null
 */
public static byte[] toNullableArray(ByteBuffer buffer) {
  return buffer == null ? null : toArray(buffer);
}

Code example source: origin: apache/kafka

public byte[] value() {
  if (value == null && valueBuffer != null) {
    value = Utils.toArray(valueBuffer);
    valueBuffer = null;
  }
  return value;
}

Code example source: origin: apache/kafka

/**
 * Read the given byte buffer from its current position to its limit into a byte array.
 * @param buffer The buffer to read from
 */
public static byte[] toArray(ByteBuffer buffer) {
  return toArray(buffer, 0, buffer.remaining());
}

Code example source: origin: apache/kafka

/**
 * Read a UTF8 string from a byte buffer at a given offset. Note that the position of the byte buffer
 * is not affected by this method.
 *
 * @param buffer The buffer to read from
 * @param offset The offset relative to the current position in the buffer
 * @param length The length of the string in bytes
 * @return The UTF8 string
 */
public static String utf8(ByteBuffer buffer, int offset, int length) {
  if (buffer.hasArray())
    return new String(buffer.array(), buffer.arrayOffset() + buffer.position() + offset, length, StandardCharsets.UTF_8);
  else
    return utf8(toArray(buffer, offset, length));
}

Code example source: origin: apache/kafka

protected String asString(NetworkReceive receive) {
  return new String(Utils.toArray(receive.payload()));
}

Code example source: origin: apache/kafka

@Test
public void toArray() {
  byte[] input = {0, 1, 2, 3, 4};
  ByteBuffer buffer = ByteBuffer.wrap(input);
  assertArrayEquals(input, Utils.toArray(buffer));
  assertEquals(0, buffer.position());
  assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2));
  assertEquals(0, buffer.position());
  buffer.position(2);
  assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer));
  assertEquals(2, buffer.position());
}

Code example source: origin: apache/kafka

@Test
public void toArrayDirectByteBuffer() {
  byte[] input = {0, 1, 2, 3, 4};
  ByteBuffer buffer = ByteBuffer.allocateDirect(5);
  buffer.put(input);
  buffer.rewind();
  assertArrayEquals(input, Utils.toArray(buffer));
  assertEquals(0, buffer.position());
  assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2));
  assertEquals(0, buffer.position());
  buffer.position(2);
  assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer));
  assertEquals(2, buffer.position());
}

Code example source: origin: apache/kafka

private void assertVarintSerde(int value, byte[] expectedEncoding) throws IOException {
  ByteBuffer buf = ByteBuffer.allocate(32);
  ByteUtils.writeVarint(value, buf);
  buf.flip();
  assertArrayEquals(expectedEncoding, Utils.toArray(buf));
  assertEquals(value, ByteUtils.readVarint(buf.duplicate()));
  buf.rewind();
  DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buf));
  ByteUtils.writeVarint(value, out);
  buf.flip();
  assertArrayEquals(expectedEncoding, Utils.toArray(buf));
  DataInputStream in = new DataInputStream(new ByteBufferInputStream(buf));
  assertEquals(value, ByteUtils.readVarint(in));
}

Code example source: origin: apache/kafka

@Test(expected = InvalidRecordException.class)
public void testCompressedIterationWithEmptyRecords() throws Exception {
  ByteBuffer emptyCompressedValue = ByteBuffer.allocate(64);
  OutputStream gzipOutput = CompressionType.GZIP.wrapForOutput(new ByteBufferOutputStream(emptyCompressedValue),
      RecordBatch.MAGIC_VALUE_V1);
  gzipOutput.close();
  emptyCompressedValue.flip();
  ByteBuffer buffer = ByteBuffer.allocate(128);
  DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
  AbstractLegacyRecordBatch.writeHeader(out, 0L, LegacyRecord.RECORD_OVERHEAD_V1 + emptyCompressedValue.remaining());
  LegacyRecord.write(out, RecordBatch.MAGIC_VALUE_V1, 1L, null, Utils.toArray(emptyCompressedValue),
      CompressionType.GZIP, TimestampType.CREATE_TIME);
  buffer.flip();
  MemoryRecords records = MemoryRecords.readableRecords(buffer);
  if (records.records().iterator().hasNext())
    fail("Iteration should have caused invalid record error");
}

Code example source: origin: apache/kafka

private void assertVarlongSerde(long value, byte[] expectedEncoding) throws IOException {
  ByteBuffer buf = ByteBuffer.allocate(32);
  ByteUtils.writeVarlong(value, buf);
  buf.flip();
  assertEquals(value, ByteUtils.readVarlong(buf.duplicate()));
  assertArrayEquals(expectedEncoding, Utils.toArray(buf));
  buf.rewind();
  DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buf));
  ByteUtils.writeVarlong(value, out);
  buf.flip();
  assertArrayEquals(expectedEncoding, Utils.toArray(buf));
  DataInputStream in = new DataInputStream(new ByteBufferInputStream(buf));
  assertEquals(value, ByteUtils.readVarlong(in));
}

Code example source: origin: apache/kafka

public static void checkClientConnection(Selector selector, String node, int minMessageSize, int messageCount) throws Exception {
  waitForChannelReady(selector, node);
  String prefix = TestUtils.randomString(minMessageSize);
  int requests = 0;
  int responses = 0;
  selector.send(new NetworkSend(node, ByteBuffer.wrap((prefix + "-0").getBytes(StandardCharsets.UTF_8))));
  requests++;
  while (responses < messageCount) {
    selector.poll(0L);
    assertEquals("No disconnects should have occurred.", 0, selector.disconnected().size());
    for (NetworkReceive receive : selector.completedReceives()) {
      assertEquals(prefix + "-" + responses, new String(Utils.toArray(receive.payload()), StandardCharsets.UTF_8));
      responses++;
    }
    for (int i = 0; i < selector.completedSends().size() && requests < messageCount && selector.isChannelReady(node); i++, requests++) {
      selector.send(new NetworkSend(node, ByteBuffer.wrap((prefix + "-" + requests).getBytes())));
    }
  }
}

Code example source: origin: apache/kafka

/**
 * Parse the record entry, deserializing the key / value fields if necessary
 */
private ConsumerRecord<K, V> parseRecord(TopicPartition partition,
                     RecordBatch batch,
                     Record record) {
  try {
    long offset = record.offset();
    long timestamp = record.timestamp();
    Optional<Integer> leaderEpoch = maybeLeaderEpoch(batch.partitionLeaderEpoch());
    TimestampType timestampType = batch.timestampType();
    Headers headers = new RecordHeaders(record.headers());
    ByteBuffer keyBytes = record.key();
    byte[] keyByteArray = keyBytes == null ? null : Utils.toArray(keyBytes);
    K key = keyBytes == null ? null : this.keyDeserializer.deserialize(partition.topic(), headers, keyByteArray);
    ByteBuffer valueBytes = record.value();
    byte[] valueByteArray = valueBytes == null ? null : Utils.toArray(valueBytes);
    V value = valueBytes == null ? null : this.valueDeserializer.deserialize(partition.topic(), headers, valueByteArray);
    return new ConsumerRecord<>(partition.topic(), partition.partition(), offset,
                  timestamp, timestampType, record.checksumOrNull(),
                  keyByteArray == null ? ConsumerRecord.NULL_SIZE : keyByteArray.length,
                  valueByteArray == null ? ConsumerRecord.NULL_SIZE : valueByteArray.length,
                  key, value, headers, leaderEpoch);
  } catch (RuntimeException e) {
    throw new SerializationException("Error deserializing key/value for partition " + partition +
        " at offset " + record.offset() + ". If needed, please seek past the record to continue consumption.", e);
  }
}

Code example source: origin: apache/kafka

assertEquals(message, new String(Utils.toArray(receiveList.get(0).payload())));

Code example source: origin: me.jeffshaw.kafka/kafka-clients

/**
 * Read the given byte buffer into a byte array
 */
public static byte[] toArray(ByteBuffer buffer) {
  return toArray(buffer, 0, buffer.limit());
}

Code example source: origin: ucarGroup/DataLink

private void handleSyncGroupRequest(ChannelHandlerContext ctx, Request request) {
  SyncGroupRequest syncGroupRequest = (SyncGroupRequest) request.getBody();
  coordinator.handleSyncGroup(
      syncGroupRequest.groupId(),
      syncGroupRequest.generationId(),
      syncGroupRequest.memberId(),
      syncGroupRequest.groupAssignment().entrySet().stream().collect(Collectors.toMap(k -> k.getKey(), k -> Utils.toArray(k.getValue()))),
      (memberState, errorCode) -> {
        SyncGroupResponse responseBody = new SyncGroupResponse(errorCode, ByteBuffer.wrap(memberState));
        ResponseHeader responseHeader = new ResponseHeader(request.getHeader().correlationId());
        sendResponse(ctx, new Response(responseHeader, responseBody));
      }
  );
}

Code example source: origin: org.springframework.cloud/spring-cloud-stream-binder-kafka11

final ConsumerRecord<?, ?> record = message.getHeaders()
    .get(KafkaMessageDrivenChannelAdapter.KAFKA_RAW_DATA, ConsumerRecord.class);
final byte[] key = record.key() != null ? Utils.toArray(ByteBuffer.wrap((byte[]) record.key()))
    : null;
final byte[] payload = record.value() != null
    ? Utils.toArray(ByteBuffer.wrap((byte[]) record.value())) : null;
String dlqName = StringUtils.hasText(extendedConsumerProperties.getExtension().getDlqName())
    ? extendedConsumerProperties.getExtension().getDlqName()

Code example source: origin: ucarGroup/DataLink

private void handleJoinGroupRequest(ChannelHandlerContext ctx, Request request) {
  JoinGroupRequest joinGroupRequest = (JoinGroupRequest) request.getBody();
  ResponseHeader responseHeader = new ResponseHeader(request.getHeader().correlationId());
  List<ProtocolEntry> protocols = joinGroupRequest.groupProtocols().stream().map(protocol -> new ProtocolEntry(protocol.name(), Utils.toArray(protocol.metadata()))).collect(Collectors.toList());
  coordinator.handleJoinGroup(
      joinGroupRequest.groupId(),
      joinGroupRequest.memberId(),
      request.getHeader().clientId(),
      request.getClientAddress().toString(),
      joinGroupRequest.rebalanceTimeout(),
      joinGroupRequest.sessionTimeout(),
      joinGroupRequest.protocolType(),
      protocols,
      (joinResult) -> {
        Map<String, ByteBuffer> members = joinResult.getMembers().entrySet().stream().collect(Collectors.toMap(k -> k.getKey(), k -> ByteBuffer.wrap(k.getValue())));
        JoinGroupResponse responseBody = new JoinGroupResponse(request.getHeader().apiVersion(), joinResult.getErrorCode(), joinResult.getGenerationId(),
            joinResult.getSubProtocol(), joinResult.getMemberId(), joinResult.getLeaderId(), members);
        logger.trace(String.format("Sending join group response %s for correlation id %d to client %s.",
            responseBody, request.getHeader().correlationId(), request.getHeader().clientId()));
        sendResponse(ctx, new Response(responseHeader, responseBody));
      }
  );
}

Code example source: origin: org.apache.kafka/connect-api

if (Decimal.LOGICAL_NAME.equals(toSchema.name())) {
  if (value instanceof ByteBuffer) {
    value = Utils.toArray((ByteBuffer) value);
  }
  // ... (snippet truncated at the source)
}
// A second, separately extracted use from the same origin:
return Utils.toArray((ByteBuffer) value);
