Usage of org.apache.cassandra.cql3.QueryProcessor.executeInternalWithPaging() with code examples

x33g5p2x, reposted on 2022-01-28, category: Other

This article collects a number of Java code examples for the org.apache.cassandra.cql3.QueryProcessor.executeInternalWithPaging method and shows how it is used in practice. The examples were extracted from curated projects on platforms such as GitHub, Stack Overflow, and Maven, and should serve as useful references. Details of the method follow:
Package: org.apache.cassandra.cql3
Class: QueryProcessor
Method: executeInternalWithPaging

QueryProcessor.executeInternalWithPaging overview

Executes a CQL SELECT against the local node, bypassing the native protocol and the usual authorization checks, and returns an UntypedResultSet that fetches results in pages of the given size as it is iterated. Only SELECT statements are accepted (anything else throws IllegalArgumentException); positional bind values are supplied as trailing varargs.
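
As a minimal usage sketch (the keyspace, table, and column names below are hypothetical, not taken from the examples that follow):

import java.util.UUID;

import org.apache.cassandra.cql3.QueryProcessor;
import org.apache.cassandra.cql3.UntypedResultSet;

public final class PagedReadExample
{
  // Reads one partition of a hypothetical table in pages of 500 rows.
  public static void printEventIds(UUID partitionKey)
  {
    // The statement is prepared and executed on the local node; rows are
    // fetched page by page as the iterator advances, so the partition is
    // never materialized in memory all at once.
    UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(
      "SELECT event_id FROM my_keyspace.events WHERE key = ?",
      500,
      partitionKey);
    for (UntypedResultSet.Row row : rows)
      System.out.println(row.getUUID("event_id"));
  }
}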

Code examples

Code example source: origin: jsevellec/cassandra-unit (identical code also ships in org.apache.cassandra/cassandra-all and com.strapdata.cassandra/cassandra-all)

private void migrateLegacyHints(UUID hostId, ByteBuffer buffer)
{
  String query = String.format("SELECT target_id, hint_id, message_version, mutation, ttl(mutation) AS ttl, writeTime(mutation) AS write_time " +
                 "FROM %s.%s " +
                 "WHERE target_id = ?",
                 SchemaConstants.SYSTEM_KEYSPACE_NAME,
                 SystemKeyspace.LEGACY_HINTS);
  // read all the old hints (paged iterator), write them in the new format
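  // pageSize is a field of the enclosing migrator class (not shown in this snippet)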
  UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize, hostId);
  migrateLegacyHints(hostId, rows, buffer);
  // delete the whole partition in the legacy table; we would truncate the whole table afterwards, but this allows
  // to not lose progress in case of a terminated conversion
  deleteLegacyHintsPartition(hostId);
}
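
The overload of migrateLegacyHints that consumes the paged rows is not part of this snippet. A hedged sketch of its likely shape, using only the columns selected by the query above (the actual conversion step is omitted):

private void migrateLegacyHints(UUID hostId, UntypedResultSet rows, ByteBuffer buffer)
{
  for (UntypedResultSet.Row row : rows)
  {
    // One row per legacy hint: the serialized mutation plus its liveness
    // metadata, to be re-encoded into the new hints format.
    ByteBuffer mutation = row.getBytes("mutation");
    int messageVersion = row.getInt("message_version");
    int ttl = row.getInt("ttl");
    long writeTime = row.getLong("write_time");
    // ... convert and write into the new-format hints buffer (omitted) ...
  }
}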


Code example source: origin: jsevellec/cassandra-unit (identical code also ships in org.apache.cassandra/cassandra-all and com.strapdata.cassandra/cassandra-all)

private void replayFailedBatches()
{
  logger.trace("Started replayFailedBatches");
  // rate limit is in bytes per second. Uses Double.MAX_VALUE if disabled (set to 0 in cassandra.yaml).
  // max rate is scaled by the number of nodes in the cluster (same as for HHOM - see CASSANDRA-5272).
  int endpointsCount = StorageService.instance.getTokenMetadata().getAllEndpoints().size();
  if (endpointsCount <= 0)
  {
    logger.trace("Replay cancelled as there are no peers in the ring.");
    return;
  }
  int throttleInKB = DatabaseDescriptor.getBatchlogReplayThrottleInKB() / endpointsCount;
  RateLimiter rateLimiter = RateLimiter.create(throttleInKB == 0 ? Double.MAX_VALUE : throttleInKB * 1024);
  UUID limitUuid = UUIDGen.maxTimeUUID(System.currentTimeMillis() - getBatchlogTimeout());
  ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.BATCHES);
  int pageSize = calculatePageSize(store);
  // There cannot be any live content where token(id) <= token(lastReplayedUuid) as every processed batch is
  // deleted, but the tombstoned content may still be present in the tables. To avoid walking over it we specify
  // token(id) > token(lastReplayedUuid) as part of the query.
  String query = String.format("SELECT id, mutations, version FROM %s.%s WHERE token(id) > token(?) AND token(id) <= token(?)",
                 SchemaConstants.SYSTEM_KEYSPACE_NAME,
                 SystemKeyspace.BATCHES);
  UntypedResultSet batches = executeInternalWithPaging(query, pageSize, lastReplayedUuid, limitUuid);
  processBatchlogEntries(batches, pageSize, rateLimiter);
  lastReplayedUuid = limitUuid;
  logger.trace("Finished replayFailedBatches");
}
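
Two details worth noting: lastReplayedUuid is a field of the manager, carried across runs so that each replay only scans batches written since the previous one, and calculatePageSize(store) derives the page size from table statistics rather than using a constant. A sketch of the page-size idea, assuming a getMeanPartitionSize() accessor on ColumnFamilyStore (the 128-row cap and 4 MiB target are illustrative):

static final int DEFAULT_PAGE_SIZE = 128;

// Shrink the page when partitions are large so that one page stays around a
// few megabytes of data; never go below one row per page.
static int calculatePageSize(ColumnFamilyStore store)
{
  double averagePartitionSize = store.getMeanPartitionSize();
  if (averagePartitionSize <= 0)
    return DEFAULT_PAGE_SIZE;
  return (int) Math.max(1, Math.min(DEFAULT_PAGE_SIZE, 4 * 1024 * 1024 / averagePartitionSize));
}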


Code example source: origin: org.apache.cassandra/cassandra-all (identical code also ships in jsevellec/cassandra-unit and com.strapdata.cassandra/cassandra-all)

@SuppressWarnings("deprecation")
public static void migrate()
{
  ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG);
  // nothing to migrate
  if (store.isEmpty())
    return;
  logger.info("Migrating legacy batchlog to new storage");
  int convertedBatches = 0;
  String query = String.format("SELECT id, data, written_at, version FROM %s.%s",
                 SchemaConstants.SYSTEM_KEYSPACE_NAME,
                 SystemKeyspace.LEGACY_BATCHLOG);
  int pageSize = BatchlogManager.calculatePageSize(store);
  UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize);
  for (UntypedResultSet.Row row : rows)
  {
    if (apply(row, convertedBatches))
      convertedBatches++;
  }
  if (convertedBatches > 0)
    Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking();
}
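
Note the ordering here: every legacy row is converted first, streamed through the internal pager so the whole batchlog never has to fit in memory, and the legacy table is truncated only if at least one batch was actually converted, so an empty or failed migration leaves the old data in place.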

