Usage of the org.elasticsearch.common.collect.Tuple.tuple() method, with code examples

x33g5p2x · reposted on 2022-01-30 · category: Other

This article collects a number of Java code examples for the org.elasticsearch.common.collect.Tuple.tuple() method and shows how Tuple.tuple() is used in practice. The examples were extracted from selected projects hosted on platforms such as GitHub, Stack Overflow, and Maven, so they are reasonably representative and should serve as useful references. Details of the Tuple.tuple() method follow:
Package path: org.elasticsearch.common.collect.Tuple
Class name: Tuple
Method name: tuple

About Tuple.tuple

Tuple.tuple(v1, v2) is a static factory method that creates a new immutable Tuple pairing the two given values (equivalent to new Tuple<>(v1, v2)); the components are read back with v1() and v2().
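
A minimal runnable sketch of the method in isolation (the class name TupleTupleDemo is made up for illustration; Tuple.tuple(), v1() and v2() are the actual Elasticsearch API):

import org.elasticsearch.common.collect.Tuple;

public class TupleTupleDemo {
  public static void main(String[] args) {
    // Tuple.tuple(a, b) pairs two values of possibly different types into an
    // immutable Tuple<A, B>; it is a static shortcut for new Tuple<>(a, b).
    Tuple<String, Integer> hostAndPort = Tuple.tuple("localhost", 9300);

    // The two components are read back with v1() and v2().
    String host = hostAndPort.v1();   // "localhost"
    int port = hostAndPort.v2();      // 9300
    System.out.println(host + ":" + port);
  }
}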

Code examples

Code example (origin: org.elasticsearch/elasticsearch)

/**
 * Returns an array of all registered pairs of handle IDs and exception classes. These pairs are
 * provided for every registered exception.
 *
 * @return an array of all registered pairs of handle IDs and exception classes
 */
static Tuple<Integer, Class<? extends ElasticsearchException>>[] classes() {
  @SuppressWarnings("unchecked")
  final Tuple<Integer, Class<? extends ElasticsearchException>>[] ts =
      Arrays.stream(ElasticsearchExceptionHandle.values())
          .map(h -> Tuple.tuple(h.id, h.exceptionClass)).toArray(Tuple[]::new);
  return ts;
}
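
For illustration only, a hedged sketch of how a caller might consume the returned array (hypothetical snippet: classes() is package-private, so in practice it is only called from Elasticsearch's own code, such as its test suite; java.util.Map and java.util.HashMap are assumed to be imported):

// Hypothetical consumer: index the registered exception classes by handle ID,
// reading each pair back with v1()/v2().
Map<Integer, Class<? extends ElasticsearchException>> byId = new HashMap<>();
for (Tuple<Integer, Class<? extends ElasticsearchException>> t : ElasticsearchException.classes()) {
  byId.put(t.v1(), t.v2()); // v1() = handle ID, v2() = exception class
}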

Code example (origin: org.elasticsearch/elasticsearch)

public static Tuple<String, Integer> parseHostPort(final String remoteHost) {
  final String host = remoteHost.substring(0, indexOfPortSeparator(remoteHost));
  final int port = parsePort(remoteHost);
  return Tuple.tuple(host, port);
}
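
The caller unpacks the returned pair with v1() and v2(). Below is a stand-in sketch of the same pattern (parseHostPort and parsePort above are internal Elasticsearch helpers, so the port split is reimplemented inline here purely for illustration):

// Split a "host:port" string into a Tuple and unpack it with v1()/v2().
String remoteHost = "10.0.0.5:9300";
int separator = remoteHost.lastIndexOf(':');
Tuple<String, Integer> hostPort = Tuple.tuple(
    remoteHost.substring(0, separator),
    Integer.parseInt(remoteHost.substring(separator + 1)));
String host = hostPort.v1();  // "10.0.0.5"
int port = hostPort.v2();     // 9300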

Code example (origin: org.elasticsearch/elasticsearch)

public Tuple<RemoveCorruptedShardDataCommand.CleanStatus, String> getCleanStatus(ShardPath shardPath,
                                         Directory indexDirectory,
                                         Lock writeLock,
                                         PrintStream printStream,
                                         boolean verbose) throws IOException {
  if (RemoveCorruptedShardDataCommand.isCorruptMarkerFileIsPresent(indexDirectory) == false) {
    return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null);
  }
  final CheckIndex.Status status;
  try (CheckIndex checker = new CheckIndex(indexDirectory, writeLock)) {
    checker.setChecksumsOnly(true);
    checker.setInfoStream(printStream, verbose);
    status = checker.checkIndex(null);
    if (status.missingSegments) {
      return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.UNRECOVERABLE,
        "Index is unrecoverable - there are missing segments");
    }
    return status.clean
      ? Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN_WITH_CORRUPTED_MARKER, null)
      : Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED,
        "Corrupted Lucene index segments found - " + status.totLoseDocCount + " documents will be lost.");
  }
}

Code example (origin: org.elasticsearch/elasticsearch)

/**
 * put an entry into the segment
 *
 * @param key   the key of the entry to add to the cache
 * @param value the value of the entry to add to the cache
 * @param now   the access time of this entry
 * @return a tuple of the new entry and the previously existing entry (null if there was none)
 */
Tuple<Entry<K, V>, Entry<K, V>> put(K key, V value, long now) {
  Entry<K, V> entry = new Entry<>(key, value, now);
  Entry<K, V> existing = null;
  try (ReleasableLock ignored = writeLock.acquire()) {
    try {
      CompletableFuture<Entry<K, V>> future = map.put(key, CompletableFuture.completedFuture(entry));
      if (future != null) {
        existing = future.handle((ok, ex) -> {
          if (ok != null) {
            return ok;
          } else {
            return null;
          }
        }).get();
      }
    } catch (ExecutionException | InterruptedException e) {
      throw new IllegalStateException(e);
    }
  }
  return Tuple.tuple(entry, existing);
}

Code example (origin: org.elasticsearch/elasticsearch)

void updateRemoteCluster(
    final String clusterAlias,
    final List<String> addresses,
    final String proxyAddress,
    final ActionListener<Void> connectionListener) {
  final List<Tuple<String, Supplier<DiscoveryNode>>> nodes =
      addresses.stream().<Tuple<String, Supplier<DiscoveryNode>>>map(address -> Tuple.tuple(address, () ->
          buildSeedNode(clusterAlias, address, Strings.hasLength(proxyAddress)))
      ).collect(Collectors.toList());
  updateRemoteClusters(Collections.singletonMap(clusterAlias, new Tuple<>(proxyAddress, nodes)), connectionListener);
}

Code example (origin: org.elasticsearch/elasticsearch)

// Excerpt: the surrounding method body is truncated in the original. Tuples of
// (major, minor) device numbers serve as map keys while collecting per-device
// I/O statistics on Linux.
for (int i = 0; i < previous.getIoStats().devicesStats.length; i++) {
  FsInfo.DeviceStats deviceStats = previous.getIoStats().devicesStats[i];
  deviceMap.put(Tuple.tuple(deviceStats.majorDeviceNumber, deviceStats.minorDeviceNumber), deviceStats);
}
// ... for each parsed line of /proc/diskstats ...
final int majorDeviceNumber = Integer.parseInt(fields[0]);
final int minorDeviceNumber = Integer.parseInt(fields[1]);
if (!devicesNumbers.contains(Tuple.tuple(majorDeviceNumber, minorDeviceNumber))) {
  continue;
}
final FsInfo.DeviceStats deviceStats = new FsInfo.DeviceStats(
    // ... earlier constructor arguments truncated in the original excerpt ...
    writesCompleted,
    sectorsWritten,
    deviceMap.get(Tuple.tuple(majorDeviceNumber, minorDeviceNumber)));
devicesStats.add(deviceStats);

Code example (origin: org.elasticsearch/elasticsearch)

// Excerpt: the surrounding method is truncated in the original. A listener is
// registered as a pair of the awaited global checkpoint and an optional timeout task.
} else {
  if (timeout == null) {
    listeners.put(listener, Tuple.tuple(waitingForGlobalCheckpoint, null));
  } else {
    listeners.put(
        listener,
        Tuple.tuple(
            waitingForGlobalCheckpoint,
            scheduler.schedule( /* timeout handler; arguments truncated in the original */ )));
  }
}

Code example (origin: org.elasticsearch/elasticsearch)

private static Map<String, Tuple<String, List<Tuple<String, Supplier<DiscoveryNode>>>>> buildRemoteClustersDynamicConfig(
    final Settings settings, final Setting.AffixSetting<List<String>> seedsSetting) {
  final Stream<Setting<List<String>>> allConcreteSettings = seedsSetting.getAllConcreteSettings(settings);
  return allConcreteSettings.collect(
      Collectors.toMap(seedsSetting::getNamespace, concreteSetting -> {
        String clusterName = seedsSetting.getNamespace(concreteSetting);
        List<String> addresses = concreteSetting.get(settings);
        final boolean proxyMode =
            REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).existsOrFallbackExists(settings);
        List<Tuple<String, Supplier<DiscoveryNode>>> nodes = new ArrayList<>(addresses.size());
        for (String address : addresses) {
          nodes.add(Tuple.tuple(address, () -> buildSeedNode(clusterName, address, proxyMode)));
        }
        return new Tuple<>(REMOTE_CLUSTERS_PROXY.getConcreteSettingForNamespace(clusterName).get(settings), nodes);
      }));
}

Code example (origin: org.elasticsearch/elasticsearch)

/**
 * Partitions the settings into those that are known and valid versus those that are unknown or invalid. The resulting tuple contains
 * the known and valid settings in the first component and the unknown or invalid settings in the second component. Note that archived
 * settings contained in the settings to partition are included in the first component.
 *
 * @param settings     the settings to partition
 * @param settingsType a string to identify the settings (for logging)
 * @param logger       a logger for sending warnings to
 * @return the partitioned settings
 */
private Tuple<Settings, Settings> partitionKnownAndValidSettings(
    final Settings settings, final String settingsType, final Logger logger) {
  final Settings existingArchivedSettings = settings.filter(k -> k.startsWith(ARCHIVED_SETTINGS_PREFIX));
  final Settings settingsExcludingExistingArchivedSettings =
      settings.filter(k -> k.startsWith(ARCHIVED_SETTINGS_PREFIX) == false);
  final Settings settingsWithUnknownOrInvalidArchived = clusterSettings.archiveUnknownOrInvalidSettings(
      settingsExcludingExistingArchivedSettings,
      e -> logUnknownSetting(settingsType, e, logger),
      (e, ex) -> logInvalidSetting(settingsType, e, ex, logger));
  return Tuple.tuple(
      Settings.builder()
          .put(settingsWithUnknownOrInvalidArchived.filter(k -> k.startsWith(ARCHIVED_SETTINGS_PREFIX) == false))
          .put(existingArchivedSettings)
          .build(),
      settingsWithUnknownOrInvalidArchived.filter(k -> k.startsWith(ARCHIVED_SETTINGS_PREFIX)));
}

Code example (origin: org.elasticsearch/elasticsearch)

public Tuple<RemoveCorruptedShardDataCommand.CleanStatus, String> getCleanStatus(ShardPath shardPath,
                                         Directory indexDirectory) throws IOException {
  final Path indexPath = shardPath.resolveIndex();
  final Path translogPath = shardPath.resolveTranslog();
  final List<IndexCommit> commits;
  try {
    commits = DirectoryReader.listCommits(indexDirectory);
  } catch (IndexNotFoundException infe) {
    throw new ElasticsearchException("unable to find a valid shard at [" + indexPath + "]", infe);
  }
  // Retrieve the generation and UUID from the existing data
  final Map<String, String> commitData = new HashMap<>(commits.get(commits.size() - 1).getUserData());
  final String translogUUID = commitData.get(Translog.TRANSLOG_UUID_KEY);
  if (translogUUID == null) {
    throw new ElasticsearchException("shard must have a valid translog UUID but got: [null]");
  }
  final boolean clean = isTranslogClean(shardPath, translogUUID);
  if (clean) {
    return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CLEAN, null);
  }
  // Hold the lock open for the duration of the tool running
  Set<Path> translogFiles;
  try {
    translogFiles = filesInDirectory(translogPath);
  } catch (IOException e) {
    throw new ElasticsearchException("failed to find existing translog files", e);
  }
  final String details = deletingFilesDetails(translogPath, translogFiles);
  return Tuple.tuple(RemoveCorruptedShardDataCommand.CleanStatus.CORRUPTED, details);
}

Code example (origin: org.elasticsearch/elasticsearch)

public FsInfo stats(FsInfo previous, @Nullable ClusterInfo clusterInfo) throws IOException {
  if (!nodeEnv.hasNodeFile()) {
    return new FsInfo(System.currentTimeMillis(), null, new FsInfo.Path[0]);
  }
  NodePath[] dataLocations = nodeEnv.nodePaths();
  FsInfo.Path[] paths = new FsInfo.Path[dataLocations.length];
  for (int i = 0; i < dataLocations.length; i++) {
    paths[i] = getFSInfo(dataLocations[i]);
  }
  FsInfo.IoStats ioStats = null;
  if (Constants.LINUX) {
    Set<Tuple<Integer, Integer>> devicesNumbers = new HashSet<>();
    for (int i = 0; i < dataLocations.length; i++) {
      if (dataLocations[i].majorDeviceNumber != -1 && dataLocations[i].minorDeviceNumber != -1) {
        devicesNumbers.add(Tuple.tuple(dataLocations[i].majorDeviceNumber, dataLocations[i].minorDeviceNumber));
      }
    }
    ioStats = ioStats(devicesNumbers, previous);
  }
  DiskUsage leastDiskEstimate = null;
  DiskUsage mostDiskEstimate = null;
  if (clusterInfo != null) {
    leastDiskEstimate = clusterInfo.getNodeLeastAvailableDiskUsages().get(nodeEnv.nodeId());
    mostDiskEstimate = clusterInfo.getNodeMostAvailableDiskUsages().get(nodeEnv.nodeId());
  }
  return new FsInfo(System.currentTimeMillis(), ioStats, paths, leastDiskEstimate, mostDiskEstimate);
}

Code example (origin: org.elasticsearch/elasticsearch)

// Excerpt: the surrounding method is truncated in the original. The decision and
// the per-node decisions are returned together as a pair.
      madeDecision = decision;
    } else {
      return Tuple.tuple(decision, nodeDecisions);
    }
// ...
return Tuple.tuple(madeDecision, nodeDecisions);

Code example (origin: org.elasticsearch/elasticsearch)

/**
 * Relocate a shard to another node, adding the target initializing
 * shard as well as assigning it.
 *
 * @return pair of source relocating and target initializing shards.
 */
public Tuple<ShardRouting,ShardRouting> relocateShard(ShardRouting startedShard, String nodeId, long expectedShardSize,
                           RoutingChangesObserver changes) {
  ensureMutable();
  relocatingShards++;
  ShardRouting source = startedShard.relocate(nodeId, expectedShardSize);
  ShardRouting target = source.getTargetRelocatingShard();
  updateAssigned(startedShard, source);
  node(target.currentNodeId()).add(target);
  assignedShardsAdd(target);
  addRecovery(target);
  changes.relocationStarted(startedShard, target);
  return Tuple.tuple(source, target);
}

Code example (origin: org.elasticsearch/elasticsearch)

nodeExplanationMap.put(node.getNodeId(),
  new NodeAllocationResult(node.getRoutingNode().node(), currentDecision, 0));
nodeWeights.add(Tuple.tuple(node.getNodeId(), currentWeight));

Code example (origin: org.elasticsearch/elasticsearch)

Tuple<ModelNode, Decision> nodeResult = Tuple.tuple(node, canAllocate);
if (rebalanceConditionsMet) {
  betterBalanceNodes.add(nodeResult);
} // ... remaining branches truncated in the original

Code example (origin: com.strapdata.elasticsearch.test/framework)

@Override
public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options)
  throws IOException, TransportException {
  requests.put(requestId, Tuple.tuple(node, action));
  capturedRequests.add(new CapturedRequest(node, requestId, action, request));
}

The classes() example shown at the top of this article also appears verbatim under the origins org.apache.servicemix.bundles/org.apache.servicemix.bundles.elasticsearch and com.strapdata.elasticsearch/elasticsearch.

Code example (origin: org.elasticsearch/elasticsearch)

// Two excerpts: the surrounding method is truncated in the original.
  terminal.println("");
} else {
  indexCleanStatus = Tuple.tuple(CleanStatus.CLEAN, null);
}
// ...
  terminal.println("");
} else {
  translogCleanStatus = Tuple.tuple(CleanStatus.UNRECOVERABLE, null);
}

Code example (origin: com.strapdata.elasticsearch.test/framework)

public static Tuple<String, Object> parseTuple(XContentParser parser) throws IOException {
  parser.nextToken();
  advanceToFieldName(parser);
  Map<String,Object> map = parser.map();
  assert parser.currentToken() == XContentParser.Token.END_OBJECT;
  parser.nextToken();
  if (map.size() != 1) {
    throw new IllegalArgumentException("expected key value pair but found an object with " + map.size() + " fields");
  }
  Map.Entry<String, Object> entry = map.entrySet().iterator().next();
  return Tuple.tuple(entry.getKey(), entry.getValue());
}
