scala.collection.Seq.contains()方法的使用及代码示例

x33g5p2x  于2022-01-30 转载在 其他  
字(6.3k)|赞(0)|评价(0)|浏览(97)

本文整理了Java中scala.collection.Seq.contains()方法的一些代码示例,展示了Seq.contains()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Seq.contains()方法的具体详情如下:
包路径:scala.collection.Seq
类名称:Seq
方法名:contains

Seq.contains介绍

暂无

代码示例

代码示例来源:origin: linkedin/cruise-control

/**
 * Executes the given proposals through a freshly-built {@code Executor} and then verifies,
 * for each proposal in {@code proposalsToCheck}, that the replication factor is unchanged,
 * that every proposed new replica is present, and that leadership moved to the proposed leader.
 *
 * @param zkUtils            ZooKeeper utilities used to read partition replica/leader state
 * @param proposalsToExecute proposals handed to the executor for execution
 * @param proposalsToCheck   proposals whose end state is asserted after execution finishes
 */
private void executeAndVerifyProposals(ZkUtils zkUtils,
                    Collection<ExecutionProposal> proposalsToExecute,
                    Collection<ExecutionProposal> proposalsToCheck) {
 KafkaCruiseControlConfig configs = new KafkaCruiseControlConfig(getExecutorProperties());
 // NOTE(review): the two long literals are presumably timeout/interval millis (24h and 12h) — confirm against Executor's constructor.
 Executor executor = new Executor(configs, new SystemTime(), new MetricRegistry(), 86400000L, 43200000L);
 executor.setExecutionMode(false);
 // LoadMonitor is mocked: this test only cares about the resulting ZK state, not load metrics.
 executor.executeProposals(proposalsToExecute, Collections.emptySet(), null, EasyMock.mock(LoadMonitor.class), null, null, null);
 // Snapshot each partition's replication factor BEFORE execution completes, so we can
 // later assert that executing proposals did not change it.
 Map<TopicPartition, Integer> replicationFactors = new HashMap<>();
 for (ExecutionProposal proposal : proposalsToCheck) {
  int replicationFactor = zkUtils.getReplicasForPartition(proposal.topic(), proposal.partitionId()).size();
  replicationFactors.put(new TopicPartition(proposal.topic(), proposal.partitionId()), replicationFactor);
 }
 // Block until the executor reports completion, then verify the final cluster state.
 waitUntilExecutionFinishes(executor);
 for (ExecutionProposal proposal : proposalsToCheck) {
  TopicPartition tp = new TopicPartition(proposal.topic(), proposal.partitionId());
  int expectedReplicationFactor = replicationFactors.get(tp);
  // Replication factor must be preserved by the rebalance.
  assertEquals("Replication factor for partition " + tp + " should be " + expectedReplicationFactor,
         expectedReplicationFactor, zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).size());
  if (proposal.hasReplicaAction()) {
   // Every broker in the proposed replica set must now actually host the partition.
   for (int brokerId : proposal.newReplicas()) {
    assertTrue("The partition should have moved for " + tp,
          zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).contains(brokerId));
   }
  }
  // Leadership must have moved to the proposed leader.
  assertEquals("The leader should have moved for " + tp,
         proposal.newLeader(), zkUtils.getLeaderForPartition(tp.topic(), tp.partition()).get());
 }
}

代码示例来源:origin: org.apache.spark/spark-sql_2.11

/** Verifies that freqItems on column "a" of testData2 reports the value 1 as frequent. */
@Test
public void testFrequentItems() {
 Dataset<Row> testData = spark.table("testData2");
 // Request items occurring with relative frequency of at least 0.2 in column "a".
 Dataset<Row> frequent = testData.stat().freqItems(new String[]{"a"}, 0.2);
 Row firstRow = frequent.collectAsList().get(0);
 Assert.assertTrue(firstRow.getSeq(0).contains(1));
}

代码示例来源:origin: org.apache.spark/spark-sql_2.10

/** Verifies that freqItems on column "a" of testData2 reports the value 1 as frequent. */
@Test
public void testFrequentItems() {
 Dataset<Row> testData = spark.table("testData2");
 // Request items occurring with relative frequency of at least 0.2 in column "a".
 Dataset<Row> frequent = testData.stat().freqItems(new String[]{"a"}, 0.2);
 Row firstRow = frequent.collectAsList().get(0);
 Assert.assertTrue(firstRow.getSeq(0).contains(1));
}

代码示例来源:origin: org.apache.spark/spark-sql

/** Verifies that freqItems on column "a" of testData2 reports the value 1 as frequent. */
@Test
public void testFrequentItems() {
 Dataset<Row> testData = spark.table("testData2");
 // Request items occurring with relative frequency of at least 0.2 in column "a".
 Dataset<Row> frequent = testData.stat().freqItems(new String[]{"a"}, 0.2);
 Row firstRow = frequent.collectAsList().get(0);
 Assert.assertTrue(firstRow.getSeq(0).contains(1));
}

代码示例来源:origin: apache/samza

/**
 * Filter out properties from the original config that are not supported by Kafka.
 * For example, we allow users to set replication.factor as a property of the streams
 * and then parse it out so we can pass it separately as Kafka requires. But Kafka
 * will also throw if replication.factor is passed as a property on a new topic.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
/**
 * Returns a copy of {@code originalConfig} containing only properties Kafka accepts on a topic.
 * The replication factor is silently dropped (Kafka takes it as a separate argument, not a
 * topic property), and any key Kafka does not recognize is dropped with a warning.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
private static Map<String, String> filterUnsupportedProperties(Map<String, String> originalConfig) {
 Map<String, String> result = new HashMap<>();
 for (Map.Entry<String, String> property : originalConfig.entrySet()) {
  String key = property.getKey();
  // replication.factor is passed to Kafka separately; it must never appear as a topic property.
  if (KafkaConfig.TOPIC_REPLICATION_FACTOR().equals(key)) {
   continue;
  }
  if (LogConfig.configNames().contains(key)) {
   result.put(key, property.getValue());
  } else {
   LOG.warn("Property '{}' is not a valid Kafka topic config. It will be ignored.", key);
  }
 }
 return result;
}

代码示例来源:origin: org.apache.samza/samza-kafka

/**
 * Filter out properties from the original config that are not supported by Kafka.
 * For example, we allow users to set replication.factor as a property of the streams
 * and then parse it out so we can pass it separately as Kafka requires. But Kafka
 * will also throw if replication.factor is passed as a property on a new topic.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
/**
 * Returns a copy of {@code originalConfig} containing only properties Kafka accepts on a topic.
 * The replication factor is silently dropped (Kafka takes it as a separate argument, not a
 * topic property), and any key Kafka does not recognize is dropped with a warning.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
private static Map<String, String> filterUnsupportedProperties(Map<String, String> originalConfig) {
 Map<String, String> result = new HashMap<>();
 for (Map.Entry<String, String> property : originalConfig.entrySet()) {
  String key = property.getKey();
  // replication.factor is passed to Kafka separately; it must never appear as a topic property.
  if (KafkaConfig.TOPIC_REPLICATION_FACTOR().equals(key)) {
   continue;
  }
  if (LogConfig.configNames().contains(key)) {
   result.put(key, property.getValue());
  } else {
   LOG.warn("Property '{}' is not a valid Kafka topic config. It will be ignored.", key);
  }
 }
 return result;
}

代码示例来源:origin: org.apache.samza/samza-kafka_2.11

/**
 * Filter out properties from the original config that are not supported by Kafka.
 * For example, we allow users to set replication.factor as a property of the streams
 * and then parse it out so we can pass it separately as Kafka requires. But Kafka
 * will also throw if replication.factor is passed as a property on a new topic.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
/**
 * Returns a copy of {@code originalConfig} containing only properties Kafka accepts on a topic.
 * The replication factor is silently dropped (Kafka takes it as a separate argument, not a
 * topic property), and any key Kafka does not recognize is dropped with a warning.
 *
 * @param originalConfig  The original config to filter
 * @return                The filtered config
 */
private static Map<String, String> filterUnsupportedProperties(Map<String, String> originalConfig) {
 Map<String, String> result = new HashMap<>();
 for (Map.Entry<String, String> property : originalConfig.entrySet()) {
  String key = property.getKey();
  // replication.factor is passed to Kafka separately; it must never appear as a topic property.
  if (KafkaConfig.TOPIC_REPLICATION_FACTOR().equals(key)) {
   continue;
  }
  if (LogConfig.configNames().contains(key)) {
   result.put(key, property.getValue());
  } else {
   LOG.warn("Property '{}' is not a valid Kafka topic config. It will be ignored.", key);
  }
 }
 return result;
}

代码示例来源:origin: kframework/k

/**
 * Delegates to {@code convert(Sort, boolean)}: the flag is true iff the production
 * has a defined KLabel whose parameter list contains this sort.
 */
private void convert(Sort sort, Production prod) {
  boolean sortIsKlabelParam =
      prod.klabel().isDefined() && prod.klabel().get().params().contains(sort);
  convert(sort, sortIsKlabelParam);
}

代码示例来源:origin: kframework/k

/**
 * Throws a compiler error if {@code mainModule} already occurs on the current import chain,
 * i.e. the module imports form a cycle.
 *
 * @param mainModule     the module about to be visited
 * @param visitedModules the chain of modules visited so far, in import order
 * @throws KEMException if {@code mainModule} is contained in {@code visitedModules}
 */
private static void checkCircularModuleImports(Module mainModule, scala.collection.Seq<Module> visitedModules) {
  if (visitedModules.contains(mainModule)) {
    // Build e.g. "Found circularity in module imports: A < B < A".
    // StringBuilder instead of String += in a loop avoids O(n^2) copying.
    StringBuilder msg = new StringBuilder("Found circularity in module imports: ");
    for (Module m : mutable(visitedModules)) {
      msg.append(m.getName()).append(" < ");
    }
    msg.append(visitedModules.head().getName());
    throw KEMException.compilerError(msg.toString());
  }
}

相关文章