本文整理了 Java 中 scala.collection.Seq 类的一些代码示例,展示了 Seq 类的具体用法。这些代码示例主要来源于 Github、Stackoverflow、Maven 等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Seq 类的具体详情如下:
包路径:scala.collection.Seq
类名称:Seq
暂无
代码示例来源:origin: apache/hive
@Override
public synchronized void onJobStart(SparkListenerJobStart jobStart) {
  // Record the mapping between this job and each of its stages.
  int jobId = jobStart.jobId();
  int stageCount = jobStart.stageIds().size();
  int[] stageIds = new int[stageCount];
  for (int idx = 0; idx < stageCount; idx++) {
    Integer stage = (Integer) jobStart.stageIds().apply(idx);
    stageIds[idx] = stage;
    stageIdToJobId.put(stage, jobId);
  }
  jobIdToStageId.put(jobId, stageIds);
}
代码示例来源:origin: apache/hive
@Override
public void onJobStart(SparkListenerJobStart jobStart) {
  // Map every stage id of the newly started job back to its job id.
  synchronized (stageToJobId) {
    for (int idx = 0; idx < jobStart.stageIds().length(); idx++) {
      Integer stageId = (Integer) jobStart.stageIds().apply(idx);
      stageToJobId.put(stageId, jobStart.jobId());
    }
  }
}
代码示例来源:origin: apache/ignite
/**
 * Util method that checks that the input schema contains only double types and
 * returns the list of field names.
 *
 * @param mdl Pipeline model.
 * @return List of field names.
 * @throws IllegalArgumentException If any field's base type is not double.
 */
private List<String> checkAndGetInputSchema(PipelineModel mdl) {
    Transformer firstTransformer = mdl.transformers().head();
    StructType inputSchema = firstTransformer.inputSchema();
    List<StructField> input = new ArrayList<>(JavaConverters.seqAsJavaListConverter(inputSchema.fields()).asJava());
    List<String> schema = new ArrayList<>();
    for (StructField field : input) {
        String fieldName = field.name();
        // Validate the type before recording the name, so a failure leaves no partial result.
        if (!ScalarType.Double().base().equals(field.dataType().base()))
            throw new IllegalArgumentException("Parser supports only double types [name=" +
                fieldName + ",type=" + field.dataType() + "]");
        schema.add(fieldName);
    }
    return schema;
}
}
代码示例来源:origin: com.typesafe.play/play_2.10
/**
 * Converts a Scala Seq to a Java array.
 *
 * @param clazz component type of the resulting array
 * @param scalaList the Scala sequence to copy
 * @return a new array containing the elements of {@code scalaList} in order
 */
@SuppressWarnings("unchecked") // Array.newInstance(clazz, n) always yields a T[] for Class<T> clazz.
public static <T> T[] asArray(Class<T> clazz, scala.collection.Seq<T> scalaList) {
    T[] arr = (T[]) Array.newInstance(clazz, scalaList.length());
    scalaList.copyToArray(arr);
    return arr;
}
代码示例来源:origin: linkedin/cruise-control
brokerSampleRetentionMs = Math.max(_minBrokerSampleStoreTopicRetentionTimeMs, brokerSampleRetentionMs);
int numberOfBrokersInCluster = zkUtils.getAllBrokersInCluster().size();
if (numberOfBrokersInCluster <= 1) {
throw new IllegalStateException(
代码示例来源:origin: pinterest/doctorkafka
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  // Join the connection strings of all brokers registered in ZooKeeper with
  // commas. Mirrors the original reduce(null, ...): returns null when the
  // cluster has no brokers.
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
  Broker[] allBrokers = new Broker[brokerSeq.size()];
  brokerSeq.copyToArray(allBrokers);
  String joined = null;
  for (Broker broker : allBrokers) {
    String endpoint = broker.brokerEndPoint(
        ListenerName.forSecurityProtocol(securityProtocol)).connectionString();
    joined = (joined == null) ? endpoint : joined + "," + endpoint;
  }
  return joined;
}
代码示例来源:origin: kframework/k
for (Sentence s : x.getValue()) {
Production p = (Production) s;
if (p.items().size() == 3) {
Terminal t = (Terminal) p.items().tail().head();
ul.separator = t.value();
ul.klabel = p.klabel().get();
ul.childSort = ((NonTerminal) p.items().head()).sort();
ul.pList = p;
} else if (p.items().size() == 1 && p.items().head() instanceof Terminal) {
ul.terminatorKLabel = p.klabel().get();
ul.pTerminator = p;
代码示例来源:origin: linkedin/cruise-control
/**
 * Test helper: executes {@code proposalsToExecute} through a fresh Executor and,
 * once execution finishes, asserts for every proposal in {@code proposalsToCheck}
 * that the replication factor is unchanged and the replicas/leader moved as proposed.
 */
private void executeAndVerifyProposals(ZkUtils zkUtils,
Collection<ExecutionProposal> proposalsToExecute,
Collection<ExecutionProposal> proposalsToCheck) {
KafkaCruiseControlConfig configs = new KafkaCruiseControlConfig(getExecutorProperties());
// NOTE(review): 86400000L / 43200000L are millisecond durations passed positionally
// to the Executor constructor -- confirm their meaning against Executor's signature.
Executor executor = new Executor(configs, new SystemTime(), new MetricRegistry(), 86400000L, 43200000L);
executor.setExecutionMode(false);
executor.executeProposals(proposalsToExecute, Collections.emptySet(), null, EasyMock.mock(LoadMonitor.class), null, null, null);
// Snapshot each checked partition's replication factor right after submission,
// so the post-execution assertions can verify it did not change.
Map<TopicPartition, Integer> replicationFactors = new HashMap<>();
for (ExecutionProposal proposal : proposalsToCheck) {
int replicationFactor = zkUtils.getReplicasForPartition(proposal.topic(), proposal.partitionId()).size();
replicationFactors.put(new TopicPartition(proposal.topic(), proposal.partitionId()), replicationFactor);
}
// Block until the executor reports completion before asserting final state.
waitUntilExecutionFinishes(executor);
for (ExecutionProposal proposal : proposalsToCheck) {
TopicPartition tp = new TopicPartition(proposal.topic(), proposal.partitionId());
int expectedReplicationFactor = replicationFactors.get(tp);
// Replication factor must be preserved by the rebalance.
assertEquals("Replication factor for partition " + tp + " should be " + expectedReplicationFactor,
expectedReplicationFactor, zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).size());
if (proposal.hasReplicaAction()) {
// Every broker in the proposal's new replica set must now host the partition.
for (int brokerId : proposal.newReplicas()) {
assertTrue("The partition should have moved for " + tp,
zkUtils.getReplicasForPartition(tp.topic(), tp.partition()).contains(brokerId));
}
}
assertEquals("The leader should have moved for " + tp,
proposal.newLeader(), zkUtils.getLeaderForPartition(tp.topic(), tp.partition()).get());
}
}
代码示例来源:origin: apache/incubator-pinot
public static void stopServer(KafkaServerStartable serverStartable) {
  // Shut the broker down, then best-effort delete its first configured log directory.
  serverStartable.shutdown();
  String firstLogDir = serverStartable.serverConfig().logDirs().apply(0);
  FileUtils.deleteQuietly(new File(firstLogDir));
}
代码示例来源:origin: com.datastax.spark/spark-cassandra-connector_2.10
/**
 * Returns the names of the columns selected from the table, in selection order.
 */
@SuppressWarnings("RedundantCast")
public String[] selectedColumnNames() {
  // The ClassTag lets Scala's toArray materialize a String[] directly.
  ClassTag<String> stringTag = getClassTag(String.class);
  return (String[]) rdd().selectedColumnNames().toArray(stringTag);
}
代码示例来源:origin: org.apache.spark/spark-sql_2.11
@Test
public void testFrequentItems() {
  // Frequent-item sketch over column "a" at 20% support must include value 1.
  Dataset<Row> df = spark.table("testData2");
  Dataset<Row> results = df.stat().freqItems(new String[] {"a"}, 0.2);
  Assert.assertTrue(results.collectAsList().get(0).getSeq(0).contains(1));
}
代码示例来源:origin: open-korean-text/elasticsearch-analysis-openkoreantext
@Override
protected Seq<KoreanToken> perform(Seq<KoreanToken> tokens) {
  // Replace each token that has a stem with its stemmed form; keep the rest as-is.
  KoreanToken[] result = new KoreanToken[tokens.length()];
  Iterator<KoreanToken> it = tokens.iterator();
  for (int pos = 0; it.hasNext(); pos++) {
    KoreanToken current = it.next();
    result[pos] = current.stem().nonEmpty() ? stem(current) : current;
  }
  return JavaConverters.asScalaBuffer(Arrays.asList(result)).toSeq();
}
代码示例来源:origin: kframework/k
/**
 * Throws a compiler error if {@code mainModule} already appears among the
 * modules visited on the current import chain, i.e. the imports are circular.
 *
 * @param mainModule     the module about to be visited
 * @param visitedModules the modules already on the import path, in order
 * @throws KEMException if a circular import is detected
 */
private static void checkCircularModuleImports(Module mainModule, scala.collection.Seq<Module> visitedModules) {
    if (visitedModules.contains(mainModule)) {
        // Build "A < B < ... < first" describing the cycle. StringBuilder avoids
        // the O(n^2) cost of repeated String concatenation in the loop.
        StringBuilder msg = new StringBuilder("Found circularity in module imports: ");
        for (Module m : mutable(visitedModules)) {
            msg.append(m.getName()).append(" < ");
        }
        msg.append(visitedModules.head().getName());
        throw KEMException.compilerError(msg.toString());
    }
}
代码示例来源:origin: edu.cmu.ml.rtw/matt-util
/**
 * Copies a Scala Seq into a new mutable Java List, preserving element order.
 */
public static <V> List<V> scalaToJavaList(scala.collection.Seq<V> scalaList) {
    List<V> result = Lists.newArrayList();
    for (scala.collection.Iterator<V> it = scalaList.iterator(); it.hasNext(); ) {
        result.add(it.next());
    }
    return result;
}
}
代码示例来源:origin: io.zipkin.finagle2/zipkin-finagle
@Override public void incrementMessagesDropped(Throwable cause) {
  // Unwrap the sender's wrapper so counters reflect the underlying cause.
  if (cause instanceof FinagleSender.WrappedException) cause = cause.getCause();
  // Bump a dropped-message counter for every prefix path of the cause chain.
  Seq<Traversable<String>> paths = Throwables.mkString(cause).inits().toSeq();
  Iterator<Traversable<String>> it = paths.iterator();
  while (it.hasNext()) {
    messagesDropped.counter(it.next().toSeq()).incr();
  }
}
代码示例来源:origin: twitter/GraphJet
@Override
public int skip(int n) {
  // Advance the cursor by n when that stays within the sequence; otherwise
  // consume whatever remains and report how much was actually skipped.
  int len = seq.length();
  if (index + n < len) {
    index += n;
    return n;
  }
  int remaining = len - index;
  index = len;
  return remaining;
}
}
代码示例来源:origin: kframework/k
Production p = (Production) s;
assert p.items().head() instanceof Terminal || p.items().head() instanceof RegexTerminal;
assert p.items().last() instanceof Terminal || p.items().last() instanceof RegexTerminal;
final ProductionItem body;
if (cfgInfo.isLeafCell(p.sort())) {
body = p.items().tail().head();
} else {
body = NonTerminal(Sorts.Bag());
Seq<ProductionItem> pi = Seq(p.items().head(), optDots, body, optDots, p.items().last());
Production p1 = Production(p.klabel().get(), p.sort(), pi, p.att());
Production p2 = Production(Sorts.Cell(), Seq(NonTerminal(p.sort())));
代码示例来源:origin: dcaoyuan/nbscala
/**
 * Accumulates into classes[0] a space-separated list of "&lt;Name&gt;*.class"
 * patterns, one per class-like definition visible in the parsed Scala file.
 */
@Override
public void run(ResultIterator resultIterator) throws Exception {
    ScalaRootScope rootScope = ((ScalaParserResult) resultIterator.getParserResult()).rootScope();
    if (rootScope == null) {
        return;
    }
    // NOTE(review): the original invoked visibleDfns(ElementKind.CLASS) twice and
    // discarded the first result; the redundant call is removed here.
    scala.collection.Seq<AstDfn> tmpls = rootScope.visibleDfns(ElementKind.CLASS);
    if (!tmpls.isEmpty()) {
        scala.collection.Iterator itr = tmpls.iterator();
        while (itr.hasNext()) {
            AstDfn tmpl = (AstDfn) itr.next();
            if (classes[0].length() > 0) {
                classes[0] = classes[0] + " "; // NOI18N
            }
            classes[0] = classes[0] + tmpl.getName().toString().replace('.', '/') + "*.class"; // NOI18N
        }
    }
}
});
代码示例来源:origin: linkedin/kafka-monitor
/**
 * Counts the brokers currently registered in the given ZooKeeper ensemble.
 *
 * @param zkUrl zookeeper connection url
 * @return number of brokers in this cluster
 */
public static int getBrokerCount(String zkUrl) {
  ZkUtils zk = ZkUtils.apply(zkUrl, ZK_SESSION_TIMEOUT_MS, ZK_CONNECTION_TIMEOUT_MS, JaasUtils.isZkSecurityEnabled());
  try {
    return zk.getAllBrokersInCluster().size();
  } finally {
    // Always release the ZooKeeper connection, even if the lookup throws.
    zk.close();
  }
}
代码示例来源:origin: com.github.pinterest/kafkastats
public static String getBrokers(String zkUrl, SecurityProtocol securityProtocol) {
  // Comma-join the connection strings of every broker known to ZooKeeper.
  // Mirrors the original reduce(null, ...): yields null for an empty cluster.
  ZkUtils zkUtils = getZkUtils(zkUrl);
  Seq<Broker> brokerSeq = zkUtils.getAllBrokersInCluster();
  Broker[] allBrokers = new Broker[brokerSeq.size()];
  brokerSeq.copyToArray(allBrokers);
  StringBuilder sb = null;
  for (Broker broker : allBrokers) {
    String endpoint = broker.brokerEndPoint(
        ListenerName.forSecurityProtocol(securityProtocol)).connectionString();
    if (sb == null) {
      sb = new StringBuilder(endpoint);
    } else {
      sb.append(',').append(endpoint);
    }
  }
  return sb == null ? null : sb.toString();
}
内容来源于网络,如有侵权,请联系作者删除!