本文整理了Java中scala.collection.JavaConversions.asJavaIterable()
方法的一些代码示例,展示了JavaConversions.asJavaIterable()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。JavaConversions.asJavaIterable()
方法的具体详情如下:
包路径:scala.collection.JavaConversions
类名称:JavaConversions
方法名:asJavaIterable
方法描述:将 Scala 的 scala.collection.Iterable 转换(包装)为等价的 java.lang.Iterable,使 Scala 集合可以在 Java 代码中直接用 for-each 循环遍历。
代码示例来源:origin: neo4j/neo4j
@Override
public Iterable<Notification> getNotifications()
{
    // Bridge the Scala collection held by the wrapped result into a Java Iterable.
    final scala.collection.Iterable<Notification> scalaNotifications = inner.notifications();
    return JavaConversions.asJavaIterable( scalaNotifications );
}
代码示例来源:origin: Graylog2/graylog2-server
public Iterable<LogSegment> getSegments() {
    // Expose the Kafka log's segments to Java callers via a Scala-to-Java bridge.
    final scala.collection.Iterable<LogSegment> scalaSegments = kafkaLog.logSegments();
    return JavaConversions.asJavaIterable(scalaSegments);
}
代码示例来源:origin: Graylog2/graylog2-server
/**
 * Returns the first valid offset in the entire journal.
 *
 * @return base offset of the earliest segment, or 0 when the journal is empty
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> segments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment earliest = Iterables.getFirst(segments, null);
    return earliest == null ? 0 : earliest.baseOffset();
}
代码示例来源:origin: neo4j/neo4j
public void toString( PrintWriter writer )
{
    // Legacy dump path first — intentionally does not convert exceptions.
    inner.dumpToString( writer );
    // Then print one description line for every notification on the result.
    final Iterable<Notification> notifications = JavaConversions.asJavaIterable( inner.notifications() );
    for ( Notification notification : notifications )
    {
        writer.println( notification.getDescription() );
    }
}
代码示例来源:origin: OryxProject/oryx
@Override
public Iterable<U> buildUpdates(JavaPairRDD<K, M> newData) {
    // Delegate to the Scala-side manager on the unwrapped RDD, then adapt the result for Java.
    final scala.collection.Iterable<U> updates = scalaManager.buildUpdates(newData.rdd());
    return JavaConversions.asJavaIterable(updates);
}
代码示例来源:origin: Graylog2/graylog2-server
@Override
public Integer call() throws Exception {
    loggerForCleaner.debug("Beginning log cleanup");
    // Time the whole cleanup pass so the duration can be logged at the end.
    final Timer.Context timerContext = new Timer().time();
    int deletedFiles = 0;
    for (final Log log : JavaConversions.asJavaIterable(logManager.allLogs())) {
        // Logs configured for compaction are skipped; only retention-style cleanup runs here.
        if (log.config().compact()) {
            continue;
        }
        loggerForCleaner.debug("Garbage collecting {}", log.name());
        deletedFiles += cleanupExpiredSegments(log)
                + cleanupSegmentsToMaintainSize(log)
                + cleanupSegmentsToRemoveCommitted(log);
    }
    loggerForCleaner.debug("Log cleanup completed. {} files deleted in {} seconds",
            deletedFiles,
            NANOSECONDS.toSeconds(timerContext.stop()));
    return deletedFiles;
}
代码示例来源:origin: Graylog2/graylog2-server
private int cleanupSegmentsToRemoveCommitted(Log kafkaLog) {
    // Nothing to do when the journal is already down to a single segment.
    if (kafkaLog.numberOfSegments() <= 1) {
        loggerForCleaner.debug(
                "[cleanup-committed] The journal is already minimal at {} segment(s), not trying to remove more segments.",
                kafkaLog.numberOfSegments());
        return 0;
    }
    // Determining the largest offset inside a segment is expensive (requires reading the
    // whole file), so instead take a global view: every segment that overlaps the range
    // [committedOffset, Long.MAX_VALUE] must be kept; anything else may be deleted.
    final long committedOffset = KafkaJournal.this.committedOffset.get();
    final HashSet<LogSegment> segmentsToKeep = Sets.newHashSet(
            JavaConversions.asJavaIterable(kafkaLog.logSegments(committedOffset, Long.MAX_VALUE))
    );
    loggerForCleaner.debug("[cleanup-committed] Keeping segments {}", segmentsToKeep);
    return kafkaLog.deleteOldSegments(new AbstractFunction1<LogSegment, Object>() {
        @Override
        public Object apply(LogSegment segment) {
            // A segment is deletable exactly when it is not in the keep set.
            final boolean delete = !segmentsToKeep.contains(segment);
            if (delete) {
                loggerForCleaner.debug(
                        "[cleanup-committed] Should delete segment {} because it is prior to committed offset {}",
                        segment,
                        committedOffset);
            }
            return delete;
        }
    });
}
}
代码示例来源:origin: com.typesafe.play/play_2.10
@Inject
public Langs(play.api.i18n.Langs langs) {
    this.langs = langs;
    // Wrap each Scala-side Lang in the Java API's Lang, preserving iteration order.
    final List<Lang> wrapped = new ArrayList<Lang>();
    for (play.api.i18n.Lang scalaLang : JavaConversions.asJavaIterable(langs.availables())) {
        wrapped.add(new Lang(scalaLang));
    }
    this.availables = Collections.unmodifiableList(wrapped);
}
代码示例来源:origin: apache/tinkerpop
public static void refresh() {
    if (CONTEXT == null) {
        throw new IllegalStateException("The Spark context has not been created.");
    }
    if (CONTEXT.isStopped()) {
        recreateStopped();
    }
    // Re-register every named persistent RDD and record which names are still live.
    final Set<String> liveNames = new HashSet<>();
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
        final String name = rdd.name();
        if (name != null) {
            liveNames.add(name);
            NAME_TO_RDD.put(name, rdd);
        }
    }
    // Drop stale entries whose RDDs are no longer persisted in the context.
    NAME_TO_RDD.keySet().removeIf(key -> !liveNames.contains(key));
}
代码示例来源:origin: org.neo4j/neo4j-cypher
@Override
public Iterable<Notification> getNotifications()
{
    // Adapt the Scala collection of notifications so Java callers can iterate it.
    final scala.collection.Iterable<Notification> notifications = inner.notifications();
    return JavaConversions.asJavaIterable( notifications );
}
代码示例来源:origin: apache/crunch
/**
 * Returns an iterator over the elements of {@code array}, handling Scala
 * collections by bridging them to a Java {@link Iterable} first; any other
 * array representation is delegated to the superclass.
 *
 * @param array the array-like value to iterate
 * @return iterator over the array's elements
 */
@Override
protected Iterator<Object> getArrayElements(Object array) {
    if (array instanceof scala.collection.Iterable) {
        // Parameterized cast instead of a raw type; safe because elements are
        // consumed as Object, so no heap pollution is possible.
        @SuppressWarnings("unchecked")
        final scala.collection.Iterable<Object> scalaIterable = (scala.collection.Iterable<Object>) array;
        return JavaConversions.asJavaIterable(scalaIterable).iterator();
    }
    return (Iterator<Object>) super.getArrayElements(array);
}
代码示例来源:origin: org.graylog2/graylog2-server
public Iterable<LogSegment> getSegments() {
    // Bridge the Scala segment collection into a Java Iterable for callers.
    final scala.collection.Iterable<LogSegment> segments = kafkaLog.logSegments();
    return JavaConversions.asJavaIterable(segments);
}
代码示例来源:origin: org.graylog2/graylog2-shared
public Iterable<LogSegment> getSegments() {
    // Convert the log's Scala segment collection to a Java Iterable.
    final scala.collection.Iterable<LogSegment> allSegments = kafkaLog.logSegments();
    return JavaConversions.asJavaIterable(allSegments);
}
代码示例来源:origin: org.graylog2/graylog2-shared
/**
 * Returns the first valid offset in the entire journal.
 *
 * @return base offset of the first segment, or 0 if there are no segments
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> allSegments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment firstSegment = Iterables.getFirst(allSegments, null);
    if (firstSegment != null) {
        return firstSegment.baseOffset();
    }
    return 0;
}
代码示例来源:origin: org.graylog2/graylog2-server
/**
 * Returns the first valid offset in the entire journal.
 *
 * @return base offset of the earliest segment; 0 when no segment exists
 */
public long getLogStartOffset() {
    final Iterable<LogSegment> segments = JavaConversions.asJavaIterable(kafkaLog.logSegments());
    final LogSegment head = Iterables.getFirst(segments, null);
    return head == null ? 0 : head.baseOffset();
}
代码示例来源:origin: org.apache.apex/malhar-contrib
/**
 * Looks up the connection strings of all Kafka brokers registered in the
 * cluster's ZooKeeper. There is always only one string in zkHost.
 *
 * @param zkHost set containing a single ZooKeeper connect string
 * @return connection strings ("host:port") of every broker in the cluster
 */
public static Set<String> getBrokers(Set<String> zkHost){
    ZkClient zkclient = new ZkClient(zkHost.iterator().next(), 30000, 30000, ZKStringSerializer$.MODULE$);
    try {
        Set<String> brokerHosts = new HashSet<String>();
        for (Broker b : JavaConversions.asJavaIterable(ZkUtils.getAllBrokersInCluster(zkclient))) {
            brokerHosts.add(b.connectionString());
        }
        return brokerHosts;
    } finally {
        // Close the ZooKeeper client even when the broker lookup throws,
        // otherwise the connection (and its threads) would leak.
        zkclient.close();
    }
}
代码示例来源:origin: com.cloudera.oryx/oryx-lambda
@Override
public Iterable<U> buildUpdates(JavaPairRDD<K, M> newData) {
    // Unwrap to the Scala RDD, delegate the update computation, and bridge the result back.
    final scala.collection.Iterable<U> scalaUpdates = scalaManager.buildUpdates(newData.rdd());
    return JavaConversions.asJavaIterable(scalaUpdates);
}
代码示例来源:origin: org.apache.tinkerpop/spark-gremlin
public static void refresh() {
    if (CONTEXT == null) {
        throw new IllegalStateException("The Spark context has not been created.");
    }
    if (CONTEXT.isStopped()) {
        recreateStopped();
    }
    // Rebuild the name registry from the context's currently persisted RDDs.
    final Set<String> currentNames = new HashSet<>();
    for (final RDD<?> rdd : JavaConversions.asJavaIterable(CONTEXT.persistentRdds().values())) {
        final String rddName = rdd.name();
        if (rddName != null) {
            currentNames.add(rddName);
            NAME_TO_RDD.put(rddName, rdd);
        }
    }
    // Evict registry entries that no longer correspond to a persisted RDD.
    NAME_TO_RDD.keySet().removeIf(key -> !currentNames.contains(key));
}
代码示例来源:origin: org.neo4j/neo4j-cypher
public void toString( PrintWriter writer )
{
    // Dump through the legacy path first; exceptions are deliberately not converted here.
    inner.dumpToString( writer );
    // Append the description of each notification as its own line.
    final Iterable<Notification> all = JavaConversions.asJavaIterable( inner.notifications() );
    for ( Notification n : all )
    {
        writer.println( n.getDescription() );
    }
}
代码示例来源:origin: keeps/roda
@Override
public String persistenceId() {
    // Derive the id from the first cluster role prefixed "backend-", if present.
    final Iterable<String> selfRoles =
            JavaConversions.asJavaIterable(Cluster.get(getContext().system()).selfRoles());
    for (final String role : selfRoles) {
        if (role.startsWith("backend-")) {
            return role + "-master";
        }
    }
    // No backend role on this node — fall back to the plain master id.
    return "master";
}
内容来源于网络,如有侵权,请联系作者删除!