Usage of org.apache.flink.api.java.operators.MapOperator.reduce() with code examples

x33g5p2x · reposted 2022-01-25

This article collects Java code examples for the org.apache.flink.api.java.operators.MapOperator.reduce() method and shows how it is used in practice. The examples are drawn from curated projects on GitHub, Stack Overflow, Maven, and similar platforms, so they should serve as solid references. The details of MapOperator.reduce() are as follows:

Package: org.apache.flink.api.java.operators
Class: MapOperator
Method: reduce

About MapOperator.reduce

MapOperator does not declare reduce() itself; it inherits reduce(ReduceFunction<T>) from DataSet<T>. The call combines the elements of the data set pairwise with the supplied ReduceFunction until a single value remains, and returns a ReduceOperator<T>, i.e. a DataSet<T> containing that one element.
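
A minimal, self-contained sketch of the map-then-reduce pattern (this example is illustrative and does not come from any of the projects below):

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;

public class MapReduceSketch {
  public static void main(String[] args) throws Exception {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    // map() returns a MapOperator<Long, Long>; reduce() then folds the
    // data set pairwise into a single element
    DataSet<Long> sum = env.generateSequence(1, 100)
        .map(new MapFunction<Long, Long>() {
          @Override
          public Long map(Long value) {
            return value * value; // square each element
          }
        })
        .reduce(new ReduceFunction<Long>() {
          @Override
          public Long reduce(Long a, Long b) {
            return a + b; // running sum
          }
        });
    System.out.println(sum.collect().get(0)); // sum of squares 1..100 = 338350
  }
}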

Code examples

Code example source: apache/flink

public static void main(String[] args) throws Exception {
  final long numSamples = args.length > 0 ? Long.parseLong(args[0]) : 1000000;
  final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
  // count how many of the samples would randomly fall into
  // the unit circle
  DataSet<Long> count =
      env.generateSequence(1, numSamples)
      .map(new Sampler())
      .reduce(new SumReducer());
  long theCount = count.collect().get(0);
  System.out.println("We estimate Pi to be: " + (theCount * 4.0 / numSamples));
}
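
The Sampler and SumReducer helpers are not part of this excerpt. In Flink's PiEstimation example they amount to roughly the following (a sketch reconstructed from context, not the verbatim source):

// requires org.apache.flink.api.common.functions.MapFunction and ReduceFunction
public static class Sampler implements MapFunction<Long, Long> {
  @Override
  public Long map(Long value) {
    // draw a random point in the unit square and emit 1 if it falls
    // inside the quarter circle, 0 otherwise
    double x = Math.random();
    double y = Math.random();
    return (x * x + y * y) < 1 ? 1L : 0L;
  }
}

public static class SumReducer implements ReduceFunction<Long> {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2;
  }
}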

Code example source: apache/flink

/**
 * Count the number of elements in a DataSet.
 *
 * @param input DataSet of elements to be counted
 * @param <T> element type
 * @return count
 */
public static <T> DataSet<LongValue> count(DataSet<T> input) {
  return input
    .map(new MapTo<>(new LongValue(1)))
      .returns(LONG_VALUE_TYPE_INFO)
      .name("Emit 1")
    .reduce(new AddLongValue())
      .name("Sum");
}
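
In the Flink sources this helper lives in Gelly's GraphUtils, where MapTo maps every element to the same LongValue(1) and AddLongValue adds two LongValues. A short usage sketch (the word list and variable names are illustrative):

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
DataSet<String> words = env.fromElements("to", "be", "or", "not", "to", "be");
// count() yields a single-element DataSet<LongValue>, not a plain long
long n = count(words).collect().get(0).getValue(); // 6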

Code example source: apache/flink

@Override
protected void testProgram() throws Exception {
  ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
  env.setParallelism(4);
  DataSet<Long> input = env.generateSequence(1, 10);
  DataSet<Long> bc1 = env.generateSequence(1, 5);
  DataSet<Long> bc2 = env.generateSequence(6, 10);
  List<Long> result = input
      .map(new Mapper())
      .withBroadcastSet(bc1.union(bc2), BC_NAME)
      .reduce(new Reducer())
      .collect();
  Assert.assertEquals(Long.valueOf(3025), result.get(0));
}
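
The Mapper and Reducer bodies are elided from this excerpt. For the assertion to hold, the broadcast union must sum to 1 + 2 + … + 10 = 55 and each input element must be scaled by that sum before a summing reduce, since 55 × (1 + … + 10) = 55 × 55 = 3025. A Mapper consistent with that looks roughly like this (an assumption, not the verbatim test code):

// requires org.apache.flink.api.common.functions.RichMapFunction
// and org.apache.flink.configuration.Configuration
public static class Mapper extends RichMapFunction<Long, Long> {
  private long broadcastSum; // sum of the broadcast variable (55 here)

  @Override
  public void open(Configuration parameters) {
    for (Long v : getRuntimeContext().<Long>getBroadcastVariable(BC_NAME)) {
      broadcastSum += v;
    }
  }

  @Override
  public Long map(Long value) {
    return value * broadcastSum;
  }
}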

Code example source: apache/flink

.reduce(new UpdateAccumulator())

Code example source: apache/flink

.rebalance()
.map(new FailingMapper2<Long>())
.reduce(new ReduceFunction<Long>() {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2; // body elided in the original excerpt; a summing reducer is assumed
  }
});

Code example source: apache/flink

.rebalance()
.map(new FailingMapper3<Long>())
.reduce(new ReduceFunction<Long>() {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2; // body elided in the original excerpt; a summing reducer is assumed
  }
});

Code example source: apache/flink

.reduce(new ReduceFunction<Long>() {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2; // body elided in the excerpt; summing assumed
  }
});

Code example source: apache/flink

.reduce(new SelectOneReducer<Long>()); // SelectOneReducer is a Flink test utility that keeps one of its two inputs
DataSet<Long> bcInput2 = env.generateSequence(1, 10);

Code example source: apache/flink

.reduce(new ReduceFunction<Long>() {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2; // body elided in the excerpt; summing assumed
  }
});

Code example source: apache/flink

.rebalance()
.map(new FailingMapper1<Long>())
.reduce(new ReduceFunction<Long>() {
  @Override
  public Long reduce(Long value1, Long value2) {
    return value1 + value2; // body elided in the excerpt; summing assumed
  }
});

Code example source: apache/flink

.reduce(new SelectOneReducer<Long>());

Code example source: apache/flink

.setParallelism(parallelism)
  .name("Square")
.reduce(new Sum())
  .setParallelism(parallelism)
  .name("Sum");

Code example source: org.apache.flink/flink-examples-batch, org.apache.flink/flink-java-examples, org.apache.flink/flink-examples-batch_2.10, com.alibaba.blink/flink-examples-batch

All four of these artifacts ship the same Pi-estimation main method shown in the first apache/flink example above.

Code example source: org.apache.flink/flink-gelly_2.11, org.apache.flink/flink-gelly_2.10, com.alibaba.blink/flink-gelly

These artifacts contain the same count() helper as the second apache/flink example above; the only difference is that the flink-gelly_2.10 variant spells out the type arguments as new MapTo<T, LongValue>(new LongValue(1)) instead of using the diamond operator.

Code example source: amidst/toolbox

/**
 * {@inheritDoc}
 */
@Override
public double updateModel(DataFlink<DataInstance> dataUpdate) {
  try {
    Configuration config = new Configuration();
    config.setString(BN_NAME, this.dag.getName());
    config.setBytes(EFBN_NAME, Serialization.serializeObject(efBayesianNetwork));
    DataSet<DataInstance> dataset = dataUpdate.getDataSet();
    // compute the sufficient statistics in parallel: map each instance,
    // then reduce() aggregates the per-instance statistics into one object
    this.sumSS = dataset.map(new SufficientSatisticsMAP())
        .withParameters(config) // passes config to the mapper's open() method
        .reduce(new SufficientSatisticsReduce())
        .collect().get(0);
    // add the prior
    sumSS.sum(efBayesianNetwork.createInitSufficientStatistics());
    JobExecutionResult result = dataset.getExecutionEnvironment().getLastJobExecutionResult();
    numInstances = result.getAccumulatorResult(ParallelMaximumLikelihood.COUNTER_NAME + "_" + this.dag.getName());
    numInstances++; // initial counts
  } catch (Exception ex) {
    throw new UndeclaredThrowableException(ex);
  }
  return this.getLogMarginalProbability();
}
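
The noteworthy pattern here is withParameters(config) feeding a Configuration into the rich map function before reduce() aggregates the per-instance results. A minimal, self-contained sketch of the same pattern (the key "scale.factor" and all names are illustrative, not from the amidst codebase):

// requires org.apache.flink.api.common.functions.RichMapFunction,
// org.apache.flink.api.java.DataSet, org.apache.flink.api.java.ExecutionEnvironment,
// and org.apache.flink.configuration.Configuration
ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
Configuration config = new Configuration();
config.setLong("scale.factor", 10L);

DataSet<Long> scaledSum = env.generateSequence(1, 5)
    .map(new RichMapFunction<Long, Long>() {
      private long factor;

      @Override
      public void open(Configuration parameters) {
        // withParameters(config) below delivers the Configuration here
        factor = parameters.getLong("scale.factor", 1L);
      }

      @Override
      public Long map(Long value) {
        return value * factor;
      }
    })
    .withParameters(config)
    .reduce((a, b) -> a + b); // 10 * (1+2+3+4+5) = 150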
