backtype.storm.task.TopologyContext.getThisTargets()方法的使用及代码示例

x33g5p2x  于2022-01-30 转载在 其他  
字(12.3k)|赞(0)|评价(0)|浏览(104)

本文整理了Java中backtype.storm.task.TopologyContext.getThisTargets()方法的一些代码示例,展示了TopologyContext.getThisTargets()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。TopologyContext.getThisTargets()方法的具体详情如下:
包路径:backtype.storm.task.TopologyContext
类名称:TopologyContext
方法名:getThisTargets

TopologyContext.getThisTargets介绍

[英]Gets information about who is consuming the outputs of this component, and how.
[中]获取有关谁正在使用此组件的输出以及如何使用的信息。

代码示例

代码示例来源:origin: alibaba/jstorm

/**
 * Collects, for every component that consumes this component's output,
 * that component's list of task ids.
 *
 * @return map from downstream component id to its task id list
 */
public Map<String, List<Integer>> getThisTargetComponentTasks() {
    // getThisTargets(): <stream_id, <component_id, Grouping>>
    Set<String> consumers = new HashSet<>();
    for (Map<String, Grouping> groupings : getThisTargets().values()) {
        consumers.addAll(groupings.keySet());
    }
    Map<String, List<Integer>> componentToTasks = new HashMap<>();
    for (String componentId : consumers) {
        componentToTasks.put(componentId, getComponentTasks(componentId));
    }
    return componentToTasks;
}

代码示例来源:origin: alibaba/jstorm

@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Hand the delegate a collector that rewrites emitted stream ids.
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));
    // Collect every task of every component subscribed to the coordination stream.
    _outputTasks = new ArrayList<>();
    Map<String, Grouping> coordConsumers =
            Utils.get(context.getThisTargets(), _coordStream, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        _outputTasks.addAll(context.getComponentTasks(componentId));
    }
    _rand = new Random(Utils.secureRandomLong());
}

代码示例来源:origin: alibaba/jstorm

Map<String, Map<String, Grouping>> output_groupings = topology_context.getThisTargets();
for (Entry<String, Map<String, Grouping>> entry : output_groupings.entrySet()) {
  String stream_id = entry.getKey();

代码示例来源:origin: alibaba/mdrill

/**
 * Builds, for each output stream of the current task, the grouper used to
 * route tuples to every downstream component subscribed to that stream.
 * Components with zero tasks are skipped; streams that end up with no
 * routable consumer are omitted from the result.
 *
 * @param topology_context context of the task whose outputs are inspected
 * @return <stream_id, <component_id, MkGrouper>>
 */
public static Map<String, Map<String, MkGrouper>> outbound_components(
        TopologyContext topology_context) {
    Map<String, Map<String, MkGrouper>> result = new HashMap<String, Map<String, MkGrouper>>();
    // <stream_id, <component_id, Grouping>>
    Map<String, Map<String, Grouping>> targets = topology_context.getThisTargets();
    for (Entry<String, Map<String, Grouping>> streamEntry : targets.entrySet()) {
        String streamId = streamEntry.getKey();
        Fields outFields = topology_context.getThisOutputFields(streamId);
        Map<String, MkGrouper> groupers = new HashMap<String, MkGrouper>();
        for (Entry<String, Grouping> consumer : streamEntry.getValue().entrySet()) {
            String componentId = consumer.getKey();
            int taskCount = topology_context.getComponentTasks(componentId).size();
            // A component deployed with parallelism 0 receives no tuples.
            if (taskCount > 0) {
                groupers.put(componentId,
                        new MkGrouper(outFields, consumer.getValue(), taskCount));
            }
        }
        if (groupers.size() > 0) {
            result.put(streamId, groupers);
        }
    }
    return result;
}

代码示例来源:origin: alibaba/mdrill

if(context!=null){
  Map<String, Map<String, Grouping>> targets = context.getThisTargets();
  for (Map<String, Grouping> e : targets.values()) {
    for (String componentId : e.keySet()) {

代码示例来源:origin: alibaba/jstorm

for (Map.Entry<String, Map<String, Grouping>> entry : this.getThisTargets().entrySet()) {
  Map stringTargetMap = new HashMap<>();
  for (Map.Entry<String, Grouping> innerEntry : entry.getValue().entrySet()) {

代码示例来源:origin: alibaba/jstorm

/**
 * get current task's output task list
 *
 * Computes the union of all task ids that any task in this worker
 * sends tuples to, by resolving every downstream consumer of every
 * stream of every local task.
 *
 * @param workerData worker runtime state providing topology and task ids
 * @return set of downstream task ids for the whole worker
 */
public static Set<Integer> worker_output_tasks(WorkerData workerData) {
    ContextMaker contextMaker = workerData.getContextMaker();
    StormTopology sysTopology = workerData.getSysTopology();
    Set<Integer> outputTasks = new HashSet<>();
    for (Integer taskId : workerData.getTaskIds()) {
        TopologyContext ctx = contextMaker.makeTopologyContext(sysTopology, taskId, null);
        // getThisTargets(): <StreamId, <ComponentId, Grouping>>
        for (Map<String, Grouping> consumers : ctx.getThisTargets().values()) {
            for (String componentId : consumers.keySet()) {
                outputTasks.addAll(ctx.getComponentTasks(componentId));
            }
        }
    }
    return outputTasks;
}

代码示例来源:origin: alibaba/jstorm

/**
 * Wires up coordination state before delegating to the wrapped bolt:
 * installs a timeout callback when the delegate supports it, records the
 * tasks listening on the coordination stream, and counts the expected
 * number of source reports.
 */
public void prepare(Map config, TopologyContext context, OutputCollector collector) {
    // Only install an expiry callback if the delegate wants timeout notifications.
    TimeCacheMap.ExpiredCallback<Object, TrackingInfo> expiredCallback =
            (_delegate instanceof TimeoutCallback) ? new TimeoutItems() : null;
    _tracked = new TimeCacheMap<>(context.maxTopologyMessageTimeout(), expiredCallback);
    _collector = collector;
    _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
    // Every task of every component subscribed to the coordination stream.
    Map<String, Grouping> coordConsumers =
            Utils.get(context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        for (Integer taskId : context.getComponentTasks(componentId)) {
            _countOutTasks.add(taskId);
        }
    }
    if (!_sourceArgs.isEmpty()) {
        _numSourceReports = 0;
        for (Entry<String, SourceArgs> sourceEntry : _sourceArgs.entrySet()) {
            // singleCount sources report once; others report once per task.
            _numSourceReports += sourceEntry.getValue().singleCount
                    ? 1
                    : context.getComponentTasks(sourceEntry.getKey()).size();
        }
    }
}

代码示例来源:origin: alibaba/mdrill

/**
 * Prepares the coordination wrapper: sets up the tracked-message cache
 * (with an expiry callback when the delegate supports timeouts), records
 * the tasks subscribed to the coordination stream, and counts how many
 * source reports to expect.
 */
public void prepare(Map config, TopologyContext context, OutputCollector collector) {
    // Expiry callback only when the delegate wants timeout notifications.
    TimeCacheMap.ExpiredCallback<Object, TrackingInfo> expiredCallback =
            (_delegate instanceof TimeoutCallback) ? new TimeoutItems() : null;
    _tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(config), expiredCallback);
    _collector = collector;
    _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
    // All tasks of all components listening on the coordination stream.
    Map<String, Grouping> coordConsumers = Utils.get(
            context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        for (Integer taskId : context.getComponentTasks(componentId)) {
            _countOutTasks.add(taskId);
        }
    }
    if (!_sourceArgs.isEmpty()) {
        _numSourceReports = 0;
        for (Entry<String, SourceArgs> sourceEntry : _sourceArgs.entrySet()) {
            // singleCount sources report once; others once per task.
            _numSourceReports += sourceEntry.getValue().singleCount
                    ? 1
                    : context.getComponentTasks(sourceEntry.getKey()).size();
        }
    }
}

代码示例来源:origin: alibaba/jstorm

for (String component : Utils.get(context.getThisTargets(),
    COORD_STREAM(batchGroup),
    new HashMap<String, Grouping>()).keySet()) {

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

/**
 * Maps every component consuming this component's output to that
 * component's task id list.
 *
 * @return <component_id, task id list> for all downstream consumers
 */
public Map<String, List<Integer>> getThisTargetComponentTasks() {
    Map<String, List<Integer>> result = new HashMap<>();
    // getThisTargets(): <stream_id, <component_id, Grouping>>
    for (Map<String, Grouping> consumers : getThisTargets().values()) {
        for (String componentId : consumers.keySet()) {
            // Resolve each unique component exactly once.
            if (!result.containsKey(componentId)) {
                result.put(componentId, getComponentTasks(componentId));
            }
        }
    }
    return result;
}

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

/**
 * get current task's output <Stream_id, <componentId, MkGrouper>>
 *
 * Builds one grouper per (stream, downstream component) pair; components
 * with zero tasks are skipped, and streams left with no routable consumer
 * are omitted from the result.
 */
public static Map<String, Map<String, MkGrouper>> outbound_components(TopologyContext topology_context, WorkerData workerData) {
    Map<String, Map<String, MkGrouper>> result = new HashMap<String, Map<String, MkGrouper>>();
    // <Stream_id,<component,Grouping>>
    Map<String, Map<String, Grouping>> targets = topology_context.getThisTargets();
    for (Entry<String, Map<String, Grouping>> streamEntry : targets.entrySet()) {
        String streamId = streamEntry.getKey();
        Fields outFields = topology_context.getThisOutputFields(streamId);
        Map<String, MkGrouper> groupers = new HashMap<String, MkGrouper>();
        for (Entry<String, Grouping> consumer : streamEntry.getValue().entrySet()) {
            String componentId = consumer.getKey();
            List<Integer> outTasks = topology_context.getComponentTasks(componentId);
            // ATTENTION: If topology set one component parallelism as 0
            // so we don't need send tuple to it
            if (outTasks.size() > 0) {
                groupers.put(componentId,
                        new MkGrouper(topology_context, outFields, consumer.getValue(), componentId, streamId, workerData));
            }
            LOG.info("outbound_components, {}-{} for task-{} on {}", componentId, outTasks, topology_context.getThisTaskId(), streamId);
        }
        if (groupers.size() > 0) {
            result.put(streamId, groupers);
        }
    }
    return result;
}

代码示例来源:origin: com.n3twork.storm/storm-core

@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Open the delegate with a collector that redirects stream ids.
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));
    // Record the tasks of every component subscribed to the coordination stream.
    _outputTasks = new ArrayList<Integer>();
    Map<String, Grouping> coordConsumers =
            Utils.get(context.getThisTargets(), _coordStream, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        _outputTasks.addAll(context.getComponentTasks(componentId));
    }
    _rand = new Random(Utils.secureRandomLong());
}

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

@Override
public void open(Map conf, TopologyContext context, SpoutOutputCollector collector) {
    // Delegate through a stream-overriding collector.
    _delegate.open(conf, context, new SpoutOutputCollector(new StreamOverrideCollector(collector)));
    _outputTasks = new ArrayList<>();
    // Resolve the task ids of every consumer of the coordination stream.
    Map<String, Grouping> consumers =
            Utils.get(context.getThisTargets(), _coordStream, new HashMap<String, Grouping>());
    for (String downstream : consumers.keySet()) {
        _outputTasks.addAll(context.getComponentTasks(downstream));
    }
    _rand = new Random(Utils.secureRandomLong());
}

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

for (Map.Entry<String, Map<String, Grouping>> entry : this.getThisTargets().entrySet()) {
  Map stringTargetMap = new HashMap<String, Object>();
  for (Map.Entry<String, Grouping> innerEntry : entry.getValue().entrySet()) {

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

/**
 * get current task's output task list
 *
 * Unions the downstream task ids of every task hosted by this worker:
 * for each local task, every component consuming any of its streams
 * contributes all of its task ids.
 *
 * @param workerData worker runtime state providing topology and task ids
 * @return set of downstream task ids for the whole worker
 */
public static Set<Integer> worker_output_tasks(WorkerData workerData) {
    ContextMaker contextMaker = workerData.getContextMaker();
    StormTopology sysTopology = workerData.getSysTopology();
    Set<Integer> outputTasks = new HashSet<>();
    for (Integer taskId : workerData.getTaskids()) {
        TopologyContext ctx = contextMaker.makeTopologyContext(sysTopology, taskId, null);
        // getThisTargets(): <StreamId, <ComponentId, Grouping>>
        for (Map<String, Grouping> consumers : ctx.getThisTargets().values()) {
            for (String componentId : consumers.keySet()) {
                outputTasks.addAll(ctx.getComponentTasks(componentId));
            }
        }
    }
    return outputTasks;
}

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

/**
 * Sets up coordination bookkeeping before delegating: tracked-message
 * cache (with a timeout callback if the delegate supports one), the set
 * of tasks listening on the coordination stream, and the expected number
 * of source reports.
 */
public void prepare(Map config, TopologyContext context, OutputCollector collector) {
    // Expiry callback only when the delegate asks for timeout notifications.
    TimeCacheMap.ExpiredCallback<Object, TrackingInfo> expiredCallback =
            (_delegate instanceof TimeoutCallback) ? new TimeoutItems() : null;
    _tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(), expiredCallback);
    _collector = collector;
    _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
    // Every task of every component subscribed to the coordination stream.
    Map<String, Grouping> coordConsumers = Utils.get(
            context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        for (Integer taskId : context.getComponentTasks(componentId)) {
            _countOutTasks.add(taskId);
        }
    }
    if (!_sourceArgs.isEmpty()) {
        _numSourceReports = 0;
        for (Entry<String, SourceArgs> sourceEntry : _sourceArgs.entrySet()) {
            // singleCount sources report once; others once per task.
            _numSourceReports += sourceEntry.getValue().singleCount
                    ? 1
                    : context.getComponentTasks(sourceEntry.getKey()).size();
        }
    }
}

代码示例来源:origin: com.n3twork.storm/storm-core

/**
 * Prepares coordination state: builds the tracked-message cache (with an
 * expiry callback when the delegate implements TimeoutCallback), records
 * coordination-stream consumer tasks, and tallies expected source reports.
 */
public void prepare(Map config, TopologyContext context, OutputCollector collector) {
    // Install the expiry callback only for timeout-aware delegates.
    TimeCacheMap.ExpiredCallback<Object, TrackingInfo> expiredCallback =
            (_delegate instanceof TimeoutCallback) ? new TimeoutItems() : null;
    _tracked = new TimeCacheMap<Object, TrackingInfo>(context.maxTopologyMessageTimeout(), expiredCallback);
    _collector = collector;
    _delegate.prepare(config, context, new OutputCollector(new CoordinatedOutputCollector(collector)));
    // All tasks of all components listening on the coordination stream.
    Map<String, Grouping> coordConsumers = Utils.get(
            context.getThisTargets(), Constants.COORDINATED_STREAM_ID, new HashMap<String, Grouping>());
    for (String componentId : coordConsumers.keySet()) {
        for (Integer taskId : context.getComponentTasks(componentId)) {
            _countOutTasks.add(taskId);
        }
    }
    if (!_sourceArgs.isEmpty()) {
        _numSourceReports = 0;
        for (Entry<String, SourceArgs> sourceEntry : _sourceArgs.entrySet()) {
            // singleCount sources report once; others once per task.
            _numSourceReports += sourceEntry.getValue().singleCount
                    ? 1
                    : context.getComponentTasks(sourceEntry.getKey()).size();
        }
    }
}

代码示例来源:origin: com.n3twork.storm/storm-core

for(String component: Utils.get(context.getThisTargets(),
            COORD_STREAM(batchGroup),
            new HashMap<String, Grouping>()).keySet()) {

代码示例来源:origin: com.alibaba.jstorm/jstorm-core

for(String component: Utils.get(context.getThisTargets(),
            COORD_STREAM(batchGroup),
            new HashMap<String, Grouping>()).keySet()) {

相关文章

微信公众号

最新文章

更多